// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>
#include <asm/unwind.h>

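/*
 * Walk the kernel stack of @task (or the context described by @regs) and
 * feed each return address to @consume_entry.  With CONFIG_ARCH_STACKWALK
 * this is the backend for the generic stack_trace_save() family.
 *
 * LoongArch register conventions relied on below: $r1 is the return
 * address ($ra), $r3 is the stack pointer ($sp), $r22 is the frame
 * pointer ($fp), and csr_era holds the exception return address.
 */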
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	unsigned long addr;
	struct pt_regs dummyregs;
	struct unwind_state state;

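	/*
	 * No starting context was given: synthesize one, either from the
	 * current frame or from the callee-saved state of a sleeping task.
	 */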
	if (!regs) {
		regs = &dummyregs;

		if (task == current) {
			regs->regs[3] = (unsigned long)__builtin_frame_address(0);
			regs->csr_era = (unsigned long)__builtin_return_address(0);
		} else {
			regs->regs[3] = thread_saved_fp(task);
			regs->csr_era = thread_saved_ra(task);
		}
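		/*
		 * Zero $ra and $fp in the synthesized context so the
		 * unwinder stops at this outermost frame instead of
		 * reading stale values.
		 */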
		regs->regs[1] = 0;
		regs->regs[22] = 0;
	}

	for (unwind_start(&state, task, regs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}

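/*
 * Reliable stack walker, as needed by livepatch: unlike arch_stack_walk(),
 * every frame must be accounted for.  Any gap (a zero return address or an
 * unwinder error) invalidates the whole trace, so -EINVAL is returned
 * rather than a silently truncated result.
 */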
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	unsigned long addr;
	struct pt_regs dummyregs;
	struct pt_regs *regs = &dummyregs;
	struct unwind_state state;

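	/*
	 * Seed the unwinder: the current task starts from this very frame,
	 * a sleeping task from the context saved at its last context
	 * switch, including the callee-saved $fp in thread.reg22.
	 */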
	if (task == current) {
		regs->regs[3] = (unsigned long)__builtin_frame_address(0);
		regs->csr_era = (unsigned long)__builtin_return_address(0);
		regs->regs[22] = 0;
	} else {
		regs->regs[3] = thread_saved_fp(task);
		regs->csr_era = thread_saved_ra(task);
		regs->regs[22] = task->thread.reg22;
	}
	regs->regs[1] = 0;

	for (unwind_start(&state, task, regs);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	return 0;
}

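/*
 * Copy the user-space frame record (the saved $fp/$ra pair expected just
 * below @fp) into @frame.  Returns 1 on success, 0 if the record cannot
 * be read safely or fails the sanity checks below.
 */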
static int
copy_stack_frame(unsigned long fp, struct stack_frame *frame)
{
	int ret = 1;
	unsigned long err;
	unsigned long __user *user_frame_tail;

	user_frame_tail = (unsigned long __user *)(fp - sizeof(struct stack_frame));
	if (!access_ok(user_frame_tail, sizeof(*frame)))
		return 0;

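	/*
	 * Disable page-fault handling so the copy fails fast instead of
	 * sleeping; this path may be reached from contexts (e.g. perf
	 * sampling) that must not sleep.
	 */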
	pagefault_disable();
	err = __copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame));
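	/*
	 * Reject the frame on a failed copy, and require the next frame
	 * pointer to lie strictly above the current record so that a
	 * corrupt or cyclic frame chain cannot loop forever.
	 */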
	if (err || (unsigned long)user_frame_tail >= frame->fp)
		ret = 0;
	pagefault_enable();

	return ret;
}

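/*
 * Walk a user-space stack by following the frame-pointer chain rooted in
 * $r22.  This can only make progress for user code built with frame
 * pointers (e.g. -fno-omit-frame-pointer); otherwise the walk stops at
 * the first frame.
 */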
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	unsigned long fp = regs->regs[22];

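	/*
	 * The ABI keeps the stack 16-byte aligned, so a misaligned frame
	 * pointer indicates a corrupt chain and terminates the walk.
	 */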
	while (fp && !(fp & 0xf)) {
		struct stack_frame frame;

		frame.fp = 0;
		frame.ra = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if (!frame.ra)
			break;
		if (!consume_entry(cookie, frame.ra))
			break;
		fp = frame.fp;
	}
}