| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Stack trace management functions |
| 4 | * |
| 5 | * Copyright IBM Corp. 2006 |
| 6 | */ |
| 7 | |
| 8 | #include <linux/perf_event.h> |
| 9 | #include <linux/stacktrace.h> |
| 10 | #include <linux/uaccess.h> |
| 11 | #include <asm/asm-offsets.h> |
| 12 | #include <asm/stacktrace.h> |
| 13 | #include <asm/unwind.h> |
| 14 | #include <asm/kprobes.h> |
| 15 | #include <asm/ptrace.h> |
| 16 | |
| 17 | void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, |
| 18 | struct task_struct *task, struct pt_regs *regs) |
| 19 | { |
| 20 | struct unwind_state state; |
| 21 | unsigned long addr; |
| 22 | |
| 23 | unwind_for_each_frame(&state, task, regs, 0) { |
| 24 | addr = unwind_get_return_address(state: &state); |
| 25 | if (!addr || !consume_entry(cookie, addr)) |
| 26 | break; |
| 27 | } |
| 28 | } |
| 29 | |
| 30 | int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, |
| 31 | void *cookie, struct task_struct *task) |
| 32 | { |
| 33 | struct unwind_state state; |
| 34 | unsigned long addr; |
| 35 | |
| 36 | unwind_for_each_frame(&state, task, NULL, 0) { |
| 37 | if (state.stack_info.type != STACK_TYPE_TASK) |
| 38 | return -EINVAL; |
| 39 | |
| 40 | if (state.regs) |
| 41 | return -EINVAL; |
| 42 | |
| 43 | addr = unwind_get_return_address(state: &state); |
| 44 | if (!addr) |
| 45 | return -EINVAL; |
| 46 | |
| 47 | #ifdef CONFIG_RETHOOK |
| 48 | /* |
| 49 | * Mark stacktraces with krethook functions on them |
| 50 | * as unreliable. |
| 51 | */ |
| 52 | if (state.ip == (unsigned long)arch_rethook_trampoline) |
| 53 | return -EINVAL; |
| 54 | #endif |
| 55 | |
| 56 | if (!consume_entry(cookie, addr)) |
| 57 | return -EINVAL; |
| 58 | } |
| 59 | |
| 60 | /* Check for stack corruption */ |
| 61 | if (unwind_error(state: &state)) |
| 62 | return -EINVAL; |
| 63 | return 0; |
| 64 | } |
| 65 | |
| 66 | static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie, |
| 67 | struct perf_callchain_entry_ctx *entry, bool perf, |
| 68 | unsigned long ip) |
| 69 | { |
| 70 | #ifdef CONFIG_PERF_EVENTS |
| 71 | if (perf) { |
| 72 | if (perf_callchain_store(ctx: entry, ip)) |
| 73 | return false; |
| 74 | return true; |
| 75 | } |
| 76 | #endif |
| 77 | return consume_entry(cookie, ip); |
| 78 | } |
| 79 | |
| 80 | static inline bool ip_invalid(unsigned long ip) |
| 81 | { |
| 82 | /* |
| 83 | * Perform some basic checks if an instruction address taken |
| 84 | * from unreliable source is invalid. |
| 85 | */ |
| 86 | if (ip & 1) |
| 87 | return true; |
| 88 | if (ip < mmap_min_addr) |
| 89 | return true; |
| 90 | if (ip >= current->mm->context.asce_limit) |
| 91 | return true; |
| 92 | return false; |
| 93 | } |
| 94 | |
| 95 | static inline bool ip_within_vdso(unsigned long ip) |
| 96 | { |
| 97 | return in_range(ip, current->mm->context.vdso_base, vdso_text_size()); |
| 98 | } |
| 99 | |
/*
 * Walk the user-space stack via the s390 back-chain convention and hand
 * each return address either to perf (@perf true, into @entry) or to
 * the generic @consume_entry callback. Stops silently on the first
 * fault, invalid frame, or rejected entry.
 */
void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
				 struct perf_callchain_entry_ctx *entry,
				 const struct pt_regs *regs, bool perf)
{
	struct stack_frame_vdso_wrapper __user *sf_vdso;
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;

	/* Kernel threads have no user address space to walk. */
	if (!current->mm)
		return;
	/* The interrupted PC itself is always the first entry. */
	ip = instruction_pointer(regs);
	if (!store_ip(consume_entry, cookie, entry, perf, ip))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	/* User memory may fault; disable page faults so __get_user fails fast. */
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		/*
		 * VDSO entry code has a non-standard stack frame layout.
		 * See VDSO user wrapper code for details.
		 */
		if (!sp && ip_within_vdso(ip)) {
			/* Zero back-chain inside the vDSO: use the wrapper layout. */
			sf_vdso = (void __user *)sf;
			if (__get_user(ip, &sf_vdso->return_address))
				break;
			/* Skip past the vDSO wrapper to the caller's frame. */
			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
			sf = (void __user *)sp;
			if (__get_user(sp, &sf->back_chain))
				break;
		} else {
			/* Standard frame: return address is saved in gprs[8] (r14). */
			sf = (void __user *)sp;
			if (__get_user(ip, &sf->gprs[8]))
				break;
		}
		/* Validate SP and RA (ABI requires SP to be 8 byte aligned). */
		if (sp & 0x7 || ip_invalid(ip))
			break;
		if (!store_ip(consume_entry, cookie, entry, perf, ip))
			break;
	}
	pagefault_enable();
}
| 143 | |
| 144 | void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, |
| 145 | const struct pt_regs *regs) |
| 146 | { |
| 147 | arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, perf: false); |
| 148 | } |
| 149 | |