| 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | #ifndef _ASM_X86_FTRACE_H |
| 3 | #define _ASM_X86_FTRACE_H |
| 4 | |
| 5 | #include <asm/ptrace.h> |
| 6 | |
| 7 | #ifdef CONFIG_FUNCTION_TRACER |
| 8 | #ifndef CC_USING_FENTRY |
| 9 | # error Compiler does not support fentry? |
| 10 | #endif |
| 11 | # define MCOUNT_ADDR ((unsigned long)(__fentry__)) |
| 12 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ |
| 13 | |
/* Ignore unused weak functions, which will have non-zero offsets */
| 15 | #ifdef CONFIG_HAVE_FENTRY |
| 16 | # include <asm/ibt.h> |
| 17 | /* Add offset for endbr64 if IBT enabled */ |
| 18 | # define FTRACE_MCOUNT_MAX_OFFSET ENDBR_INSN_SIZE |
| 19 | #endif |
| 20 | |
| 21 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 22 | #define ARCH_SUPPORTS_FTRACE_OPS 1 |
| 23 | #endif |
| 24 | |
| 25 | #ifndef __ASSEMBLER__ |
| 26 | extern void __fentry__(void); |
| 27 | |
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * No fixup required on x86: recordmcount already records the
	 * exact address of the mcount/fentry call instruction, so the
	 * address can be used as-is.
	 */
	return addr;
}
| 36 | |
| 37 | static inline unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip) |
| 38 | { |
| 39 | if (is_endbr(val: (void*)(fentry_ip - ENDBR_INSN_SIZE))) |
| 40 | fentry_ip -= ENDBR_INSN_SIZE; |
| 41 | |
| 42 | return fentry_ip; |
| 43 | } |
| 44 | #define ftrace_get_symaddr(fentry_ip) arch_ftrace_get_symaddr(fentry_ip) |
| 45 | |
| 46 | #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS |
| 47 | |
| 48 | #include <linux/ftrace_regs.h> |
| 49 | |
| 50 | static __always_inline struct pt_regs * |
| 51 | arch_ftrace_get_regs(struct ftrace_regs *fregs) |
| 52 | { |
| 53 | /* Only when FL_SAVE_REGS is set, cs will be non zero */ |
| 54 | if (!arch_ftrace_regs(fregs)->regs.cs) |
| 55 | return NULL; |
| 56 | return &arch_ftrace_regs(fregs)->regs; |
| 57 | } |
| 58 | |
/*
 * Fill in the minimum pt_regs fields needed so a partially-saved register
 * set looks like a valid kernel-mode frame: kernel cs, flags not pinned.
 */
#define arch_ftrace_partial_regs(regs) do { \
	regs->flags &= ~X86_EFLAGS_FIXED; \
	regs->cs = __KERNEL_CS; \
} while (0)

/*
 * Populate a perf-style pt_regs from ftrace_regs: only ip/sp are real,
 * cs is forced to kernel mode and flags are zeroed (not captured here).
 */
#define arch_ftrace_fill_perf_regs(fregs, _regs) do { \
	(_regs)->ip = arch_ftrace_regs(fregs)->regs.ip; \
	(_regs)->sp = arch_ftrace_regs(fregs)->regs.sp; \
	(_regs)->cs = __KERNEL_CS; \
	(_regs)->flags = 0; \
} while (0)

/* Redirect execution by rewriting the saved instruction pointer. */
#define ftrace_regs_set_instruction_pointer(fregs, _ip) \
	do { arch_ftrace_regs(fregs)->regs.ip = (_ip); } while (0)
| 73 | |
| 74 | |
| 75 | static __always_inline unsigned long |
| 76 | ftrace_regs_get_return_address(struct ftrace_regs *fregs) |
| 77 | { |
| 78 | return *(unsigned long *)ftrace_regs_get_stack_pointer(fregs); |
| 79 | } |
| 80 | |
| 81 | struct ftrace_ops; |
| 82 | #define ftrace_graph_func ftrace_graph_func |
| 83 | void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, |
| 84 | struct ftrace_ops *op, struct ftrace_regs *fregs); |
| 85 | #else |
| 86 | #define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR |
| 87 | #endif |
| 88 | |
| 89 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
| 90 | /* |
| 91 | * When a ftrace registered caller is tracing a function that is |
| 92 | * also set by a register_ftrace_direct() call, it needs to be |
| 93 | * differentiated in the ftrace_caller trampoline. To do this, we |
| 94 | * place the direct caller in the ORIG_AX part of pt_regs. This |
| 95 | * tells the ftrace_caller that there's a direct caller. |
| 96 | */ |
static inline void
__arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
{
	/*
	 * Emulate a call: stash the direct-trampoline address in orig_ax,
	 * where the ftrace_caller trampoline looks for it (see the block
	 * comment above).  A non-zero orig_ax signals "direct caller".
	 */
	regs->orig_ax = addr;
}
| 103 | #define arch_ftrace_set_direct_caller(fregs, addr) \ |
| 104 | __arch_ftrace_set_direct_caller(&arch_ftrace_regs(fregs)->regs, addr) |
| 105 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
| 106 | |
| 107 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 108 | |
/* Arch-private per-record data for dynamic ftrace; x86 stores nothing. */
struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};
| 112 | |
| 113 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 114 | #endif /* __ASSEMBLER__ */ |
| 115 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 116 | |
| 117 | |
| 118 | #ifndef __ASSEMBLER__ |
| 119 | |
| 120 | void prepare_ftrace_return(unsigned long ip, unsigned long *parent, |
| 121 | unsigned long frame_pointer); |
| 122 | |
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
/* Presumably marks ftrace_ops trampoline memory read-only — see definition. */
extern void set_ftrace_ops_ro(void);
#else
/* No dynamic ftrace: nothing to protect, stub it out. */
static inline void set_ftrace_ops_ro(void) { }
#endif
| 128 | |
| 129 | #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME |
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Match a kernel symbol against a syscall name, treating the
	 * arch entry-point prefixes "__x64_sys", "__ia32_sys", "__do_sys"
	 * and plain "sys" as equivalent.  The first three characters of
	 * both strings ("sys" / "__x") are always skipped.
	 */
	const char *tail = name + 3;

	if (!strcmp(sym + 3, tail))
		return true;
	if (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, tail))
		return true;
	if (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, tail))
		return true;
	if (!strncmp(sym, "__do_sys", 8) && !strcmp(sym + 8, tail))
		return true;

	return false;
}
| 141 | |
| 142 | #ifndef COMPILE_OFFSETS |
| 143 | |
| 144 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION) |
| 145 | #include <linux/compat.h> |
| 146 | |
| 147 | /* |
| 148 | * Because ia32 syscalls do not map to x86_64 syscall numbers |
| 149 | * this screws up the trace output when tracing a ia32 task. |
| 150 | * Instead of reporting bogus syscalls, just do not trace them. |
| 151 | * |
| 152 | * If the user really wants these, then they should use the |
| 153 | * raw syscall tracepoints with filtering. |
| 154 | */ |
| 155 | #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1 |
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	/*
	 * True when the current task entered the kernel via a 32-bit
	 * syscall path; such syscalls are skipped by the tracer (see the
	 * block comment above).  @regs is unused on x86.
	 */
	return in_32bit_syscall();
}
| 160 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ |
| 161 | #endif /* !COMPILE_OFFSETS */ |
| 162 | #endif /* !__ASSEMBLER__ */ |
| 163 | |
| 164 | #endif /* _ASM_X86_FTRACE_H */ |
| 165 | |