/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/ftrace.h
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */
#ifndef __ASM_FTRACE_H
#define __ASM_FTRACE_H

#include <asm/insn.h>

#define HAVE_FUNCTION_GRAPH_FP_TEST

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
#define MCOUNT_ADDR		((unsigned long)_mcount)
#endif

/* The BL at the callsite's adjusted rec->ip */
#define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE

#define FTRACE_PLT_IDX		0
#define NR_FTRACE_PLTS		1

/*
 * Currently, gcc tends to save the link register after the local variables
 * on the stack. This causes the max stack tracer to report the function
 * frame sizes for the wrong functions. Defining
 * ARCH_FTRACE_SHIFT_STACK_TRACER tells the stack tracer to expect to find
 * the return address on the stack after the local variables have been set
 * up.
 *
 * Note, this may change in the future, and we will need to deal with that
 * if it were to happen.
 */
#define ARCH_FTRACE_SHIFT_STACK_TRACER 1

#ifndef __ASSEMBLY__
#include <linux/compat.h>

extern void _mcount(unsigned long);
extern void *return_address(unsigned int);

struct dyn_arch_ftrace {
	/* No extra data needed for arm64 */
};

extern unsigned long ftrace_graph_call;

extern void return_to_handler(void);

unsigned long ftrace_call_adjust(unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct dyn_ftrace;
struct ftrace_ops;

#define arch_ftrace_get_regs(regs) NULL

/*
 * Note: sizeof(struct ftrace_regs) must be a multiple of 16 to ensure correct
 * stack alignment
 */
struct ftrace_regs {
	/* x0 - x8 */
	unsigned long regs[9];

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	unsigned long direct_tramp;
#else
	unsigned long __unused;
#endif

	unsigned long fp;
	unsigned long lr;

	unsigned long sp;
	unsigned long pc;
};

static __always_inline unsigned long
ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
{
	return fregs->pc;
}

static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
				    unsigned long pc)
{
	fregs->pc = pc;
}

static __always_inline unsigned long
ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs)
{
	return fregs->sp;
}

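/*
 * Under AAPCS64 only the first eight integer/pointer arguments are passed
 * in registers (x0 - x7); any further arguments live on the caller's stack
 * and are not captured in struct ftrace_regs, so 0 is returned for them.
 */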
static __always_inline unsigned long
ftrace_regs_get_argument(struct ftrace_regs *fregs, unsigned int n)
{
	if (n < 8)
		return fregs->regs[n];
	return 0;
}

static __always_inline unsigned long
ftrace_regs_get_return_value(const struct ftrace_regs *fregs)
{
	return fregs->regs[0];
}

static __always_inline void
ftrace_regs_set_return_value(struct ftrace_regs *fregs,
			     unsigned long ret)
{
	fregs->regs[0] = ret;
}

static __always_inline void
ftrace_override_function_with_return(struct ftrace_regs *fregs)
{
	fregs->pc = fregs->lr;
}

int ftrace_regs_query_register_offset(const char *name);

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop

void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs);
#define ftrace_graph_func ftrace_graph_func

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
						 unsigned long addr)
{
	/*
	 * The ftrace trampoline will return to this address instead of the
	 * instrumented function.
	 */
	fregs->direct_tramp = addr;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#endif

#define ftrace_return_address(n) return_address(n)

/*
 * Because AArch32 mode does not share the same syscall table as AArch64,
 * tracing compat syscalls may result in reporting bogus syscalls or even
 * a hang, so just do not trace them.
 * See kernel/trace/trace_syscalls.c
 *
 * x86 code says:
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME

static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * Since all syscall functions have the __arm64_ prefix, we must
	 * skip it ("__arm64_" is 8 characters, hence sym + 8). However, as
	 * we described above, we decided to ignore compat syscalls, so we
	 * don't care about the __arm64_compat_ prefix here.
	 */
	return !strcmp(sym + 8, name);
}
#endif /* ifndef __ASSEMBLY__ */

#ifndef __ASSEMBLY__
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
struct fgraph_ret_regs {
	/* x0 - x7 */
	unsigned long regs[8];

	unsigned long fp;
	unsigned long __unused;
};

static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
{
	return ret_regs->regs[0];
}

static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
{
	return ret_regs->fp;
}

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);

#endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
#endif

#endif /* __ASM_FTRACE_H */