C-SKY implementation (arch/csky/kernel/ftrace.c):

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_DYNAMIC_FTRACE

#define NOP             0x4000
#define NOP32_HI        0xc400
#define NOP32_LO        0x4820
#define PUSH_LR         0x14d0
#define MOVIH_LINK      0xea3a
#define ORI_LINK        0xef5a
#define JSR_LINK        0xe8fa
#define BSR_LINK        0xe000

/*
 * Gcc-csky with -pg will insert a stub in the function prologue:
 *      push    lr
 *      jbsr    _mcount
 *      nop32
 *      nop32
 *
 * If (callee - current_pc) is less than 64MB, we'll use bsr:
 *      push    lr
 *      bsr     _mcount
 *      nop32
 *      nop32
 * else we'll use (movih + ori + jsr):
 *      push    lr
 *      movih   r26, ...
 *      ori     r26, ...
 *      jsr     r26
 *
 * (r26 is our reserved link-reg)
 */
static inline void make_jbsr(unsigned long callee, unsigned long pc,
                             uint16_t *call, bool nolr)
{
        long offset;

        call[0] = nolr ? NOP : PUSH_LR;

        offset = (long) callee - (long) pc;

        if (unlikely(offset < -67108864 || offset > 67108864)) {
                call[1] = MOVIH_LINK;
                call[2] = callee >> 16;
                call[3] = ORI_LINK;
                call[4] = callee & 0xffff;
                call[5] = JSR_LINK;
                call[6] = 0;
        } else {
                offset = offset >> 1;

                call[1] = BSR_LINK |
                         ((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
                call[2] = (uint16_t)((unsigned long) offset & 0xffff);
                call[3] = call[5] = NOP32_HI;
                call[4] = call[6] = NOP32_LO;
        }
}

static uint16_t nops[7] = {NOP, NOP32_HI, NOP32_LO, NOP32_HI, NOP32_LO,
                           NOP32_HI, NOP32_LO};

/* Check that the 14-byte patch site (2 bytes before the hook) still holds the nop sequence. */
static int ftrace_check_current_nop(unsigned long hook)
{
        uint16_t olds[7];
        unsigned long hook_pos = hook - 2;

        if (copy_from_kernel_nofault((void *)olds, (void *)hook_pos,
                        sizeof(nops)))
                return -EFAULT;

        if (memcmp((void *)nops, (void *)olds, sizeof(nops))) {
                pr_err("%p: nop but get (%04x %04x %04x %04x %04x %04x %04x)\n",
                        (void *)hook_pos,
                        olds[0], olds[1], olds[2], olds[3], olds[4], olds[5],
                        olds[6]);

                return -EINVAL;
        }

        return 0;
}

/* Patch the seven half-words at hook - 2: either the call stub built by make_jbsr() or the nop sequence. */
static int ftrace_modify_code(unsigned long hook, unsigned long target,
                              bool enable, bool nolr)
{
        uint16_t call[7];

        unsigned long hook_pos = hook - 2;
        int ret = 0;

        make_jbsr(target, hook, call, nolr);

        ret = copy_to_kernel_nofault((void *)hook_pos, enable ? call : nops,
                                     sizeof(nops));
        if (ret)
                return -EPERM;

        flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE);

        return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        int ret = ftrace_check_current_nop(rec->ip);

        if (ret)
                return ret;

        return ftrace_modify_code(rec->ip, addr, true, false);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
        return ftrace_modify_code(rec->ip, addr, false, false);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        int ret = ftrace_modify_code((unsigned long)&ftrace_call,
                                (unsigned long)func, true, true);
        if (!ret)
                ret = ftrace_modify_code((unsigned long)&ftrace_regs_call,
                                (unsigned long)func, true, true);
        return ret;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        return ftrace_modify_code(rec->ip, addr, true, true);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer)
{
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        unsigned long old;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        old = *parent;

        if (!function_graph_enter(old, self_addr,
                        *(unsigned long *)frame_pointer, parent)) {
                /*
                 * For csky-gcc, a function with a sub-call has:
                 *      subi    sp, sp, 8
                 *      stw     r8, (sp, 0)
                 *      mov     r8, sp
                 *      st.w    r15, (sp, 0x4)
                 *      push    r15
                 *      jl      _mcount
                 * We only need to set *parent for resume.
                 *
                 * For csky-gcc, a function with no sub-call has:
                 *      subi    sp, sp, 4
                 *      stw     r8, (sp, 0)
                 *      mov     r8, sp
                 *      push    r15
                 *      jl      _mcount
                 * We need to set *parent and *(frame_pointer + 4),
                 * because lr is resumed twice.
                 */
                *parent = return_hooker;
                frame_pointer += 4;
                if (*(unsigned long *)frame_pointer == old)
                        *(unsigned long *)frame_pointer = return_hooker;
        }
}

#ifdef CONFIG_DYNAMIC_FTRACE
int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_code((unsigned long)&ftrace_graph_call,
                        (unsigned long)&ftrace_graph_caller, true, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_code((unsigned long)&ftrace_graph_call,
                        (unsigned long)&ftrace_graph_caller, false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_CPU_HAS_ICACHE_INS
struct ftrace_modify_param {
        int command;
        atomic_t cpu_count;
};

/* Run ftrace_modify_all_code() on one CPU under stop_machine() while the other CPUs wait, then invalidate their local icaches. */
static int __ftrace_modify_code(void *data)
{
        struct ftrace_modify_param *param = data;

        if (atomic_inc_return(&param->cpu_count) == 1) {
                ftrace_modify_all_code(param->command);
                atomic_inc(&param->cpu_count);
        } else {
                while (atomic_read(&param->cpu_count) <= num_online_cpus())
                        cpu_relax();
                local_icache_inv_all(NULL);
        }

        return 0;
}

void arch_ftrace_update_code(int command)
{
        struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };

        stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
}
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

/* _mcount is defined in abi's mcount.S */
EXPORT_SYMBOL(_mcount);
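
To make the instruction selection in make_jbsr() above easier to follow, here is a minimal host-side sketch (ordinary user-space C, not kernel code) of the same encoding: a target within +/-64MB of the patch site gets a push lr + bsr pair, anything farther gets the absolute address materialised with movih/ori and called through the reserved link register r26. The helper name sketch_make_jbsr() and the sample addresses are made up for illustration; the opcodes and the 67108864-byte range check are taken from the listing.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NOP             0x4000
#define NOP32_HI        0xc400
#define NOP32_LO        0x4820
#define PUSH_LR         0x14d0
#define MOVIH_LINK      0xea3a
#define ORI_LINK        0xef5a
#define JSR_LINK        0xe8fa
#define BSR_LINK        0xe000

/* Host-side mirror of make_jbsr(): fills seven half-words for the stub. */
static void sketch_make_jbsr(unsigned long callee, unsigned long pc,
                             uint16_t *call, bool nolr)
{
        long offset = (long)callee - (long)pc;

        call[0] = nolr ? NOP : PUSH_LR;

        if (offset < -67108864 || offset > 67108864) {
                /* Far target: load the absolute address into the link reg and jsr through it. */
                call[1] = MOVIH_LINK;
                call[2] = callee >> 16;
                call[3] = ORI_LINK;
                call[4] = callee & 0xffff;
                call[5] = JSR_LINK;
                call[6] = 0;
        } else {
                /* Near target: 26-bit half-word displacement packed into bsr32, rest padded with nop32. */
                offset >>= 1;
                call[1] = BSR_LINK | ((uint16_t)((unsigned long)offset >> 16) & 0x3ff);
                call[2] = (uint16_t)((unsigned long)offset & 0xffff);
                call[3] = call[5] = NOP32_HI;
                call[4] = call[6] = NOP32_LO;
        }
}

int main(void)
{
        uint16_t insn[7];
        int i;

        /* Hypothetical addresses: a callee 1MB above the patch site, so the bsr form is chosen. */
        sketch_make_jbsr(0x90200000UL, 0x90100000UL, insn, false);
        for (i = 0; i < 7; i++)
                printf("%04x ", (unsigned)insn[i]);
        printf("\n");
        return 0;
}
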
SPARC implementation (arch/sparc/kernel/ftrace.c):

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
#include <trace/syscall.h>

#include <asm/ftrace.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static const u32 ftrace_nop = 0x01000000;

/* Encode a SPARC "call" instruction: op bits 01 plus the 30-bit signed word displacement from ip to addr. */
static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        u32 call;
        s32 off;

        off = ((s32)addr - (s32)ip);
        call = 0x40000000 | ((u32)off >> 2);

        return call;
}

/*
 * Atomically replace the instruction at @ip with @new, provided it still
 * reads @old.  Returns 0 on success, 1 if the cas faulted, and 2 if an
 * unexpected instruction was found at @ip.
 */
static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
{
        u32 replaced;
        int faulted;

        __asm__ __volatile__(
        "1:     cas     [%[ip]], %[old], %[new]\n"
        "       flush   %[ip]\n"
        "       mov     0, %[faulted]\n"
        "2:\n"
        "       .section .fixup,#alloc,#execinstr\n"
        "       .align  4\n"
        "3:     sethi   %%hi(2b), %[faulted]\n"
        "       jmpl    %[faulted] + %%lo(2b), %%g0\n"
        "        mov    1, %[faulted]\n"
        "       .previous\n"
        "       .section __ex_table,\"a\"\n"
        "       .align  4\n"
        "       .word   1b, 3b\n"
        "       .previous\n"
        : "=r" (replaced), [faulted] "=r" (faulted)
        : [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
        : "memory");

        if (replaced != old && replaced != new)
                faulted = 2;

        return faulted;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        u32 old, new;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop;
        return ftrace_modify_code(ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        u32 old, new;

        old = ftrace_nop;
        new = ftrace_call_replace(ip, addr);
        return ftrace_modify_code(ip, old, new);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        u32 old, new;

        old = *(u32 *) &ftrace_call;
        new = ftrace_call_replace(ip, (unsigned long)func);
        return ftrace_modify_code(ip, old, new);
}

int __init ftrace_dyn_arch_init(void)
{
        return 0;
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        u32 old, new;

        old = *(u32 *) &ftrace_graph_call;
        new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
        return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        u32 old, new;

        old = *(u32 *) &ftrace_graph_call;
        new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
        return ftrace_modify_code(ip, old, new);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long parent,
                                    unsigned long self_addr,
                                    unsigned long frame_pointer)
{
        unsigned long return_hooker = (unsigned long) &return_to_handler;
        struct ftrace_graph_ent trace;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return parent + 8UL;

        trace.func = self_addr;
        trace.depth = current->curr_ret_stack + 1;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace))
                return parent + 8UL;

        if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
                                     frame_pointer, NULL) == -EBUSY)
                return parent + 8UL;

        return return_hooker;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
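
The SPARC encoder can be sanity-checked the same way: ftrace_call_replace() above builds a call instruction from op bits 01 and a 30-bit signed word displacement, so the encoding reduces to 0x40000000 OR'd with (addr - ip) >> 2. A minimal host-side sketch follows; the helper name sketch_call_replace() and the example addresses are hypothetical, while the constant and shift come straight from the listing.

#include <stdint.h>
#include <stdio.h>

/*
 * Host-side mirror of ftrace_call_replace().  The logical right shift by 2
 * also leaves the two opcode bit positions clear, so no extra masking is
 * needed even for negative displacements.
 */
static uint32_t sketch_call_replace(unsigned long ip, unsigned long addr)
{
        int32_t off = (int32_t)addr - (int32_t)ip;

        return 0x40000000u | ((uint32_t)off >> 2);
}

int main(void)
{
        /* Hypothetical addresses: the patch site sits 8 bytes past its target, giving 0x7ffffffe. */
        printf("%08x\n", (unsigned)sketch_call_replace(0x10001008UL, 0x10001000UL));
        return 0;
}
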