arch/sparc/kernel/ftrace.c (SPARC dynamic ftrace):

// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
#include <trace/syscall.h>

#include <asm/ftrace.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static const u32 ftrace_nop = 0x01000000;

static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	u32 call;
	s32 off;

	off = ((s32)addr - (s32)ip);
	call = 0x40000000 | ((u32)off >> 2);

	return call;
}

static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
{
	u32 replaced;
	int faulted;

	__asm__ __volatile__(
	"1:	cas	[%[ip]], %[old], %[new]\n"
	"	flush	%[ip]\n"
	"	mov	0, %[faulted]\n"
	"2:\n"
	"	.section	.fixup,#alloc,#execinstr\n"
	"	.align	4\n"
	"3:	sethi	%%hi(2b), %[faulted]\n"
	"	jmpl	%[faulted] + %%lo(2b), %%g0\n"
	"	 mov	1, %[faulted]\n"
	"	.previous\n"
	"	.section	__ex_table,\"a\"\n"
	"	.align	4\n"
	"	.word	1b, 3b\n"
	"	.previous\n"
	: "=r" (replaced), [faulted] "=r" (faulted)
	: [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
	: "memory");

	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	u32 old, new;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop;
	return ftrace_modify_code(ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	u32 old, new;

	old = ftrace_nop;
	new = ftrace_call_replace(ip, addr);
	return ftrace_modify_code(ip, old, new);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	u32 old, new;

	old = *(u32 *) &ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func);
	return ftrace_modify_code(ip, old, new);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	u32 old, new;

	old = *(u32 *) &ftrace_graph_call;
	new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	u32 old, new;

	old = *(u32 *) &ftrace_graph_call;
	new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);

	return ftrace_modify_code(ip, old, new);
}

#endif /* !CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long parent,
				    unsigned long self_addr,
				    unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return parent + 8UL;

	if (function_graph_enter(parent, self_addr, frame_pointer, NULL))
		return parent + 8UL;

	return return_hooker;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
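In ftrace_call_replace() above, a SPARC call instruction is assembled by hand: bits 31:30 carry the 01 opcode (0x40000000) and bits 29:0 carry the signed, PC-relative displacement in words, i.e. the byte offset to the target shifted right by two. The stand-alone sketch below mirrors that encoding and the matching decode; the addresses and helper names are invented for illustration and none of this is kernel code.

#include <stdint.h>
#include <stdio.h>

/* Encode a SPARC "call" the way ftrace_call_replace() does:
 * bits 31:30 = 01, bits 29:0 = (target - pc) >> 2.
 */
static uint32_t encode_call(uint32_t pc, uint32_t target)
{
	int32_t off = (int32_t)target - (int32_t)pc;

	return 0x40000000u | ((uint32_t)off >> 2);
}

/* Recover the byte offset: sign-extend the 30-bit word displacement. */
static int32_t decode_call_off(uint32_t insn)
{
	int32_t disp30 = (int32_t)(insn << 2) >> 2;

	return disp30 * 4;
}

int main(void)
{
	uint32_t pc = 0x10000000u;	/* hypothetical patched call site  */
	uint32_t target = 0x0fff0000u;	/* hypothetical tracer entry point */
	uint32_t insn = encode_call(pc, target);

	printf("insn   = 0x%08x\n", (unsigned)insn);
	printf("target = 0x%08x\n", (unsigned)(pc + (uint32_t)decode_call_off(insn)));
	return 0;
}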

arch/sh/kernel/ftrace.c (SuperH dynamic ftrace):

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
 * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
 *
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

static unsigned char ftrace_nop[4];
/*
 * If we're trying to nop out a call to a function, we instead
 * place a call to the address after the memory table.
 *
 * 8c011060 <a>:
 * 8c011060:       02 d1           mov.l   8c01106c <a+0xc>,r1
 * 8c011062:       22 4f           sts.l   pr,@-r15
 * 8c011064:       02 c7           mova    8c011070 <a+0x10>,r0
 * 8c011066:       2b 41           jmp     @r1
 * 8c011068:       2a 40           lds     r0,pr
 * 8c01106a:       09 00           nop
 * 8c01106c:       68 24           .word 0x2468
 * 8c01106e:       1d 8c           .word 0x8c1d
 * 8c011070:       26 4f           lds.l   @r15+,pr
 *
 * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
 * past the _mcount call and continue executing code as normal.
 */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
	__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
	return ftrace_nop;
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/* Place the address in the memory table. */
	__raw_writel(addr, ftrace_replaced_code);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return ftrace_replaced_code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUS from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Wait for any running NMIs to finish and set a flag that says
 *    we are modifying code, it is done in an atomic operation.
 * 3) Write the code
 * 4) clear the flag.
 * 5) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */
#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds the text to write to the IP */

static void clear_mod_flag(void)
{
	int old = atomic_read(&nmi_running);

	for (;;) {
		int new = old & ~MOD_CODE_WRITE_FLAG;

		if (old == new)
			break;

		old = atomic_cmpxchg(&nmi_running, old, new);
	}
}

static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU process can be writing to mod_code_status.
	 *    (and the code itself)
	 * But if one were to fail, then they all should, and if one were
	 * to succeed, then they all should.
	 */
	mod_code_status = copy_to_kernel_nofault(mod_code_ip, mod_code_newcode,
						 MCOUNT_INSN_SIZE);

	/* if we fail, then kill any new writers */
	if (mod_code_status)
		clear_mod_flag();
}

void arch_ftrace_nmi_enter(void)
{
	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
		smp_rmb();
		ftrace_mod_code();
	}
	/* Must have previous changes seen before executions */
	smp_mb();
}

void arch_ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing nmi_running */
	smp_mb();
	atomic_dec(&nmi_running);
}

static void wait_for_nmi_and_set_mod_flag(void)
{
	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
		return;

	do {
		cpu_relax();
	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
}

static void wait_for_nmi(void)
{
	if (!atomic_read(&nmi_running))
		return;

	do {
		cpu_relax();
	} while (atomic_read(&nmi_running));
}

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_mb();

	wait_for_nmi_and_set_mod_flag();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_mb();

	clear_mod_flag();
	wait_for_nmi();

	return mod_code_status;
}

static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
			      unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

	return 0;
}
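
The long comment above ftrace_mod_code() spells out the five-step, NMI-safe patching protocol: fill the IP and code buffers, raise the flag once in-flight NMIs have drained, write the code, clear the flag, and drain again, while arch_ftrace_nmi_enter() replays the same write if an NMI lands in the middle. Below is a minimal user-space sketch of that handshake using C11 atomics; every name in it (modify_code_model, nmi_enter_model, do_write, and the sample values) is made up for illustration, and none of it is part of this file or of any kernel API.

#include <stdatomic.h>
#include <string.h>

#define MOD_WRITE_FLAG	(1u << 31)

static atomic_uint nmi_running_model;
static void *pending_ip;		/* step 1: the IP buffer   */
static const void *pending_code;	/* step 1: the code buffer */

/* Stand-in for the nofault copy; everyone writes identical bytes. */
static void do_write(void *ip, const void *code)
{
	memcpy(ip, code, sizeof(unsigned int));
}

/* Writer side: steps 1-5 from the comment above. */
static void modify_code_model(void *ip, const void *code)
{
	unsigned int expected = 0;

	pending_ip = ip;				/* 1) fill buffers */
	pending_code = code;
	atomic_thread_fence(memory_order_seq_cst);

	/* 2) wait for running "NMIs" to finish, then raise the flag */
	while (!atomic_compare_exchange_weak(&nmi_running_model,
					     &expected, MOD_WRITE_FLAG))
		expected = 0;

	do_write(pending_ip, pending_code);		/* 3) write        */

	atomic_fetch_and(&nmi_running_model, ~MOD_WRITE_FLAG);	/* 4) clear */

	while (atomic_load(&nmi_running_model))		/* 5) drain        */
		;
}

/* "NMI" entry: if the flag is up, repeat the write before running. */
static void nmi_enter_model(void)
{
	if (atomic_fetch_add(&nmi_running_model, 1) & MOD_WRITE_FLAG)
		do_write(pending_ip, pending_code);
}

static void nmi_exit_model(void)
{
	atomic_fetch_sub(&nmi_running_model, 1);
}

int main(void)
{
	unsigned int site = 0x0009;	/* pretend instruction at the patch site */
	unsigned int patch = 0x2468;	/* pretend replacement word              */

	nmi_enter_model();		/* an early "NMI" sees no flag and does nothing */
	nmi_exit_model();
	modify_code_model(&site, &patch);
	return site == patch ? 0 : 1;
}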

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace(ip);

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(ip);
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod(unsigned long ip, unsigned long old_addr,
		      unsigned long new_addr)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (copy_from_kernel_nofault(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (old_addr != __raw_readl((unsigned long *)code))
		return -EINVAL;

	__raw_writel(new_addr, ip);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call);
	old_addr = (unsigned long)(&skip_trace);
	new_addr = (unsigned long)(&ftrace_graph_caller);

	return ftrace_mod(ip, old_addr, new_addr);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call);
	old_addr = (unsigned long)(&ftrace_graph_caller);
	new_addr = (unsigned long)(&skip_trace);

	return ftrace_mod(ip, old_addr, new_addr);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 *
 * This is the main routine for the function graph tracer. The function
 * graph tracer essentially works like this:
 *
 * parent is the stack address containing self_addr's return address.
 * We pull the real return address out of parent and store it in
 * current's ret_stack. Then, we replace the return address
 * with the address of return_to_handler. self_addr is the function that
 * called mcount.
 *
 * When self_addr returns, it will jump to return_to_handler which calls
 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
 * return address off of current's ret_stack and jump to it.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 */
	__asm__ __volatile__(
		"1:					\n\t"
		"mov.l	@%2, %0				\n\t"
		"2:					\n\t"
		"mov.l	%3, @%2				\n\t"
		"mov	#0, %1				\n\t"
		"3:					\n\t"
		".section .fixup, \"ax\"		\n\t"
		"4:					\n\t"
		"mov.l	5f, %0				\n\t"
		"jmp	@%0				\n\t"
		" mov	#1, %1				\n\t"
		".balign 4				\n\t"
		"5:	.long 3b			\n\t"
		".previous				\n\t"
		".section __ex_table,\"a\"		\n\t"
		".long 1b, 4b				\n\t"
		".long 2b, 4b				\n\t"
		".previous				\n\t"
		: "=&r" (old), "=r" (faulted)
		: "r" (parent), "r" (return_hooker)
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (function_graph_enter(old, self_addr, 0, NULL))
		__raw_writel(old, parent);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
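
prepare_ftrace_return() above does what the preceding comment describes: it pulls the real return address out of the parent frame, parks it on current's ret_stack via function_graph_enter(), and splices return_to_handler into the frame so the tracer regains control when the traced function returns. The stand-alone sketch below models only that bookkeeping, with a plain array standing in for ret_stack and made-up addresses; it is illustrative and not the kernel's actual data structure or API.

#include <stdio.h>

#define RET_STACK_DEPTH 32

/* A toy "ret_stack": original return addresses are parked here while
 * the hooker address sits in the real frame.
 */
static unsigned long ret_stack[RET_STACK_DEPTH];
static int ret_stack_top = -1;

/* Entry side: stash the real return address, patch in the hooker. */
static void hook_return(unsigned long *parent, unsigned long return_hooker)
{
	unsigned long old = *parent;

	if (ret_stack_top + 1 >= RET_STACK_DEPTH)
		return;			/* too deep: leave the frame alone */

	ret_stack[++ret_stack_top] = old;
	*parent = return_hooker;	/* caller now "returns" to the hooker */
}

/* Exit side: what return_to_handler would do -- pop the real address. */
static unsigned long unhook_return(void)
{
	return ret_stack[ret_stack_top--];
}

int main(void)
{
	unsigned long frame_slot = 0x8c011234UL;	/* pretend saved return address */
	unsigned long hooker = 0x8c020000UL;		/* pretend return_to_handler    */

	hook_return(&frame_slot, hooker);
	printf("patched slot: 0x%lx\n", frame_slot);
	printf("real return:  0x%lx\n", unhook_return());
	return 0;
}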