// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
 * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
 *
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

static unsigned char ftrace_nop[4];
/*
 * If we're trying to nop out a call to a function, we instead
 * place a call to the address after the memory table.
 *
 * 8c011060 <a>:
 * 8c011060:       02 d1           mov.l   8c01106c <a+0xc>,r1
 * 8c011062:       22 4f           sts.l   pr,@-r15
 * 8c011064:       02 c7           mova    8c01106c <a+0xc>,r0
 * 8c011066:       2b 41           jmp     @r1
 * 8c011068:       2a 40           lds     r0,pr
 * 8c01106a:       09 00           nop
 * 8c01106c:       68 24           .word 0x2468     <--- ip
 * 8c01106e:       1d 8c           .word 0x8c1d     <--- ip + 2
 * 8c011070:       26 4f           lds.l   @r15+,pr <--- ip + MCOUNT_INSN_SIZE
 *
 * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
 * past the _mcount call and continue executing code normally.
 */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
	__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
	return ftrace_nop;
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/* Place the address in the memory table. */
	__raw_writel(addr, ftrace_replaced_code);

	/*
	 * No locking needed, this must be called via kstop_machine,
	 * which in essence is like running on a uniprocessor machine.
	 */
	return ftrace_replaced_code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU,
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Wait for any running NMIs to finish and set a flag that says
 *    we are modifying code; this is done in an atomic operation.
 * 3) Write the code.
 * 4) Clear the flag.
 * 5) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write,
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code,
 * it is OK to write to that code location if the contents being
 * written are the same as what exists.
 */
#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds data to write at the IP */
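
/*
 * A sketch of the nmi_running invariant, derived from the code below:
 * the counter packs two things into one atomic_t so that a single
 * atomic operation can test both.
 *
 *	bit  31:	MOD_CODE_WRITE_FLAG - a code write is pending
 *	bits 0-30:	number of CPUs currently inside an NMI handler
 *
 * A value of 0x80000002, for example, means "write pending, two NMIs
 * in flight". wait_for_nmi_and_set_mod_flag() below can only move the
 * value from 0 to MOD_CODE_WRITE_FLAG, so the flag is never raised
 * while an NMI is already running.
 */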
static void clear_mod_flag(void)
{
	int old = atomic_read(&nmi_running);

	for (;;) {
		int new = old & ~MOD_CODE_WRITE_FLAG;

		if (old == new)
			break;

		old = atomic_cmpxchg(&nmi_running, old, new);
	}
}

static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU can be writing to mod_code_status
	 * (and to the code itself) at once. But if one write were to
	 * fail, then they all should fail, and if one were to succeed,
	 * then they all should succeed.
	 */
	mod_code_status = copy_to_kernel_nofault(mod_code_ip, mod_code_newcode,
						 MCOUNT_INSN_SIZE);

	/* if we fail, then kill any new writers */
	if (mod_code_status)
		clear_mod_flag();
}

void arch_ftrace_nmi_enter(void)
{
	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
		smp_rmb();
		ftrace_mod_code();
	}
	/* Must have previous changes seen before executions */
	smp_mb();
}

void arch_ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing nmi_running */
	smp_mb();
	atomic_dec(&nmi_running);
}

static void wait_for_nmi_and_set_mod_flag(void)
{
	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
		return;

	do {
		cpu_relax();
	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
}

static void wait_for_nmi(void)
{
	if (!atomic_read(&nmi_running))
		return;

	do {
		cpu_relax();
	} while (atomic_read(&nmi_running));
}

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_mb();

	wait_for_nmi_and_set_mod_flag();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_mb();

	clear_mod_flag();
	wait_for_nmi();

	return mod_code_status;
}

static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
			      unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with copy_{from,to}_kernel_nofault(),
	 * and make sure what we read is what we expected it to be before
	 * modifying it.
	 */

	/* read the text we want to modify */
	if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace(ip);

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(ip);
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod(unsigned long ip, unsigned long old_addr,
		      unsigned long new_addr)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (copy_from_kernel_nofault(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (old_addr != __raw_readl((unsigned long *)code))
		return -EINVAL;

	__raw_writel(new_addr, ip);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&skip_trace);
	new_addr = (unsigned long)(&ftrace_graph_caller);

	return ftrace_mod(ip, old_addr, new_addr);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&ftrace_graph_caller);
	new_addr = (unsigned long)(&skip_trace);

	return ftrace_mod(ip, old_addr, new_addr);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 *
 * This is the main routine for the function graph tracer. The function
 * graph tracer essentially works like this:
 *
 * parent is the stack address containing self_addr's return address.
 * We pull the real return address out of parent and store it in
 * current's ret_stack. Then, we replace the return address on the stack
 * with the address of return_to_handler. self_addr is the function that
 * called mcount.
 *
 * When self_addr returns, it will jump to return_to_handler, which calls
 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
 * return address off of current's ret_stack and jump to it.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is far too intrusive to
	 * ignore such a protection.
	 */
	__asm__ __volatile__(
		"1:					\n\t"
		"mov.l	@%2, %0				\n\t"
		"2:					\n\t"
		"mov.l	%3, @%2				\n\t"
		"mov	#0, %1				\n\t"
		"3:					\n\t"
		".section .fixup, \"ax\"		\n\t"
		"4:					\n\t"
		"mov.l	5f, %0				\n\t"
		"jmp	@%0				\n\t"
		" mov	#1, %1				\n\t"
		".balign 4				\n\t"
		"5:	.long 3b			\n\t"
		".previous				\n\t"
		".section __ex_table,\"a\"		\n\t"
		".long 1b, 4b				\n\t"
		".long 2b, 4b				\n\t"
		".previous				\n\t"
		: "=&r" (old), "=r" (faulted)
		: "r" (parent), "r" (return_hooker)
	);
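
	/*
	 * In plain C, and leaving out the exception-table fixup that the
	 * asm above provides, this is roughly (a sketch, not a drop-in
	 * replacement):
	 *
	 *	old = *parent;			// label 1: may fault
	 *	*parent = return_hooker;	// label 2: may fault
	 *	faulted = 0;
	 *
	 * A fault in either access jumps to the fixup at 4:, which sets
	 * faulted to 1 and resumes execution at 3:.
	 */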
	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (function_graph_enter(old, self_addr, 0, NULL))
		__raw_writel(old, parent);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */