// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
        ftrace_modify_all_code(command);
}

#define JAL 0x0c000000          /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff    /* op_code|addr : 31..26|25..0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000     /* nop */
#define INSN_JAL(addr)  \
        ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
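/*
 * A worked example of the INSN_JAL() encoding above, using a
 * hypothetical target address (illustrative only): for a target at
 * 0x80100040,
 *
 *      INSN_JAL(0x80100040)
 *        = JAL | ((0x80100040 >> 2) & ADDR_MASK)
 *        = 0x0c000000 | 0x00040010
 *        = 0x0c040010
 *
 * Only the low 28 bits of the target survive (a 26-bit field shifted
 * left by 2), so a jal can only reach targets sharing the upper four
 * address bits with the call site. That is also why the uasm calls
 * below mask their targets with JUMP_RANGE_MASK first.
 */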
static inline void ftrace_dyn_arch_init_insns(void)
{
        u32 *buf;
        unsigned int v1;

        /* la v1, _mcount */
        v1 = 3;
        buf = (u32 *)&insn_la_mcount[0];
        UASM_i_LA(&buf, v1, MCOUNT_ADDR);

        /* jal (ftrace_caller + 8), jump over the first two instructions */
        buf = (u32 *)&insn_jal_ftrace_caller;
        uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* j ftrace_graph_caller */
        buf = (u32 *)&insn_j_ftrace_graph_caller;
        uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
        int faulted;
        mm_segment_t old_fs;

        /* *(unsigned int *)ip = new_code; */
        safe_store_code(new_code, ip, faulted);

        if (unlikely(faulted))
                return -EFAULT;

        old_fs = get_fs();
        set_fs(get_ds());
        flush_icache_range(ip, ip + 8);
        set_fs(old_fs);

        return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
                                unsigned int new_code2)
{
        int faulted;
        mm_segment_t old_fs;

        safe_store_code(new_code1, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        ip += 4;
        safe_store_code(new_code2, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        ip -= 4;
        old_fs = get_fs();
        set_fs(get_ds());
        flush_icache_range(ip, ip + 8);
        set_fs(old_fs);

        return 0;
}

static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
                                 unsigned int new_code2)
{
        int faulted;
        mm_segment_t old_fs;

        ip += 4;
        safe_store_code(new_code2, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        ip -= 4;
        safe_store_code(new_code1, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        old_fs = get_fs();
        set_fs(get_ds());
        flush_icache_range(ip, ip + 8);
        set_fs(old_fs);

        return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount          --> nop
 *  sub sp, sp, 8       --> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount           --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount    --> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *                                          1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount           --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount    --> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *                                          1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
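/*
 * A quick check of the encoding above: 0x10000000 is "beq $0, $0, 0",
 * i.e. an unconditional branch, and the low 16 bits hold the branch
 * offset in instructions (counted from the delay slot). With
 * MCOUNT_OFFSET_INSNS == 4 this yields 0x10000004, exactly the
 * "b 1f (0x10000004)" shown in the comment above, which skips the
 * remainder of the module's mcount calling sequence.
 */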
int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

        /*
         * If ip is in kernel space, no long call is needed; otherwise a
         * long call is.
         */
        new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
        return ftrace_modify_code(ip, new);
#else
        /*
         * On 32 bit MIPS platforms, gcc adds a stack adjust
         * instruction in the delay slot after the branch to
         * mcount and expects mcount to restore the sp on return.
         * This is based on a legacy API and does nothing but
         * waste instructions so it's being removed at runtime.
         */
        return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

        new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
        return ftrace_modify_code(ip, new);
#else
        return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
                                                INSN_NOP : insn_la_mcount[1]);
#endif
}
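/*
 * Note the write ordering in the two 32-bit helpers used above:
 * ftrace_modify_code_2() (the NOP path) patches the first word first,
 * while ftrace_modify_code_2r() (the enable path) patches the second
 * word before the first. Presumably this is so a call site never
 * becomes live while its second instruction still holds stale
 * contents: the jal/lui only appears once the word after it is valid.
 */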
#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned int new;

        new = INSN_JAL((unsigned long)func);

        return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void)
{
        /* Encode the instructions when booting */
        ftrace_dyn_arch_init_insns();

        /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
        ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

        return 0;
}
#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
                        insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP (0xafbf << 16)  /* s{d,w} ra, offset(sp) */
#define S_R_SP  (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
#define OFFSET_MASK     0xffff  /* stack offset range: 0 ~ PT_SIZE */

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
                old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
        unsigned long sp, ip, tmp;
        unsigned int code;
        int faulted;

        /*
         * Move ip from the return address back to just before the call
         * sequence: past the "lui v1, hi_16bit_of_mcount" instruction for
         * a module (offset 24), past "move at, ra" for the kernel
         * (offset 16).
         */
        ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

        /*
         * Search backwards through the text until we find either a
         * non-store instruction or the "s{d,w} ra, offset(sp)" instruction.
         */
        do {
                /* get the code at "ip": code = *(unsigned int *)ip; */
                safe_load_code(code, ip, faulted);

                if (unlikely(faulted))
                        return 0;
                /*
                 * If we hit a non-store instruction before finding where
                 * ra is stored, then this is a leaf function and it does
                 * not store ra on the stack.
                 */
                if ((code & S_R_SP) != S_R_SP)
                        return parent_ra_addr;

                /* Move to the previous instruction */
                ip -= 4;
        } while ((code & S_RA_SP) != S_RA_SP);

        sp = fp + (code & OFFSET_MASK);

        /* tmp = *(unsigned long *)sp; */
        safe_load_stack(tmp, sp, faulted);
        if (unlikely(faulted))
                return 0;

        if (tmp == old_parent_ra)
                return sp;
        return 0;
}
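/*
 * A worked example of the matching above (the encodings are given for
 * illustration): "sw ra, 24(sp)" assembles to 0xafbf0018, which matches
 * S_RA_SP, so the scan stops and (code & OFFSET_MASK) yields the stack
 * offset 24. "sw s0, 16(sp)" (0xafb00010) matches only S_R_SP, so the
 * scan keeps walking backwards. Anything that is not a store relative
 * to sp, e.g. "addiu sp, sp, -32" (0x27bdffe0), fails the S_R_SP test
 * and the function is treated as a leaf that never saved ra.
 */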
#endif  /* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
                           unsigned long fp)
{
        unsigned long old_parent_ra;
        unsigned long return_hooker = (unsigned long)
            &return_to_handler;
        int faulted, insns;

        if (unlikely(ftrace_graph_is_dead()))
                return;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * "parent_ra_addr" is the stack address where the return address
         * of the caller of _mcount is saved.
         *
         * With gcc < 4.5, a leaf function does not save the return address
         * on the stack, so we "emulate" one in _mcount's stack space and
         * hijack it directly. A non-leaf function saves the return address
         * in its own stack space, so we cannot hijack it directly but must
         * find the real stack address; ftrace_get_parent_ra_addr() does
         * exactly that.
         *
         * With gcc >= 4.5 and its -mmcount-ra-address option, the location
         * of the return address of a non-leaf function is passed to us in
         * $12, and for a leaf function $12 holds zero. This is handled in
         * ftrace_graph_caller in mcount.S.
         */

        /* old_parent_ra = *parent_ra_addr; */
        safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
        parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
                        old_parent_ra, (unsigned long)parent_ra_addr, fp);
        /*
         * If getting the stack address of the non-leaf function's ra
         * fails, stop the function graph tracer and return.
         */
        if (parent_ra_addr == NULL)
                goto out;
#endif
        /* *parent_ra_addr = return_hooker; */
        safe_store_stack(return_hooker, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;

        /*
         * Get the recorded ip of the current mcount calling site in the
         * __mcount_loc section, which will be used to filter the function
         * entries configured through the tracing/set_graph_function interface.
         */

        insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
        self_ra -= (MCOUNT_INSN_SIZE * insns);

        if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
                *parent_ra_addr = old_parent_ra;
        return;
out:
        ftrace_graph_stop();
        WARN_ON(1);
}
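/*
 * On the insns adjustment in prepare_ftrace_return() above
 * (illustrative): jal and jalr set ra to the address of the jump plus
 * 8, skipping the delay slot. So for kernel text, self_ra minus two
 * instructions lands back on the "jal _mcount" itself, and for a
 * module, MCOUNT_OFFSET_INSNS + 1 instructions back lands on the
 * leading "lui v1" of the long-call sequence, i.e. the call-site
 * address recorded in __mcount_loc.
 */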
#endif  /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
        return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
        if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
                return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
        if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
                return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
        if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
                return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

        return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */
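/*
 * Usage note (generic ftrace behaviour, not specific to this file): at
 * boot, the CONFIG_FTRACE_SYSCALLS core walks every syscall number and
 * calls arch_syscall_addr() to map it to a handler address, which is
 * then matched against the syscall metadata tables. The 64-bit MIPS
 * version above therefore dispatches across up to three ABI ranges
 * (n32, n64, o32) and falls back to sys_ni_syscall.
 */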