/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* opcode|addr : 31...26|25...0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
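/*
 * Worked example of the encoding above (the target address is made up
 * for illustration): for a hypothetical callee at 0x80100000,
 *
 *	(0x80100000 >> 2) & ADDR_MASK = 0x00040000
 *	INSN_JAL(0x80100000) = 0x0c000000 | 0x00040000 = 0x0c040000
 *
 * The CPU reconstructs the full target as (PC & 0xf0000000) | (index << 2),
 * so a jal can only reach addresses inside the current 256 MB segment;
 * JUMP_RANGE_MASK enforces the same limit on the targets handed to uasm.
 */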
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount */
	v1 = 3;
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;
	mm_segment_t old_fs;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}

static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}
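/*
 * A note on the ordering above -- this is a reading of the code, not an
 * authoritative statement of intent: ftrace_modify_code_2() stores the
 * first word before the second, while ftrace_modify_code_2r() does the
 * reverse.  When a call site is being enabled (the _2r path), writing
 * the delay-slot word first means a CPU racing through the site never
 * pairs the new first instruction with a stale second one; when the
 * site is being nop-ed out (the _2 path), neutralizing the first word
 * first keeps the second from being reached as part of a live call.
 */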
#endif

/*
 * Details of the mcount calling site on MIPS:
 *
 * 1. For the kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 * sub sp, sp, 8	--> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 * sub sp, sp, 8
 *				     1: offset = 5 instructions
 *
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 * nop | move $12, ra_address | sub sp, sp, 8
 *				     1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
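/*
 * Decoding INSN_B_1F, for illustration: 0x10000000 is "beq zero, zero,
 * offset", i.e. an unconditional branch, with the word offset in the low
 * 16 bits.  With MCOUNT_OFFSET_INSNS == 4 this yields 0x10000004, and
 *
 *	target = (branch + 4) + (4 << 2) = branch + 20
 *
 * which skips the four words between the branch and the "1:" label of
 * case 2.2 above (delay slot included); 0x10000005 likewise skips the
 * five words of case 2.1.
 */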
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise a
	 * long call is.
	 */
	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32-bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions, so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
						INSN_NOP : insn_la_mcount[1]);
#endif
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP		(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP		(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff		/* stack offset range: 0 ~ PT_SIZE */
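/*
 * How these masks decode, for illustration: "sw ra, 28(sp)" encodes as
 *
 *	opcode sw = 0x2b << 26 | base sp = 29 << 21 | rt ra = 31 << 16 | 28
 *	= 0xafbf001c
 *
 * so (code & S_RA_SP) == S_RA_SP matches a store of ra relative to sp.
 * Because the check is a masked AND rather than an exact opcode compare,
 * the 64-bit "sd ra, offset(sp)" form (opcode 0x3f, giving 0xffbfxxxx)
 * matches as well, hence the "s{d,w}" in the comments above.
 */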
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * Move ip back past the mcount call sequence: for a module, past
	 * the "lui v1, hi_16bit_of_mcount" instruction (offset 24); for
	 * the kernel, past the "move at, ra" instruction (offset 16).
	 */
	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

	/*
	 * Search backwards through the text until we find either a
	 * non-store instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where
		 * ra is stored, then this is a leaf function and it does
		 * not store ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */
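/*
 * Worked example of the scan above, on a made-up prologue:
 *
 *	addiu	sp, sp, -32
 *	sw	ra, 28(sp)
 *	...
 *	move	at, ra
 *	jal	_mcount
 *
 * Searching backwards from below the call sequence, the first "sw ra"
 * match is 0xafbf001c, so the saved ra lives at
 * fp + (0xafbf001c & OFFSET_MASK) = fp + 28, and that slot is returned
 * once its current contents equal old_parent_ra.
 */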
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address
	 * of the caller of _mcount is saved.
	 *
	 * With gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly.  A non-leaf function saves the return address
	 * in its own stack frame, which we cannot hijack without first
	 * finding the real stack address; ftrace_get_parent_ra_addr() does
	 * exactly that.
	 *
	 * With gcc >= 4.5 and its new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is passed
	 * to us in $12, and for a leaf function a zero is put in $12; this
	 * is handled in ftrace_graph_caller of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If getting the stack address of the non-leaf function's ra
	 * failed, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp,
				     NULL) == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */
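/*
 * For reference, a sketch of how the core syscall-tracing code consumes
 * arch_syscall_addr() (an illustration, not code from this file): at boot
 * it walks every syscall number and matches the returned address against
 * the kernel's symbol table to name each entry.  Assuming an O32 kernel,
 * where write() is syscall 4004:
 *
 *	addr = arch_syscall_addr(__NR_O32_Linux + 4);
 *	// addr == (unsigned long)sys_call_table[4], the sys_write handler
 */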