// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks goes to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount */
	v1 = 3;
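	/*
	 * $v1 is general-purpose register 3; uasm expands the load of
	 * MCOUNT_ADDR into the two-slot insn_la_mcount[] buffer
	 * (lui + addiu, as shown in the module call-site layout below).
	 */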
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	flush_icache_range(ip, ip + 8);

	return 0;
}

static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 * sub sp, sp, 8	--> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount		--> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount	--> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 * sub sp, sp, 8
 *					1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount		--> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount	--> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 * nop | move $12, ra_address | sub sp, sp, 8
 *					1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

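/*
 * INSN_B_1F is the "b 1f" shown in the call-site layouts above: a
 * beq $0, $0 (opcode 0x04) whose 16-bit offset field holds
 * MCOUNT_OFFSET_INSNS, so the patched call site branches over the
 * remaining mcount set-up instructions when tracing of that site is
 * disabled.
 */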
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise, a
	 * long call is needed.
	 */
	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
						INSN_NOP : insn_la_mcount[1]);
#endif
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For module, move the ip from the return address after the
	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
	 * kernel, move after the instruction "move ra, at"(offset is 16)
	 */
	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

	/*
	 * search the text until finding the non-store instruction or "s{d,w}
	 * ra, offset(sp)" instruction
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit the non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the next instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address of
	 * the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * in the stack address, so we "emulate" one in _mcount's stack space,
	 * and hijack it directly.
	 * For a non-leaf function, it does save the return address to its own
	 * stack space, so we cannot hijack it directly, but need to find the
	 * real stack address, which is done by ftrace_get_parent_ra_addr().
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
	 * non-leaf function, the location of the return address will be saved
	 * to $12 for us.
	 * For a leaf function, it just puts a zero into $12, so we handle
	 * it in ftrace_graph_caller() of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If getting the stack address of the non-leaf function's ra fails,
	 * stop the function graph tracer and return.
	 */
	if (parent_ra_addr == NULL)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	self_ra -= (MCOUNT_INSN_SIZE * insns);

	if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
		*parent_ra_addr = old_parent_ra;
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */