# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_RETHOOK
	bool

config RETHOOK
	bool
	depends on HAVE_RETHOOK
	help
	  Enable generic return hooking feature. This is an internal
	  API, which will be used by other function-entry hooking
	  features like fprobe and kprobes.

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_RETVAL
	bool

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
	bool
	help
	  If this is set, then arguments and stack can be found from
	  the ftrace_regs passed into the function callback regs parameter
	  by default, even without setting the REGS flag in the ftrace_ops.
	  This allows for use of ftrace_regs_get_argument() and
	  ftrace_regs_get_stack_pointer().

config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
	bool
	help
	  If the architecture generates __patchable_function_entries sections
	  but does not want them included in the ftrace locations.

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount

config HAVE_OBJTOOL_MCOUNT
	bool
	help
	  Arch supports objtool --mcount

config HAVE_OBJTOOL_NOP_MCOUNT
	bool
	help
	  Arch supports the objtool options --mcount with --mnop.
	  An architecture can select this if it wants to enable nop'ing
	  of ftrace locations.

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config HAVE_BUILDTIME_MCOUNT_SORT
	bool
	help
	  An architecture selects this if it sorts the mcount_loc section
	  at build time.

config BUILDTIME_MCOUNT_SORT
	bool
	default y
	depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE
	help
	  Sort the mcount_loc section at build time.

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK
	select NEED_TASKS_RCU

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

menuconfig FTRACE
	bool "Tracers"
	depends on TRACING_SUPPORT
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Enable developer to setup ftrace subsystem via supported
	  kernel cmdline at boot time for debugging (tracing) driver
	  initialization and boot process.

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select NEED_TASKS_RCU
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function, which NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks (at least on
	  x86, but may have impact on other architectures such as ppc64).

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.

config FUNCTION_GRAPH_RETVAL
	bool "Kernel Function Graph Return Value"
	depends on HAVE_FUNCTION_GRAPH_RETVAL
	depends on FUNCTION_GRAPH_TRACER
	default n
	help
	  Support recording and printing the function return value when
	  using function graph tracer. It can be helpful to locate functions
	  that return errors. This feature is off by default, and you can
	  enable it via the trace option funcgraph-retval.
	  See Documentation/trace/ftrace.rst
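
# Illustrative usage sketch for the function graph tracer described above
# (not part of the Kconfig semantics; paths assume tracefs is mounted at
# /sys/kernel/tracing and FUNCTION_GRAPH_RETVAL is enabled):
#   echo function_graph > /sys/kernel/tracing/current_tracer
#   echo funcgraph-retval > /sys/kernel/tracing/trace_options
#   cat /sys/kernel/tracing/trace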

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to ftrace
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config DYNAMIC_FTRACE_WITH_CALL_OPS
	def_bool y
	depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS

config DYNAMIC_FTRACE_WITH_ARGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS

config FPROBE
	bool "Kernel Function Probe (fprobe)"
	depends on FUNCTION_TRACER
	depends on DYNAMIC_FTRACE_WITH_REGS
	depends on HAVE_RETHOOK
	select RETHOOK
	default n
	help
	  This option enables kernel function probe (fprobe) based on ftrace.
	  The fprobe is similar to kprobes, but probes only for kernel function
	  entries and exits. This also can probe multiple functions by one
	  fprobe.

	  If unsure, say N.

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.
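
# Illustrative sketch of the stack tracer interface described above
# (assumes tracefs is mounted at /sys/kernel/tracing):
#   sysctl kernel.stack_tracer_enabled=1
#   cat /sys/kernel/tracing/stack_max_size
#   cat /sys/kernel/tracing/stack_trace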

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)
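
# Illustrative sketch for the latency tracers above (assumes tracefs is
# mounted at /sys/kernel/tracing; a combined "preemptirqsoff" tracer is only
# available when both IRQSOFF_TRACER and PREEMPT_TRACER are enabled):
#   echo 0 > /sys/kernel/tracing/tracing_max_latency
#   echo irqsoff > /sys/kernel/tracing/current_tracer
#   cat /sys/kernel/tracing/tracing_max_latency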

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	 This tracer, when enabled will create one or more kernel threads,
	 depending on what the cpumask file is set to, with each thread
	 spinning in a loop looking for interruptions caused by
	 something other than the kernel. For example, if a
	 System Management Interrupt (SMI) takes a noticeable amount of
	 time, this tracer will detect it. This is useful for testing
	 if a system is reliable for Real Time tasks.

	 Some files are created in the tracing directory when this
	 is enabled:

	   hwlat_detector/width   - time in usecs for how long to spin for
	   hwlat_detector/window  - time in usecs between the start of each
				     iteration

	 A kernel thread is created that will spin with interrupts disabled
	 for "width" microseconds in every "window" cycle. It will not spin
	 for "window - width" microseconds, where the system can
	 continue to operate.

	 The output will appear in the trace and trace_pipe files.

	 When the tracer is not running, it has no effect on the system,
	 but when it is running, it can cause the system to be
	 periodically non responsive. Do not run this tracer on a
	 production system.

	 To enable this tracer, echo in "hwlat" into the current_tracer
	 file. Every time a latency is greater than tracing_thresh, it will
	 be recorded into the ring buffer.

config OSNOISE_TRACER
	bool "OS Noise tracer"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  In the context of high-performance computing (HPC), the Operating
	  System Noise (osnoise) refers to the interference experienced by an
	  application due to activities inside the operating system. In the
	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
	  can cause noise to the system. Moreover, hardware-related jobs can
	  also cause noise, for example, via SMIs.

	  The osnoise tracer leverages the hwlat_detector by running a similar
	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
	  the sources of osnoise during its execution. The osnoise tracer takes
	  note of the entry and exit point of any source of interferences,
	  increasing a per-cpu interference counter. It saves an interference
	  counter for each source of interference. The interference counter for
	  NMI, IRQs, SoftIRQs, and threads is increased anytime the tool
	  observes these interferences' entry events. When a noise happens
	  without any interference from the operating system level, the
	  hardware noise counter increases, pointing to a hardware-related
	  noise. In this way, osnoise can account for any source of
	  interference. At the end of the period, the osnoise tracer prints
	  the sum of all noise, the max single noise, the percentage of CPU
	  available for the thread, and the counters for the noise sources.

	  In addition to the tracer, a set of tracepoints were added to
	  facilitate the identification of the osnoise source.

	  The output will appear in the trace and trace_pipe files.

	  To enable this tracer, echo in "osnoise" into the current_tracer
	  file.

config TIMERLAT_TRACER
	bool "Timerlat tracer"
	select OSNOISE_TRACER
	select GENERIC_TRACER
	help
	  The timerlat tracer aims to help the preemptive kernel developers
	  to find sources of wakeup latencies of real-time threads.

	  The tracer creates a per-cpu kernel thread with real-time priority.
	  The tracer thread sets a periodic timer to wakeup itself, and goes
	  to sleep waiting for the timer to fire. At the wakeup, the thread
	  then computes a wakeup latency value as the difference between
	  the current time and the absolute time that the timer was set
	  to expire.

	  The tracer prints two lines at every activation. The first is the
	  timer latency observed at the hardirq context before the
	  activation of the thread. The second is the timer latency observed
	  by the thread, which is the same level plus the sleeping time. The
	  ACTIVATION ID field serves to relate the irq execution to its
	  respective thread execution.

	  The tracer is built on top of the osnoise tracer, and the osnoise:
	  events can be used to trace the sources of interference from NMI,
	  IRQs and other threads. It also enables the capture of the
	  stacktrace at the IRQ context, which helps to identify the code
	  path that can cause thread delay.
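
# Illustrative sketch for the osnoise/timerlat tracers above (assumes tracefs
# is mounted at /sys/kernel/tracing):
#   echo osnoise > /sys/kernel/tracing/current_tracer    # or: echo timerlat
#   cat /sys/kernel/tracing/trace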

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take snapshot of the current buffer using the
	  ftrace interface, e.g.:

	      echo 1 > /sys/kernel/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 was swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, this adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus the overhead of snapshots).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	 The branch profiling is a software profiler. It will add hooks
	 into the C conditionals to test which path a branch takes.

	 The likely/unlikely profiler only looks at the conditions that
	 are annotated with a likely or unlikely macro.

	 The "all branch" profiler will profile every if statement in the
	 kernel. This profiler will also enable the likely/unlikely
	 profiler.

	 Either of the above profilers adds a bit of overhead to the system.
	 If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it hit or miss.
	  The results will be displayed in:

	  /sys/kernel/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/tracing/current_tracer
	    cat /sys/kernel/tracing/trace_pipe

	  If unsure, say N.

config FPROBE_EVENTS
	depends on FPROBE
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable fprobe-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows user to add tracing events on the function entry and
	  exit via ftrace interface. The syntax is same as the kprobe events
	  and the kprobe events on function entry and exit will be
	  transparently converted to this fprobe events.
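
# Illustrative sketch of an fprobe event (assumes FPROBE_EVENTS, a probed
# kernel function named vfs_read, and the default "fprobes" event group;
# see Documentation/trace/fprobetrace.rst):
#   echo 'f:myprobe vfs_read' >> /sys/kernel/tracing/dynamic_events
#   echo 1 > /sys/kernel/tracing/events/fprobes/myprobe/enable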

config PROBE_EVENTS_BTF_ARGS
	depends on HAVE_FUNCTION_ARG_ACCESS_API
	depends on FPROBE_EVENTS || KPROBE_EVENTS
	depends on DEBUG_INFO_BTF && BPF_SYSCALL
	bool "Support BTF function arguments for probe events"
	default y
	help
	  The user can specify the arguments of the probe event using the names
	  of the arguments of the probed function, when the probe location is a
	  kernel function entry or a tracepoint.
	  This is available only if BTF (BPF Type Format) support is enabled.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by perf-probe subcommand of perf tools.
	  If you want to use perf tools, this option is strongly recommended.

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace function from kprobe events"
	depends on KPROBE_EVENTS
	depends on DYNAMIC_FTRACE
	default n
	help
	  This is only for the developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of breakpoint, ftrace related
	  functions are protected from kprobe-events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use perf-probe subcommand
	  of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n
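
# Illustrative sketch of a kprobe event (see KPROBE_EVENTS above and
# Documentation/trace/kprobetrace.rst; the probed symbol is only an example
# and must exist in the running kernel):
#   echo 'p:myprobe kernel_clone' >> /sys/kernel/tracing/kprobe_events
#   echo 1 > /sys/kernel/tracing/events/kprobes/myprobe/enable
#   cat /sys/kernel/tracing/trace_pipe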

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	bool
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_CC
	def_bool y
	depends on $(cc-option,-mrecord-mcount)
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_OBJTOOL
	def_bool y
	depends on HAVE_OBJTOOL_MCOUNT
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on FTRACE_MCOUNT_RECORD
	select OBJTOOL

config FTRACE_MCOUNT_USE_RECORDMCOUNT
	def_bool y
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on !FTRACE_MCOUNT_USE_OBJTOOL
	depends on FTRACE_MCOUNT_RECORD

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers.  It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source.  Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.

	  If in doubt, say N.

config USER_EVENTS
	bool "User trace events"
	select TRACING
	select DYNAMIC_EVENTS
	help
	  User trace events are user-defined trace events that
	  can be used like an existing kernel trace event.  User trace
	  events are generated by writing to a tracefs file.  User
	  processes can determine if their tracing events should be
	  generated by registering a value and bit with the kernel
	  that reflects when it is enabled or not.

	  See Documentation/trace/user_events.rst.
	  If in doubt, say N.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file.  They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purpose.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	 This option creates the tracepoint "benchmark:benchmark_event".
	 When the tracepoint is enabled, it kicks off a kernel thread that
	 goes into an infinite loop (calling cond_resched() to let other tasks
	 run), and calls the tracepoint. Each iteration will record the time
	 it took to write to the tracepoint and the next iteration that
	 data will be passed to the tracepoint itself. That is, the tracepoint
	 will report the time it took to do the previous tracepoint.
	 The string written to the tracepoint is a static string of 128 bytes
	 to keep the time the same. The initial string is simply a write of
	 "START". The second string records the cold cache time of the first
	 write which is not added to the rest of the calculations.

	 As it is a tight loop, it benchmarks as hot cache. That's fine because
	 we care most about hot paths that are probably in cache already.

	 An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 ...
	      last=278 first=3672 max=632 min=278 ...
	      last=277 first=3672 max=632 min=277 ...
	      last=273 first=3672 max=632 min=273 ...
	      last=273 first=3672 max=632 min=273 ...
	      last=281 first=3672 max=632 min=273 ...

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.
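
# Illustrative sketch of a histogram trigger (see HIST_TRIGGERS above and
# Documentation/trace/histogram.rst):
#   echo 'hist:keys=call_site:vals=bytes_req' > \
#        /sys/kernel/tracing/events/kmem/kmalloc/trigger
#   cat /sys/kernel/tracing/events/kmem/kmalloc/hist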

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data as user space does not know
	  how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeof the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong too.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION
	bool "Record functions that recurse in function tracing"
	depends on FUNCTION_TRACER
	help
	  All callbacks that attach to the function tracing have some sort
	  of protection against recursion. Even though the protection exists,
	  it adds overhead. This option will create a file in the tracefs
	  file system called "recursed_functions" that will list the functions
	  that triggered a recursion.

	  This will add more overhead to cases that have recursion.

	  If unsure, say N

config FTRACE_RECORD_RECURSION_SIZE
	int "Max number of recursed functions to record"
	default 128
	depends on FTRACE_RECORD_RECURSION
	help
	  This defines the limit of number of functions that can be
	  listed in the "recursed_functions" file, that lists all
	  the functions that caused a recursion to happen.
	  This file can be reset, but the limit can not change in
	  size at runtime.

config FTRACE_VALIDATE_RCU_IS_WATCHING
	bool "Validate RCU is on during ftrace execution"
	depends on FUNCTION_TRACER
	depends on ARCH_WANTS_NO_INSTR
	help
	  All callbacks that attach to the function tracing have some sort of
	  protection against recursion. This option is only to verify that
	  ftrace (and other users of ftrace_test_recursion_trylock()) are not
	  called outside of RCU, as if they are, it can cause a race. But it
	  also has a noticeable overhead when enabled.

	  If unsure, say N

config RING_BUFFER_RECORD_RECURSION
	bool "Record functions that recurse in the ring buffer"
	depends on FTRACE_RECORD_RECURSION
	# default y, because it is coupled with FTRACE_RECORD_RECURSION
	default y
	help
	  The ring buffer has its own internal recursion. Although when
	  recursion happens it won't cause harm because of the protection,
	  but it does cause unwanted overhead. Enabling this option places
	  where recursion was detected into the ftrace "recursed_functions"
	  file.

	  This will add more overhead to cases that have recursion.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It only enables the event and disables it and runs various loads
	  with the event enabled. This adds a bit more time for kernel boot
	  up since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config FTRACE_SORT_STARTUP_TEST
	bool "Verify compile time sorting of ftrace functions"
	depends on DYNAMIC_FTRACE
	depends on BUILDTIME_MCOUNT_SORT
	help
	  Sorting of the mcount_loc sections that are used by ftrace to find
	  where to patch functions for tracing and other callbacks is done
	  at compile time. But if the sort is not done correctly, it will
	  cause non-deterministic failures. When this is set, the sorted
	  sections are verified that they are indeed sorted, and a warning
	  is issued if they are not.

	  If unsure, say N

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start that kicks off
	  a thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per cpu buffer: What
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N

config RING_BUFFER_VALIDATE_TIME_DELTAS
	bool "Verify ring buffer time stamp deltas"
	depends on RING_BUFFER
	help
	  This will audit the time stamps on the ring buffer sub
	  buffer to make sure that all the time deltas for the
	  events on a sub buffer matches the current time stamp.
	  This audit is performed for every event that is not
	  interrupted, or interrupting another event. A check
	  is also made when traversing sub buffers to make sure
	  that all the deltas on the previous sub buffer do not
	  add up to be greater than the current time stamp.

	  NOTE: This adds significant overhead to recording of events,
	  and should only be used to test the logic of the ring buffer.
	  Do not use it on production systems.

	  Only say Y if you understand what this does, and you
	  still want it enabled. Otherwise say N

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.
	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:
	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  What's more, if you want to attach the test on the cpu which the
	  tracer is running on, specify cpu_affinity=cpu_num at the end of the
	  command.

	  If unsure, say N

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
	  Add "hist_debug" file for each event, which when read will
	  dump out a bunch of internal details about a hist trigger
	  defined on that event.

	  The hist_debug file serves a couple of purposes:

	    - Helps developers verify that nothing is broken.

	    - Provides educational information to support the details
	      of the hist trigger internals as described by
	      Documentation/trace/histogram-design.rst.

	  The hist_debug output only covers the data structures
	  related to the histogram definitions themselves and doesn't
	  display the internals of map buckets or variable values of
	  running histograms.

	  If unsure, say N.

source "kernel/trace/rv/Kconfig"

endif # FTRACE