TOMOYO Linux Cross Reference
Linux/kernel/trace/trace.h

  1 // SPDX-License-Identifier: GPL-2.0
  2 
  3 #ifndef _LINUX_KERNEL_TRACE_H
  4 #define _LINUX_KERNEL_TRACE_H
  5 
  6 #include <linux/fs.h>
  7 #include <linux/atomic.h>
  8 #include <linux/sched.h>
  9 #include <linux/clocksource.h>
 10 #include <linux/ring_buffer.h>
 11 #include <linux/mmiotrace.h>
 12 #include <linux/tracepoint.h>
 13 #include <linux/ftrace.h>
 14 #include <linux/trace.h>
 15 #include <linux/hw_breakpoint.h>
 16 #include <linux/trace_seq.h>
 17 #include <linux/trace_events.h>
 18 #include <linux/compiler.h>
 19 #include <linux/glob.h>
 20 #include <linux/irq_work.h>
 21 #include <linux/workqueue.h>
 22 #include <linux/ctype.h>
 23 #include <linux/once_lite.h>
 24 
 25 #include "pid_list.h"
 26 
 27 #ifdef CONFIG_FTRACE_SYSCALLS
 28 #include <asm/unistd.h>         /* For NR_syscalls           */
 29 #include <asm/syscall.h>        /* some archs define it here */
 30 #endif
 31 
 32 #define TRACE_MODE_WRITE        0640
 33 #define TRACE_MODE_READ         0440
 34 
 35 enum trace_type {
 36         __TRACE_FIRST_TYPE = 0,
 37 
 38         TRACE_FN,
 39         TRACE_CTX,
 40         TRACE_WAKE,
 41         TRACE_STACK,
 42         TRACE_PRINT,
 43         TRACE_BPRINT,
 44         TRACE_MMIO_RW,
 45         TRACE_MMIO_MAP,
 46         TRACE_BRANCH,
 47         TRACE_GRAPH_RET,
 48         TRACE_GRAPH_ENT,
 49         TRACE_USER_STACK,
 50         TRACE_BLK,
 51         TRACE_BPUTS,
 52         TRACE_HWLAT,
 53         TRACE_OSNOISE,
 54         TRACE_TIMERLAT,
 55         TRACE_RAW_DATA,
 56         TRACE_FUNC_REPEATS,
 57 
 58         __TRACE_LAST_TYPE,
 59 };
 60 
 61 
 62 #undef __field
 63 #define __field(type, item)             type    item;
 64 
 65 #undef __field_fn
 66 #define __field_fn(type, item)          type    item;
 67 
 68 #undef __field_struct
 69 #define __field_struct(type, item)      __field(type, item)
 70 
 71 #undef __field_desc
 72 #define __field_desc(type, container, item)
 73 
 74 #undef __field_packed
 75 #define __field_packed(type, container, item)
 76 
 77 #undef __array
 78 #define __array(type, item, size)       type    item[size];
 79 
 80 /*
 81  * For backward compatibility, older user space expects to see the
 82  * kernel_stack event with a fixed-size caller field. But today the fixed
 83  * size is ignored by the kernel, and the real structure is dynamic.
 84  * What is exposed to user space is "unsigned long caller[8];" but the real
 85  * structure will be "unsigned long caller[] __counted_by(size)".
 86  */
 87 #undef __stack_array
 88 #define __stack_array(type, item, size, field)          type item[] __counted_by(field);
 89 
 90 #undef __array_desc
 91 #define __array_desc(type, container, item, size)
 92 
 93 #undef __dynamic_array
 94 #define __dynamic_array(type, item)     type    item[];
 95 
 96 #undef __rel_dynamic_array
 97 #define __rel_dynamic_array(type, item) type    item[];
 98 
 99 #undef F_STRUCT
100 #define F_STRUCT(args...)               args
101 
102 #undef FTRACE_ENTRY
103 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print)             \
104         struct struct_name {                                            \
105                 struct trace_entry      ent;                            \
106                 tstruct                                                 \
107         }
108 
109 #undef FTRACE_ENTRY_DUP
110 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
111 
112 #undef FTRACE_ENTRY_REG
113 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn)  \
114         FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
115 
116 #undef FTRACE_ENTRY_PACKED
117 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)      \
118         FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
119 
120 #include "trace_entries.h"
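
/*
 * A rough sketch of what one FTRACE_ENTRY() use from trace_entries.h
 * expands to with the macros above. The function entry is used as the
 * example; its exact field list lives in trace_entries.h, so treat this
 * as an illustration rather than the authoritative definition:
 *
 *	FTRACE_ENTRY_REG(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field_fn(unsigned long, ip)
 *			__field_fn(unsigned long, parent_ip)
 *		), print, regfn)
 *
 * becomes:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */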
121 
122 /* Use this for memory failure errors */
123 #define MEM_FAIL(condition, fmt, ...)                                   \
124         DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
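
/*
 * A minimal sketch of the intended use (hypothetical caller): the
 * condition is evaluated, the message is printed at most once, and the
 * condition's value is returned so the caller can bail out:
 *
 *	buf->data = alloc_percpu(struct trace_array_cpu);
 *	if (MEM_FAIL(!buf->data, "Failed to allocate per-CPU trace data\n"))
 *		return -ENOMEM;
 */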
125 
126 #define FAULT_STRING "(fault)"
127 
128 #define HIST_STACKTRACE_DEPTH   16
129 #define HIST_STACKTRACE_SIZE    (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
130 #define HIST_STACKTRACE_SKIP    5
131 
132 /*
133  * Syscalls are special and need special handling; this is why
134  * they are not included in trace_entries.h.
135  */
136 struct syscall_trace_enter {
137         struct trace_entry      ent;
138         int                     nr;
139         unsigned long           args[];
140 };
141 
142 struct syscall_trace_exit {
143         struct trace_entry      ent;
144         int                     nr;
145         long                    ret;
146 };
147 
148 struct kprobe_trace_entry_head {
149         struct trace_entry      ent;
150         unsigned long           ip;
151 };
152 
153 struct eprobe_trace_entry_head {
154         struct trace_entry      ent;
155 };
156 
157 struct kretprobe_trace_entry_head {
158         struct trace_entry      ent;
159         unsigned long           func;
160         unsigned long           ret_ip;
161 };
162 
163 struct fentry_trace_entry_head {
164         struct trace_entry      ent;
165         unsigned long           ip;
166 };
167 
168 struct fexit_trace_entry_head {
169         struct trace_entry      ent;
170         unsigned long           func;
171         unsigned long           ret_ip;
172 };
173 
174 #define TRACE_BUF_SIZE          1024
175 
176 struct trace_array;
177 
178 /*
179  * The CPU trace array - it consists of thousands of trace entries
180  * plus some other descriptor data: (for example which task started
181  * the trace, etc.)
182  */
183 struct trace_array_cpu {
184         atomic_t                disabled;
185         void                    *buffer_page;   /* ring buffer spare */
186 
187         unsigned long           entries;
188         unsigned long           saved_latency;
189         unsigned long           critical_start;
190         unsigned long           critical_end;
191         unsigned long           critical_sequence;
192         unsigned long           nice;
193         unsigned long           policy;
194         unsigned long           rt_priority;
195         unsigned long           skipped_entries;
196         u64                     preempt_timestamp;
197         pid_t                   pid;
198         kuid_t                  uid;
199         char                    comm[TASK_COMM_LEN];
200 
201 #ifdef CONFIG_FUNCTION_TRACER
202         int                     ftrace_ignore_pid;
203 #endif
204         bool                    ignore_pid;
205 };
206 
207 struct tracer;
208 struct trace_option_dentry;
209 
210 struct array_buffer {
211         struct trace_array              *tr;
212         struct trace_buffer             *buffer;
213         struct trace_array_cpu __percpu *data;
214         u64                             time_start;
215         int                             cpu;
216 };
217 
218 #define TRACE_FLAGS_MAX_SIZE            32
219 
220 struct trace_options {
221         struct tracer                   *tracer;
222         struct trace_option_dentry      *topts;
223 };
224 
225 struct trace_pid_list *trace_pid_list_alloc(void);
226 void trace_pid_list_free(struct trace_pid_list *pid_list);
227 bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
228 int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
229 int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
230 int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
231 int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
232                         unsigned int *next);
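
/*
 * A minimal sketch of walking a pid list with the accessors above
 * (hypothetical caller; "first" and "next" return 0 while a pid was
 * found, and "next" starts searching at the pid it is given):
 *
 *	unsigned int pid;
 *	int ret;
 *
 *	for (ret = trace_pid_list_first(pid_list, &pid); !ret;
 *	     ret = trace_pid_list_next(pid_list, pid + 1, &pid))
 *		pr_info("traced pid: %u\n", pid);
 */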
233 
234 enum {
235         TRACE_PIDS              = BIT(0),
236         TRACE_NO_PIDS           = BIT(1),
237 };
238 
239 static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
240                                     struct trace_pid_list *no_pid_list)
241 {
242         /* Return true if the pid list in type has pids */
243         return ((type & TRACE_PIDS) && pid_list) ||
244                 ((type & TRACE_NO_PIDS) && no_pid_list);
245 }
246 
247 static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
248                                          struct trace_pid_list *no_pid_list)
249 {
250         /*
251          * When turning off what is in @type, return true if the "other"
252          * pid list still has pids in it.
253          */
254         return (!(type & TRACE_PIDS) && pid_list) ||
255                 (!(type & TRACE_NO_PIDS) && no_pid_list);
256 }
257 
258 typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
259 
260 /**
261  * struct cond_snapshot - conditional snapshot data and callback
262  *
263  * The cond_snapshot structure encapsulates a callback function and
264  * data associated with the snapshot for a given tracing instance.
265  *
266  * When a snapshot is taken conditionally, by invoking
267  * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
268  * passed in turn to the cond_snapshot.update() function.  That data
269  * can be compared by the update() implementation with the cond_data
270  * contained within the struct cond_snapshot instance associated with
271  * the trace_array.  Because the tr->max_lock is held throughout the
272  * update() call, the update() function can directly retrieve the
273  * cond_snapshot and cond_data associated with the per-instance
274  * snapshot associated with the trace_array.
275  *
276  * The cond_snapshot.update() implementation can save data to be
277  * associated with the snapshot if it decides to, and returns 'true'
278  * in that case, or it returns 'false' if the conditional snapshot
279  * shouldn't be taken.
280  *
281  * The cond_snapshot instance is created and associated with the
282  * user-defined cond_data by tracing_cond_snapshot_enable().
283  * Likewise, the cond_snapshot instance is destroyed and is no longer
284  * associated with the trace instance by
285  * tracing_cond_snapshot_disable().
286  *
287  * The method below is required.
288  *
289  * @update: When a conditional snapshot is invoked, the update()
290  *      callback function is invoked with the tr->max_lock held.  The
291  *      update() implementation signals whether or not to actually
292  *      take the snapshot, by returning 'true' if so, 'false' if no
293  *      snapshot should be taken.  Because the max_lock is held for
294  *      the duration of update(), the implementation can safely
295  *      retrieve and save any implementation data it needs
296  *      to in association with the snapshot.
297  */
298 struct cond_snapshot {
299         void                            *cond_data;
300         cond_update_fn_t                update;
301 };
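
/*
 * An illustrative sketch of an update() callback (hypothetical tracer
 * code; struct my_cond and the threshold logic are examples only):
 *
 *	struct my_cond {
 *		u64	threshold;
 *		u64	observed;
 *	};
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond *cond = cond_data;
 *
 *		return cond->observed > cond->threshold;
 *	}
 *
 * The callback and cond_data are associated with the trace instance by
 * tracing_cond_snapshot_enable(), and the snapshot is then requested
 * with tracing_snapshot_cond(tr, cond_data); returning true from
 * update() lets the snapshot proceed.
 */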
302 
303 /*
304  * struct trace_func_repeats - used to keep track of consecutive calls
305  * (on the same CPU) of a single function.
306  */
307 struct trace_func_repeats {
308         unsigned long   ip;
309         unsigned long   parent_ip;
310         unsigned long   count;
311         u64             ts_last_call;
312 };
313 
314 /*
315  * The trace array - an array of per-CPU trace arrays. This is the
316  * highest level data structure that individual tracers deal with.
317  * Each trace array also has its own on/off state.
318  */
319 struct trace_array {
320         struct list_head        list;
321         char                    *name;
322         struct array_buffer     array_buffer;
323 #ifdef CONFIG_TRACER_MAX_TRACE
324         /*
325          * The max_buffer is used to snapshot the trace when a maximum
326          * latency is reached, or when the user initiates a snapshot.
327          * Some tracers will use this to store a maximum trace while
328          * they continue examining live traces.
329          *
330          * The buffers for the max_buffer are set up the same as the array_buffer.
331          * When a snapshot is taken, the buffer of the max_buffer is swapped
332          * with the buffer of the array_buffer and the buffers are reset for
333          * the array_buffer so that tracing can continue.
334          */
335         struct array_buffer     max_buffer;
336         bool                    allocated_snapshot;
337         spinlock_t              snapshot_trigger_lock;
338         unsigned int            snapshot;
339         unsigned int            mapped;
340         unsigned long           max_latency;
341 #ifdef CONFIG_FSNOTIFY
342         struct dentry           *d_max_latency;
343         struct work_struct      fsnotify_work;
344         struct irq_work         fsnotify_irqwork;
345 #endif
346 #endif
347         struct trace_pid_list   __rcu *filtered_pids;
348         struct trace_pid_list   __rcu *filtered_no_pids;
349         /*
350          * max_lock is used to protect the swapping of buffers
351          * when taking a max snapshot. The buffers themselves are
352          * protected by per_cpu spinlocks. But the action of the swap
353          * needs its own lock.
354          *
355          * This is defined as an arch_spinlock_t in order to help
356          * with performance when lockdep debugging is enabled.
357          *
358          * It is also used in places other than update_max_tr(),
359          * so it needs to be defined outside of the
360          * CONFIG_TRACER_MAX_TRACE block.
361          */
362         arch_spinlock_t         max_lock;
363         int                     buffer_disabled;
364 #ifdef CONFIG_FTRACE_SYSCALLS
365         int                     sys_refcount_enter;
366         int                     sys_refcount_exit;
367         struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
368         struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
369 #endif
370         int                     stop_count;
371         int                     clock_id;
372         int                     nr_topts;
373         bool                    clear_trace;
374         int                     buffer_percent;
375         unsigned int            n_err_log_entries;
376         struct tracer           *current_trace;
377         unsigned int            trace_flags;
378         unsigned char           trace_flags_index[TRACE_FLAGS_MAX_SIZE];
379         unsigned int            flags;
380         raw_spinlock_t          start_lock;
381         const char              *system_names;
382         struct list_head        err_log;
383         struct dentry           *dir;
384         struct dentry           *options;
385         struct dentry           *percpu_dir;
386         struct eventfs_inode    *event_dir;
387         struct trace_options    *topts;
388         struct list_head        systems;
389         struct list_head        events;
390         struct trace_event_file *trace_marker_file;
391         cpumask_var_t           tracing_cpumask; /* only trace on set CPUs */
392         /* each per_cpu trace_pipe can be opened by only one user */
393         cpumask_var_t           pipe_cpumask;
394         int                     ref;
395         int                     trace_ref;
396 #ifdef CONFIG_FUNCTION_TRACER
397         struct ftrace_ops       *ops;
398         struct trace_pid_list   __rcu *function_pids;
399         struct trace_pid_list   __rcu *function_no_pids;
400 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
401         struct fgraph_ops       *gops;
402 #endif
403 #ifdef CONFIG_DYNAMIC_FTRACE
404         /* All of these are protected by the ftrace_lock */
405         struct list_head        func_probes;
406         struct list_head        mod_trace;
407         struct list_head        mod_notrace;
408 #endif
409         /* function tracing enabled */
410         int                     function_enabled;
411 #endif
412         int                     no_filter_buffering_ref;
413         struct list_head        hist_vars;
414 #ifdef CONFIG_TRACER_SNAPSHOT
415         struct cond_snapshot    *cond_snapshot;
416 #endif
417         struct trace_func_repeats       __percpu *last_func_repeats;
418         /*
419          * On boot up, the ring buffer is set to the minimum size, so that
420          * we do not waste memory on systems that are not using tracing.
421          */
422         bool ring_buffer_expanded;
423 };
424 
425 enum {
426         TRACE_ARRAY_FL_GLOBAL   = (1 << 0)
427 };
428 
429 extern struct list_head ftrace_trace_arrays;
430 
431 extern struct mutex trace_types_lock;
432 
433 extern int trace_array_get(struct trace_array *tr);
434 extern int tracing_check_open_get_tr(struct trace_array *tr);
435 extern struct trace_array *trace_array_find(const char *instance);
436 extern struct trace_array *trace_array_find_get(const char *instance);
437 
438 extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
439 extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
440 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
441 
442 extern bool trace_clock_in_ns(struct trace_array *tr);
443 
444 /*
445  * The global tracer (top) should be the first trace array added,
446  * but we check the flag anyway.
447  */
448 static inline struct trace_array *top_trace_array(void)
449 {
450         struct trace_array *tr;
451 
452         if (list_empty(&ftrace_trace_arrays))
453                 return NULL;
454 
455         tr = list_entry(ftrace_trace_arrays.prev,
456                         typeof(*tr), list);
457         WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
458         return tr;
459 }
460 
461 #define FTRACE_CMP_TYPE(var, type) \
462         __builtin_types_compatible_p(typeof(var), type *)
463 
464 #undef IF_ASSIGN
465 #define IF_ASSIGN(var, entry, etype, id)                        \
466         if (FTRACE_CMP_TYPE(var, etype)) {                      \
467                 var = (typeof(var))(entry);                     \
468                 WARN_ON(id != 0 && (entry)->type != id);        \
469                 break;                                          \
470         }
471 
472 /* Will cause compile errors if type is not found. */
473 extern void __ftrace_bad_type(void);
474 
475 /*
476  * trace_assign_type() verifies that the entry type is
477  * the same as the type being assigned. To add new types, simply
478  * add a line with the following format:
479  *
480  * IF_ASSIGN(var, ent, type, id);
481  *
482  *  where "type" is the trace type that includes the trace_entry
483  *  as the "ent" item, and "id" is the trace identifier that is
484  *  used in the trace_type enum.
485  *
486  *  If the type can have more than one id, then use zero.
487  */
488 #define trace_assign_type(var, ent)                                     \
489         do {                                                            \
490                 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);     \
491                 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);        \
492                 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);   \
493                 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
494                 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);   \
495                 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
496                 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);   \
497                 IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);   \
498                 IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
499                 IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
500                 IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
501                 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,          \
502                           TRACE_MMIO_RW);                               \
503                 IF_ASSIGN(var, ent, struct trace_mmiotrace_map,         \
504                           TRACE_MMIO_MAP);                              \
505                 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
506                 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,      \
507                           TRACE_GRAPH_ENT);             \
508                 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,      \
509                           TRACE_GRAPH_RET);             \
510                 IF_ASSIGN(var, ent, struct func_repeats_entry,          \
511                           TRACE_FUNC_REPEATS);                          \
512                 __ftrace_bad_type();                                    \
513         } while (0)
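
/*
 * A minimal sketch of typical use in an output path (hypothetical
 * print_line-style handler; TRACE_PRINT is just the example type):
 *
 *	struct print_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%s", field->buf);
 *
 * trace_assign_type() casts iter->ent to the requested type and WARNs
 * if iter->ent->type does not match the id given for that type.
 */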
514 
515 /*
516  * An option specific to a tracer. This is a boolean value.
517  * The bit is the bit index that sets its value on the
518  * flags value in struct tracer_flags.
519  */
520 struct tracer_opt {
521         const char      *name; /* Will appear on the trace_options file */
522         u32             bit; /* Mask assigned in val field in tracer_flags */
523 };
524 
525 /*
526  * The set of specific options for a tracer. Your tracer
527  * has to set the initial value of the flags val.
528  */
529 struct tracer_flags {
530         u32                     val;
531         struct tracer_opt       *opts;
532         struct tracer           *trace;
533 };
534 
535 /* Makes it easier to define a tracer opt */
536 #define TRACER_OPT(s, b)        .name = #s, .bit = b
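
/*
 * An illustrative sketch of how a tracer declares its private options
 * with the types above (hypothetical names, modeled on existing
 * tracers; the empty entry terminates the opts array):
 *
 *	#define MY_OPT_FOO	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(foo, MY_OPT_FOO) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 */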
537 
538 
539 struct trace_option_dentry {
540         struct tracer_opt               *opt;
541         struct tracer_flags             *flags;
542         struct trace_array              *tr;
543         struct dentry                   *entry;
544 };
545 
546 /**
547  * struct tracer - a specific tracer and its callbacks to interact with tracefs
548  * @name: the name chosen to select it on the available_tracers file
549  * @init: called when one switches to this tracer (echo name > current_tracer)
550  * @reset: called when one switches to another tracer
551  * @start: called when tracing is unpaused (echo 1 > tracing_on)
552  * @stop: called when tracing is paused (echo 0 > tracing_on)
553  * @update_thresh: called when tracing_thresh is updated
554  * @open: called when the trace file is opened
555  * @pipe_open: called when the trace_pipe file is opened
556  * @close: called when the trace file is released
557  * @pipe_close: called when the trace_pipe file is released
558  * @read: override the default read callback on trace_pipe
559  * @splice_read: override the default splice_read callback on trace_pipe
560  * @selftest: selftest to run on boot (see trace_selftest.c)
561  * @print_header: override the first lines that describe your columns
562  * @print_line: callback that prints a trace
563  * @set_flag: signals one of your private flags changed (trace_options file)
564  * @flags: your private flags
565  */
566 struct tracer {
567         const char              *name;
568         int                     (*init)(struct trace_array *tr);
569         void                    (*reset)(struct trace_array *tr);
570         void                    (*start)(struct trace_array *tr);
571         void                    (*stop)(struct trace_array *tr);
572         int                     (*update_thresh)(struct trace_array *tr);
573         void                    (*open)(struct trace_iterator *iter);
574         void                    (*pipe_open)(struct trace_iterator *iter);
575         void                    (*close)(struct trace_iterator *iter);
576         void                    (*pipe_close)(struct trace_iterator *iter);
577         ssize_t                 (*read)(struct trace_iterator *iter,
578                                         struct file *filp, char __user *ubuf,
579                                         size_t cnt, loff_t *ppos);
580         ssize_t                 (*splice_read)(struct trace_iterator *iter,
581                                                struct file *filp,
582                                                loff_t *ppos,
583                                                struct pipe_inode_info *pipe,
584                                                size_t len,
585                                                unsigned int flags);
586 #ifdef CONFIG_FTRACE_STARTUP_TEST
587         int                     (*selftest)(struct tracer *trace,
588                                             struct trace_array *tr);
589 #endif
590         void                    (*print_header)(struct seq_file *m);
591         enum print_line_t       (*print_line)(struct trace_iterator *iter);
592         /* If you handled the flag setting, return 0 */
593         int                     (*set_flag)(struct trace_array *tr,
594                                             u32 old_flags, u32 bit, int set);
595         /* Return 0 if OK with change, else return non-zero */
596         int                     (*flag_changed)(struct trace_array *tr,
597                                                 u32 mask, int set);
598         struct tracer           *next;
599         struct tracer_flags     *flags;
600         int                     enabled;
601         bool                    print_max;
602         bool                    allow_instances;
603 #ifdef CONFIG_TRACER_MAX_TRACE
604         bool                    use_max_tr;
605 #endif
606         /* True if tracer cannot be enabled in kernel param */
607         bool                    noboot;
608 };
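
/*
 * A minimal sketch of defining and registering a tracer (hypothetical
 * tracer; only the callbacks it actually needs have to be filled in):
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 * Registration is then done from an __init function:
 *
 *	register_tracer(&my_tracer);
 */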
609 
610 static inline struct ring_buffer_iter *
611 trace_buffer_iter(struct trace_iterator *iter, int cpu)
612 {
613         return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
614 }
615 
616 int tracer_init(struct tracer *t, struct trace_array *tr);
617 int tracing_is_enabled(void);
618 void tracing_reset_online_cpus(struct array_buffer *buf);
619 void tracing_reset_all_online_cpus(void);
620 void tracing_reset_all_online_cpus_unlocked(void);
621 int tracing_open_generic(struct inode *inode, struct file *filp);
622 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
623 int tracing_release_generic_tr(struct inode *inode, struct file *file);
624 int tracing_open_file_tr(struct inode *inode, struct file *filp);
625 int tracing_release_file_tr(struct inode *inode, struct file *filp);
626 int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
627 bool tracing_is_disabled(void);
628 bool tracer_tracing_is_on(struct trace_array *tr);
629 void tracer_tracing_on(struct trace_array *tr);
630 void tracer_tracing_off(struct trace_array *tr);
631 struct dentry *trace_create_file(const char *name,
632                                  umode_t mode,
633                                  struct dentry *parent,
634                                  void *data,
635                                  const struct file_operations *fops);
636 
637 int tracing_init_dentry(void);
638 
639 struct ring_buffer_event;
640 
641 struct ring_buffer_event *
642 trace_buffer_lock_reserve(struct trace_buffer *buffer,
643                           int type,
644                           unsigned long len,
645                           unsigned int trace_ctx);
646 
647 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
648                                                 struct trace_array_cpu *data);
649 
650 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
651                                           int *ent_cpu, u64 *ent_ts);
652 
653 void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
654                                         struct ring_buffer_event *event);
655 
656 bool trace_is_tracepoint_string(const char *str);
657 const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
658 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
659                          va_list ap) __printf(2, 0);
660 char *trace_iter_expand_format(struct trace_iterator *iter);
661 
662 int trace_empty(struct trace_iterator *iter);
663 
664 void *trace_find_next_entry_inc(struct trace_iterator *iter);
665 
666 void trace_init_global_iter(struct trace_iterator *iter);
667 
668 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
669 
670 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
671 unsigned long trace_total_entries(struct trace_array *tr);
672 
673 void trace_function(struct trace_array *tr,
674                     unsigned long ip,
675                     unsigned long parent_ip,
676                     unsigned int trace_ctx);
677 void trace_graph_function(struct trace_array *tr,
678                     unsigned long ip,
679                     unsigned long parent_ip,
680                     unsigned int trace_ctx);
681 void trace_latency_header(struct seq_file *m);
682 void trace_default_header(struct seq_file *m);
683 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
684 
685 void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops);
686 int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
687 
688 void tracing_start_cmdline_record(void);
689 void tracing_stop_cmdline_record(void);
690 void tracing_start_tgid_record(void);
691 void tracing_stop_tgid_record(void);
692 
693 int register_tracer(struct tracer *type);
694 int is_tracing_stopped(void);
695 
696 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
697 
698 extern cpumask_var_t __read_mostly tracing_buffer_mask;
699 
700 #define for_each_tracing_cpu(cpu)       \
701         for_each_cpu(cpu, tracing_buffer_mask)
702 
703 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
704 
705 extern unsigned long tracing_thresh;
706 
707 /* PID filtering */
708 
709 extern int pid_max;
710 
711 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
712                              pid_t search_pid);
713 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
714                             struct trace_pid_list *filtered_no_pids,
715                             struct task_struct *task);
716 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
717                                   struct task_struct *self,
718                                   struct task_struct *task);
719 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
720 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
721 int trace_pid_show(struct seq_file *m, void *v);
722 int trace_pid_write(struct trace_pid_list *filtered_pids,
723                     struct trace_pid_list **new_pid_list,
724                     const char __user *ubuf, size_t cnt);
725 
726 #ifdef CONFIG_TRACER_MAX_TRACE
727 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
728                    void *cond_data);
729 void update_max_tr_single(struct trace_array *tr,
730                           struct task_struct *tsk, int cpu);
731 
732 #ifdef CONFIG_FSNOTIFY
733 #define LATENCY_FS_NOTIFY
734 #endif
735 #endif /* CONFIG_TRACER_MAX_TRACE */
736 
737 #ifdef LATENCY_FS_NOTIFY
738 void latency_fsnotify(struct trace_array *tr);
739 #else
740 static inline void latency_fsnotify(struct trace_array *tr) { }
741 #endif
742 
743 #ifdef CONFIG_STACKTRACE
744 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
745 #else
746 static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
747                                  int skip)
748 {
749 }
750 #endif /* CONFIG_STACKTRACE */
751 
752 void trace_last_func_repeats(struct trace_array *tr,
753                              struct trace_func_repeats *last_info,
754                              unsigned int trace_ctx);
755 
756 extern u64 ftrace_now(int cpu);
757 
758 extern void trace_find_cmdline(int pid, char comm[]);
759 extern int trace_find_tgid(int pid);
760 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
761 
762 #ifdef CONFIG_DYNAMIC_FTRACE
763 extern unsigned long ftrace_update_tot_cnt;
764 extern unsigned long ftrace_number_of_pages;
765 extern unsigned long ftrace_number_of_groups;
766 void ftrace_init_trace_array(struct trace_array *tr);
767 #else
768 static inline void ftrace_init_trace_array(struct trace_array *tr) { }
769 #endif
770 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
771 extern int DYN_FTRACE_TEST_NAME(void);
772 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
773 extern int DYN_FTRACE_TEST_NAME2(void);
774 
775 extern void trace_set_ring_buffer_expanded(struct trace_array *tr);
776 extern bool tracing_selftest_disabled;
777 
778 #ifdef CONFIG_FTRACE_STARTUP_TEST
779 extern void __init disable_tracing_selftest(const char *reason);
780 
781 extern int trace_selftest_startup_function(struct tracer *trace,
782                                            struct trace_array *tr);
783 extern int trace_selftest_startup_function_graph(struct tracer *trace,
784                                                  struct trace_array *tr);
785 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
786                                           struct trace_array *tr);
787 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
788                                              struct trace_array *tr);
789 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
790                                                  struct trace_array *tr);
791 extern int trace_selftest_startup_wakeup(struct tracer *trace,
792                                          struct trace_array *tr);
793 extern int trace_selftest_startup_nop(struct tracer *trace,
794                                          struct trace_array *tr);
795 extern int trace_selftest_startup_branch(struct tracer *trace,
796                                          struct trace_array *tr);
797 /*
798  * Tracer data references selftest functions that only occur
799  * on boot up. These can be __init functions. Thus, when selftests
800  * are enabled, the tracers need to reference __init functions.
801  */
802 #define __tracer_data           __refdata
803 #else
804 static inline void __init disable_tracing_selftest(const char *reason)
805 {
806 }
807 /* Tracers are seldom changed. Optimize when selftests are disabled. */
808 #define __tracer_data           __read_mostly
809 #endif /* CONFIG_FTRACE_STARTUP_TEST */
810 
811 extern void *head_page(struct trace_array_cpu *data);
812 extern unsigned long long ns2usecs(u64 nsec);
813 extern int
814 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
815 extern int
816 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
817 extern int
818 trace_array_vprintk(struct trace_array *tr,
819                     unsigned long ip, const char *fmt, va_list args);
820 int trace_array_printk_buf(struct trace_buffer *buffer,
821                            unsigned long ip, const char *fmt, ...);
822 void trace_printk_seq(struct trace_seq *s);
823 enum print_line_t print_trace_line(struct trace_iterator *iter);
824 
825 extern char trace_find_mark(unsigned long long duration);
826 
827 struct ftrace_hash;
828 
829 struct ftrace_mod_load {
830         struct list_head        list;
831         char                    *func;
832         char                    *module;
833         int                      enable;
834 };
835 
836 enum {
837         FTRACE_HASH_FL_MOD      = (1 << 0),
838 };
839 
840 struct ftrace_hash {
841         unsigned long           size_bits;
842         struct hlist_head       *buckets;
843         unsigned long           count;
844         unsigned long           flags;
845         struct rcu_head         rcu;
846 };
847 
848 struct ftrace_func_entry *
849 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
850 
851 static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
852 {
853         return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
854 }
855 
856 /* Standard output formatting function used for function return traces */
857 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
858 
859 /* Flag options */
860 #define TRACE_GRAPH_PRINT_OVERRUN       0x1
861 #define TRACE_GRAPH_PRINT_CPU           0x2
862 #define TRACE_GRAPH_PRINT_OVERHEAD      0x4
863 #define TRACE_GRAPH_PRINT_PROC          0x8
864 #define TRACE_GRAPH_PRINT_DURATION      0x10
865 #define TRACE_GRAPH_PRINT_ABS_TIME      0x20
866 #define TRACE_GRAPH_PRINT_REL_TIME      0x40
867 #define TRACE_GRAPH_PRINT_IRQS          0x80
868 #define TRACE_GRAPH_PRINT_TAIL          0x100
869 #define TRACE_GRAPH_SLEEP_TIME          0x200
870 #define TRACE_GRAPH_GRAPH_TIME          0x400
871 #define TRACE_GRAPH_PRINT_RETVAL        0x800
872 #define TRACE_GRAPH_PRINT_RETVAL_HEX    0x1000
873 #define TRACE_GRAPH_PRINT_FILL_SHIFT    28
874 #define TRACE_GRAPH_PRINT_FILL_MASK     (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
875 
876 extern void ftrace_graph_sleep_time_control(bool enable);
877 
878 #ifdef CONFIG_FUNCTION_PROFILER
879 extern void ftrace_graph_graph_time_control(bool enable);
880 #else
881 static inline void ftrace_graph_graph_time_control(bool enable) { }
882 #endif
883 
884 extern enum print_line_t
885 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
886 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
887 extern void
888 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
889 extern void graph_trace_open(struct trace_iterator *iter);
890 extern void graph_trace_close(struct trace_iterator *iter);
891 extern int __trace_graph_entry(struct trace_array *tr,
892                                struct ftrace_graph_ent *trace,
893                                unsigned int trace_ctx);
894 extern void __trace_graph_return(struct trace_array *tr,
895                                  struct ftrace_graph_ret *trace,
896                                  unsigned int trace_ctx);
897 extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
898 extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
899 extern void free_fgraph_ops(struct trace_array *tr);
900 
901 enum {
902         TRACE_GRAPH_FL          = 1,
903 
904         /*
905          * In the very unlikely case that an interrupt came in
906          * at a start of graph tracing, and we want to trace
907          * the function in that interrupt, the depth can be greater
908          * than zero, because of the preempted start of a previous
909          * trace. In an even more unlikely case, depth could be 2
910          * if a softirq interrupted the start of graph tracing,
911          * followed by an interrupt preempting a start of graph
912          * tracing in the softirq, and depth can even be 3
913          * if an NMI came in at the start of an interrupt function
914          * that preempted a softirq start of a function that
915          * preempted normal context! Luckily, it can't be
916          * greater than 3, so the next two bits are a mask
917          * of what the depth is when we set TRACE_GRAPH_FL.
918          */
919 
920         TRACE_GRAPH_DEPTH_START_BIT,
921         TRACE_GRAPH_DEPTH_END_BIT,
922 
923         /*
924          * To implement set_graph_notrace, if this bit is set, we ignore
925          * function graph tracing of called functions, until the return
926          * function is called to clear it.
927          */
928         TRACE_GRAPH_NOTRACE_BIT,
929 };
930 
931 #define TRACE_GRAPH_NOTRACE             (1 << TRACE_GRAPH_NOTRACE_BIT)
932 
933 static inline unsigned long ftrace_graph_depth(unsigned long *task_var)
934 {
935         return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
936 }
937 
938 static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth)
939 {
940         *task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);
941         *task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
942 }
943 
944 #ifdef CONFIG_DYNAMIC_FTRACE
945 extern struct ftrace_hash __rcu *ftrace_graph_hash;
946 extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
947 
948 static inline int
949 ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
950 {
951         unsigned long addr = trace->func;
952         int ret = 0;
953         struct ftrace_hash *hash;
954 
955         preempt_disable_notrace();
956 
957         /*
958          * Have to open code "rcu_dereference_sched()" because the
959          * function graph tracer can be called when RCU is not
960          * "watching".
961          * Protected with schedule_on_each_cpu(ftrace_sync)
962          */
963         hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
964 
965         if (ftrace_hash_empty(hash)) {
966                 ret = 1;
967                 goto out;
968         }
969 
970         if (ftrace_lookup_ip(hash, addr)) {
971                 /*
972                  * This needs to be cleared on the return functions
973                  * when the depth is zero.
974                  */
975                 *task_var |= TRACE_GRAPH_FL;
976                 ftrace_graph_set_depth(task_var, trace->depth);
977 
978                 /*
979                  * If no irqs are to be traced, but a set_graph_function
980                  * is set, and called by an interrupt handler, we still
981                  * want to trace it.
982                  */
983                 if (in_hardirq())
984                         trace_recursion_set(TRACE_IRQ_BIT);
985                 else
986                         trace_recursion_clear(TRACE_IRQ_BIT);
987                 ret = 1;
988         }
989 
990 out:
991         preempt_enable_notrace();
992         return ret;
993 }
994 
995 static inline void
996 ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
997 {
998         unsigned long *task_var = fgraph_get_task_var(gops);
999 
1000         if ((*task_var & TRACE_GRAPH_FL) &&
1001             trace->depth == ftrace_graph_depth(task_var))
1002                 *task_var &= ~TRACE_GRAPH_FL;
1003 }
1004 
1005 static inline int ftrace_graph_notrace_addr(unsigned long addr)
1006 {
1007         int ret = 0;
1008         struct ftrace_hash *notrace_hash;
1009 
1010         preempt_disable_notrace();
1011 
1012         /*
1013          * Have to open code "rcu_dereference_sched()" because the
1014          * function graph tracer can be called when RCU is not
1015          * "watching".
1016          * Protected with schedule_on_each_cpu(ftrace_sync)
1017          */
1018         notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
1019                                                  !preemptible());
1020 
1021         if (ftrace_lookup_ip(notrace_hash, addr))
1022                 ret = 1;
1023 
1024         preempt_enable_notrace();
1025         return ret;
1026 }
1027 #else
1028 static inline int ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
1029 {
1030         return 1;
1031 }
1032 
1033 static inline int ftrace_graph_notrace_addr(unsigned long addr)
1034 {
1035         return 0;
1036 }
1037 static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
1038 { }
1039 #endif /* CONFIG_DYNAMIC_FTRACE */
1040 
1041 extern unsigned int fgraph_max_depth;
1042 
1043 static inline bool
1044 ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace)
1045 {
1046         unsigned long *task_var = fgraph_get_task_var(gops);
1047 
1048         /* Trace it when it is nested in an enabled function, or is itself enabled. */
1049         return !((*task_var & TRACE_GRAPH_FL) ||
1050                  ftrace_graph_addr(task_var, trace)) ||
1051                 (trace->depth < 0) ||
1052                 (fgraph_max_depth && trace->depth >= fgraph_max_depth);
1053 }
1054 
1055 void fgraph_init_ops(struct ftrace_ops *dst_ops,
1056                      struct ftrace_ops *src_ops);
1057 
1058 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
1059 static inline enum print_line_t
1060 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1061 {
1062         return TRACE_TYPE_UNHANDLED;
1063 }
1064 static inline void free_fgraph_ops(struct trace_array *tr) { }
1065 /* ftrace_ops may not be defined */
1066 #define init_array_fgraph_ops(tr, ops) do { } while (0)
1067 #define allocate_fgraph_ops(tr, ops) ({ 0; })
1068 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1069 
1070 extern struct list_head ftrace_pids;
1071 
1072 #ifdef CONFIG_FUNCTION_TRACER
1073 
1074 #define FTRACE_PID_IGNORE       -1
1075 #define FTRACE_PID_TRACE        -2
1076 
1077 struct ftrace_func_command {
1078         struct list_head        list;
1079         char                    *name;
1080         int                     (*func)(struct trace_array *tr,
1081                                         struct ftrace_hash *hash,
1082                                         char *func, char *cmd,
1083                                         char *params, int enable);
1084 };
1085 extern bool ftrace_filter_param __initdata;
1086 static inline int ftrace_trace_task(struct trace_array *tr)
1087 {
1088         return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
1089                 FTRACE_PID_IGNORE;
1090 }
1091 extern int ftrace_is_dead(void);
1092 int ftrace_create_function_files(struct trace_array *tr,
1093                                  struct dentry *parent);
1094 void ftrace_destroy_function_files(struct trace_array *tr);
1095 int ftrace_allocate_ftrace_ops(struct trace_array *tr);
1096 void ftrace_free_ftrace_ops(struct trace_array *tr);
1097 void ftrace_init_global_array_ops(struct trace_array *tr);
1098 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1099 void ftrace_reset_array_ops(struct trace_array *tr);
1100 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
1101 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1102                                   struct dentry *d_tracer);
1103 void ftrace_clear_pids(struct trace_array *tr);
1104 int init_function_trace(void);
1105 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1106 #else
1107 static inline int ftrace_trace_task(struct trace_array *tr)
1108 {
1109         return 1;
1110 }
1111 static inline int ftrace_is_dead(void) { return 0; }
1112 static inline int
1113 ftrace_create_function_files(struct trace_array *tr,
1114                              struct dentry *parent)
1115 {
1116         return 0;
1117 }
1118 static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
1119 {
1120         return 0;
1121 }
1122 static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
1123 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
1124 static inline __init void
1125 ftrace_init_global_array_ops(struct trace_array *tr) { }
1126 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
1127 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
1128 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
1129 static inline void ftrace_clear_pids(struct trace_array *tr) { }
1130 static inline int init_function_trace(void) { return 0; }
1131 static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
1132 /* ftrace_func_t type is not defined, use macro instead of static inline */
1133 #define ftrace_init_array_ops(tr, func) do { } while (0)
1134 #endif /* CONFIG_FUNCTION_TRACER */
1135 
1136 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1137 
1138 struct ftrace_probe_ops {
1139         void                    (*func)(unsigned long ip,
1140                                         unsigned long parent_ip,
1141                                         struct trace_array *tr,
1142                                         struct ftrace_probe_ops *ops,
1143                                         void *data);
1144         int                     (*init)(struct ftrace_probe_ops *ops,
1145                                         struct trace_array *tr,
1146                                         unsigned long ip, void *init_data,
1147                                         void **data);
1148         void                    (*free)(struct ftrace_probe_ops *ops,
1149                                         struct trace_array *tr,
1150                                         unsigned long ip, void *data);
1151         int                     (*print)(struct seq_file *m,
1152                                          unsigned long ip,
1153                                          struct ftrace_probe_ops *ops,
1154                                          void *data);
1155 };
1156 
1157 struct ftrace_func_mapper;
1158 typedef int (*ftrace_mapper_func)(void *data);
1159 
1160 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1161 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1162                                            unsigned long ip);
1163 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1164                                unsigned long ip, void *data);
1165 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1166                                    unsigned long ip);
1167 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1168                              ftrace_mapper_func free_func);
1169 
1170 extern int
1171 register_ftrace_function_probe(char *glob, struct trace_array *tr,
1172                                struct ftrace_probe_ops *ops, void *data);
1173 extern int
1174 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1175                                       struct ftrace_probe_ops *ops);
1176 extern void clear_ftrace_function_probes(struct trace_array *tr);
1177 
1178 int register_ftrace_command(struct ftrace_func_command *cmd);
1179 int unregister_ftrace_command(struct ftrace_func_command *cmd);
1180 
1181 void ftrace_create_filter_files(struct ftrace_ops *ops,
1182                                 struct dentry *parent);
1183 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1184 
1185 extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
1186                              int len, int reset);
1187 extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
1188                               int len, int reset);
1189 #else
1190 struct ftrace_func_command;
1191 
1192 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
1193 {
1194         return -EINVAL;
1195 }
1196 static inline __init int unregister_ftrace_command(char *cmd_name)
1197 {
1198         return -EINVAL;
1199 }
1200 static inline void clear_ftrace_function_probes(struct trace_array *tr)
1201 {
1202 }
1203 
1204 /*
1205  * The ops parameter passed in is usually undefined.
1206  * This must be a macro.
1207  */
1208 #define ftrace_create_filter_files(ops, parent) do { } while (0)
1209 #define ftrace_destroy_filter_files(ops) do { } while (0)
1210 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1211 
1212 bool ftrace_event_is_function(struct trace_event_call *call);
1213 
1214 /*
1215  * struct trace_parser - serves for reading the user input separated by spaces
1216  * @cont: set if the input is not complete - no final space char was found
1217  * @buffer: holds the parsed user input
1218  * @idx: user input length
1219  * @size: buffer size
1220  */
1221 struct trace_parser {
1222         bool            cont;
1223         char            *buffer;
1224         unsigned        idx;
1225         unsigned        size;
1226 };
1227 
1228 static inline bool trace_parser_loaded(struct trace_parser *parser)
1229 {
1230         return (parser->idx != 0);
1231 }
1232 
1233 static inline bool trace_parser_cont(struct trace_parser *parser)
1234 {
1235         return parser->cont;
1236 }
1237 
1238 static inline void trace_parser_clear(struct trace_parser *parser)
1239 {
1240         parser->cont = false;
1241         parser->idx = 0;
1242 }
1243 
1244 extern int trace_parser_get_init(struct trace_parser *parser, int size);
1245 extern void trace_parser_put(struct trace_parser *parser);
1246 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1247         size_t cnt, loff_t *ppos);
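
/*
 * A minimal sketch of the usual trace_parser pattern in a tracefs
 * write handler (hypothetical handler; handle_token() stands in for
 * whatever consumes the parsed, space-separated token):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		handle_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 *	return read;
 */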
1248 
1249 /*
1250  * Only create function graph options if function graph is configured.
1251  */
1252 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1253 # define FGRAPH_FLAGS                                           \
1254                 C(DISPLAY_GRAPH,        "display-graph"),
1255 #else
1256 # define FGRAPH_FLAGS
1257 #endif
1258 
1259 #ifdef CONFIG_BRANCH_TRACER
1260 # define BRANCH_FLAGS                                   \
1261                 C(BRANCH,               "branch"),
1262 #else
1263 # define BRANCH_FLAGS
1264 #endif
1265 
1266 #ifdef CONFIG_FUNCTION_TRACER
1267 # define FUNCTION_FLAGS                                         \
1268                 C(FUNCTION,             "function-trace"),      \
1269                 C(FUNC_FORK,            "function-fork"),
1270 # define FUNCTION_DEFAULT_FLAGS         TRACE_ITER_FUNCTION
1271 #else
1272 # define FUNCTION_FLAGS
1273 # define FUNCTION_DEFAULT_FLAGS         0UL
1274 # define TRACE_ITER_FUNC_FORK           0UL
1275 #endif
1276 
1277 #ifdef CONFIG_STACKTRACE
1278 # define STACK_FLAGS                            \
1279                 C(STACKTRACE,           "stacktrace"),
1280 #else
1281 # define STACK_FLAGS
1282 #endif
1283 
1284 /*
1285  * trace_iterator_flags is an enumeration that defines bit
1286  * positions into trace_flags that control the output.
1287  *
1288  * NOTE: These bits must match the trace_options array in
1289  *       trace.c (this macro guarantees it).
1290  */
1291 #define TRACE_FLAGS                                             \
1292                 C(PRINT_PARENT,         "print-parent"),        \
1293                 C(SYM_OFFSET,           "sym-offset"),          \
1294                 C(SYM_ADDR,             "sym-addr"),            \
1295                 C(VERBOSE,              "verbose"),             \
1296                 C(RAW,                  "raw"),                 \
1297                 C(HEX,                  "hex"),                 \
1298                 C(BIN,                  "bin"),                 \
1299                 C(BLOCK,                "block"),               \
1300                 C(FIELDS,               "fields"),              \
1301                 C(PRINTK,               "trace_printk"),        \
1302                 C(ANNOTATE,             "annotate"),            \
1303                 C(USERSTACKTRACE,       "userstacktrace"),      \
1304                 C(SYM_USEROBJ,          "sym-userobj"),         \
1305                 C(PRINTK_MSGONLY,       "printk-msg-only"),     \
1306                 C(CONTEXT_INFO,         "context-info"),   /* Print pid/cpu/time */ \
1307                 C(LATENCY_FMT,          "latency-format"),      \
1308                 C(RECORD_CMD,           "record-cmd"),          \
1309                 C(RECORD_TGID,          "record-tgid"),         \
1310                 C(OVERWRITE,            "overwrite"),           \
1311                 C(STOP_ON_FREE,         "disable_on_free"),     \
1312                 C(IRQ_INFO,             "irq-info"),            \
1313                 C(MARKERS,              "markers"),             \
1314                 C(EVENT_FORK,           "event-fork"),          \
1315                 C(PAUSE_ON_TRACE,       "pause-on-trace"),      \
1316                 C(HASH_PTR,             "hash-ptr"),    /* Print hashed pointer */ \
1317                 FUNCTION_FLAGS                                  \
1318                 FGRAPH_FLAGS                                    \
1319                 STACK_FLAGS                                     \
1320                 BRANCH_FLAGS
1321 
1322 /*
1323  * By defining C, we can make TRACE_FLAGS a list of bit names
1324  * that will define the bits for the flag masks.
1325  */
1326 #undef C
1327 #define C(a, b) TRACE_ITER_##a##_BIT
1328 
1329 enum trace_iterator_bits {
1330         TRACE_FLAGS
1331         /* Make sure we don't use more bits than we have */
1332         TRACE_ITER_LAST_BIT
1333 };
1334 
1335 /*
1336  * By redefining C, we can make TRACE_FLAGS a list of masks that
1337  * use the bits as defined above.
1338  */
1339 #undef C
1340 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1341 
1342 enum trace_iterator_flags { TRACE_FLAGS };
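
/*
 * Illustrative sketch of the C() expansion (FOO and BAR are
 * hypothetical flags, not part of this header). Given a list such as
 *
 *     #define MY_FLAGS C(FOO, "foo"), C(BAR, "bar"),
 *
 * the first C() definition yields the bit positions
 *
 *     enum { TRACE_ITER_FOO_BIT, TRACE_ITER_BAR_BIT, ... };
 *
 * and the redefined C() yields the matching masks
 *
 *     enum { TRACE_ITER_FOO = (1 << TRACE_ITER_FOO_BIT),
 *            TRACE_ITER_BAR = (1 << TRACE_ITER_BAR_BIT) };
 *
 * trace.c redefines C() a third time to turn the very same list into
 * the trace_options[] array of option-name strings, which is how the
 * NOTE above ("this macro guarantees it") is enforced.
 */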
1343 
1344 /*
1345  * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1346  * control the output of kernel symbols.
1347  */
1348 #define TRACE_ITER_SYM_MASK \
1349         (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1350 
1351 extern struct tracer nop_trace;
1352 
1353 #ifdef CONFIG_BRANCH_TRACER
1354 extern int enable_branch_tracing(struct trace_array *tr);
1355 extern void disable_branch_tracing(void);
1356 static inline int trace_branch_enable(struct trace_array *tr)
1357 {
1358         if (tr->trace_flags & TRACE_ITER_BRANCH)
1359                 return enable_branch_tracing(tr);
1360         return 0;
1361 }
1362 static inline void trace_branch_disable(void)
1363 {
1364         /* due to races, always disable */
1365         disable_branch_tracing();
1366 }
1367 #else
1368 static inline int trace_branch_enable(struct trace_array *tr)
1369 {
1370         return 0;
1371 }
1372 static inline void trace_branch_disable(void)
1373 {
1374 }
1375 #endif /* CONFIG_BRANCH_TRACER */
1376 
1377 /* set ring buffers to default size if not already done */
1378 int tracing_update_buffers(struct trace_array *tr);
1379 
1380 union trace_synth_field {
1381         u8                              as_u8;
1382         u16                             as_u16;
1383         u32                             as_u32;
1384         u64                             as_u64;
1385         struct trace_dynamic_info       as_dynamic;
1386 };
1387 
1388 struct ftrace_event_field {
1389         struct list_head        link;
1390         const char              *name;
1391         const char              *type;
1392         int                     filter_type;
1393         int                     offset;
1394         int                     size;
1395         int                     is_signed;
1396         int                     len;
1397 };
1398 
1399 struct prog_entry;
1400 
1401 struct event_filter {
1402         struct prog_entry __rcu *prog;
1403         char                    *filter_string;
1404 };
1405 
1406 struct event_subsystem {
1407         struct list_head        list;
1408         const char              *name;
1409         struct event_filter     *filter;
1410         int                     ref_count;
1411 };
1412 
1413 struct trace_subsystem_dir {
1414         struct list_head                list;
1415         struct event_subsystem          *subsystem;
1416         struct trace_array              *tr;
1417         struct eventfs_inode            *ei;
1418         int                             ref_count;
1419         int                             nr_events;
1420 };
1421 
1422 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1423                                      struct trace_buffer *buffer,
1424                                      struct ring_buffer_event *event);
1425 
1426 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1427                                      struct trace_buffer *buffer,
1428                                      struct ring_buffer_event *event,
1429                                      unsigned int trace_ctx,
1430                                      struct pt_regs *regs);
1431 
1432 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1433                                               struct trace_buffer *buffer,
1434                                               struct ring_buffer_event *event,
1435                                               unsigned int trace_ctx)
1436 {
1437         trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
1438 }
1439 
1440 DECLARE_PER_CPU(bool, trace_taskinfo_save);
1441 int trace_save_cmdline(struct task_struct *tsk);
1442 int trace_create_savedcmd(void);
1443 int trace_alloc_tgid_map(void);
1444 void trace_free_saved_cmdlines_buffer(void);
1445 
1446 extern const struct file_operations tracing_saved_cmdlines_fops;
1447 extern const struct file_operations tracing_saved_tgids_fops;
1448 extern const struct file_operations tracing_saved_cmdlines_size_fops;
1449 
1450 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1451 DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1452 void trace_buffered_event_disable(void);
1453 void trace_buffered_event_enable(void);
1454 
1455 void early_enable_events(struct trace_array *tr, char *buf, bool disable_first);
1456 
1457 static inline void
1458 __trace_event_discard_commit(struct trace_buffer *buffer,
1459                              struct ring_buffer_event *event)
1460 {
1461         if (this_cpu_read(trace_buffered_event) == event) {
1462                 /* Simply release the temp buffer and enable preemption */
1463                 this_cpu_dec(trace_buffered_event_cnt);
1464                 preempt_enable_notrace();
1465                 return;
1466         }
1467         /* ring_buffer_discard_commit() enables preemption */
1468         ring_buffer_discard_commit(buffer, event);
1469 }
1470 
1471 /*
1472  * Helper function for event_trigger_unlock_commit().
1473  * If there are event triggers attached to this event that require
1474  * filtering against its fields, then they will be called, since the
1475  * entry already holds the field information of the current event.
1476  *
1477  * It also checks whether the event should be discarded.
1478  * The event is discarded if it is soft disabled and was only
1479  * recorded to process triggers, or if the event filter is active
1480  * and this event did not match the filters.
1481  *
1482  * Returns true if the event is discarded, false otherwise.
1483  */
1484 static inline bool
1485 __event_trigger_test_discard(struct trace_event_file *file,
1486                              struct trace_buffer *buffer,
1487                              struct ring_buffer_event *event,
1488                              void *entry,
1489                              enum event_trigger_type *tt)
1490 {
1491         unsigned long eflags = file->flags;
1492 
1493         if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1494                 *tt = event_triggers_call(file, buffer, entry, event);
1495 
1496         if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
1497                                     EVENT_FILE_FL_FILTERED |
1498                                     EVENT_FILE_FL_PID_FILTER))))
1499                 return false;
1500 
1501         if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
1502                 goto discard;
1503 
1504         if (file->flags & EVENT_FILE_FL_FILTERED &&
1505             !filter_match_preds(file->filter, entry))
1506                 goto discard;
1507 
1508         if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
1509             trace_event_ignore_this_pid(file))
1510                 goto discard;
1511 
1512         return false;
1513  discard:
1514         __trace_event_discard_commit(buffer, event);
1515         return true;
1516 }
1517 
1518 /**
1519  * event_trigger_unlock_commit - handle triggers and finish event commit
1520  * @file: The file pointer associated with the event
1521  * @buffer: The ring buffer that the event is being written to
1522  * @event: The event meta data in the ring buffer
1523  * @entry: The event itself
1524  * @trace_ctx: The tracing context flags.
1525  *
1526  * This is a helper function to handle triggers that require data
1527  * from the event itself. It also tests the event against filters, and
1528  * checks whether the event is soft disabled and should be discarded.
1529  */
1530 static inline void
1531 event_trigger_unlock_commit(struct trace_event_file *file,
1532                             struct trace_buffer *buffer,
1533                             struct ring_buffer_event *event,
1534                             void *entry, unsigned int trace_ctx)
1535 {
1536         enum event_trigger_type tt = ETT_NONE;
1537 
1538         if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1539                 trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
1540 
1541         if (tt)
1542                 event_triggers_post_call(file, tt);
1543 }
1544 
1545 #define FILTER_PRED_INVALID     ((unsigned short)-1)
1546 #define FILTER_PRED_IS_RIGHT    (1 << 15)
1547 #define FILTER_PRED_FOLD        (1 << 15)
1548 
1549 /*
1550  * The maximum number of preds is limited by the size of an unsigned
1551  * short with two flag bits at the MSBs. One bit is used for both the
1552  * IS_RIGHT and FOLD flags. The other is reserved.
1553  *
1554  * 2^14 preds is way more than enough.
1555  */
1556 #define MAX_FILTER_PRED         16384
1557 
1558 struct filter_pred;
1559 struct regex;
1560 
1561 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1562 
1563 enum regex_type {
1564         MATCH_FULL = 0,
1565         MATCH_FRONT_ONLY,
1566         MATCH_MIDDLE_ONLY,
1567         MATCH_END_ONLY,
1568         MATCH_GLOB,
1569         MATCH_INDEX,
1570 };
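
/*
 * Rough sketch of how filter_parse_regex() (declared below) maps a
 * user-supplied pattern onto these types; trace_events_filter.c holds
 * the authoritative logic:
 *
 *     "sched"      -> MATCH_FULL         (exact compare)
 *     "sched*"     -> MATCH_FRONT_ONLY   (prefix compare)
 *     "*switch"    -> MATCH_END_ONLY     (suffix compare)
 *     "*sched*"    -> MATCH_MIDDLE_ONLY  (substring compare)
 *     "s?hed[xy]*" -> MATCH_GLOB         (full glob matching)
 *
 * A leading '!' negates the match and is reported through the *not
 * argument rather than through the returned type.
 */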
1571 
1572 struct regex {
1573         char                    pattern[MAX_FILTER_STR_VAL];
1574         int                     len;
1575         int                     field_len;
1576         regex_match_func        match;
1577 };
1578 
1579 static inline bool is_string_field(struct ftrace_event_field *field)
1580 {
1581         return field->filter_type == FILTER_DYN_STRING ||
1582                field->filter_type == FILTER_RDYN_STRING ||
1583                field->filter_type == FILTER_STATIC_STRING ||
1584                field->filter_type == FILTER_PTR_STRING ||
1585                field->filter_type == FILTER_COMM;
1586 }
1587 
1588 static inline bool is_function_field(struct ftrace_event_field *field)
1589 {
1590         return field->filter_type == FILTER_TRACE_FN;
1591 }
1592 
1593 extern enum regex_type
1594 filter_parse_regex(char *buff, int len, char **search, int *not);
1595 extern void print_event_filter(struct trace_event_file *file,
1596                                struct trace_seq *s);
1597 extern int apply_event_filter(struct trace_event_file *file,
1598                               char *filter_string);
1599 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1600                                         char *filter_string);
1601 extern void print_subsystem_event_filter(struct event_subsystem *system,
1602                                          struct trace_seq *s);
1603 extern int filter_assign_type(const char *type);
1604 extern int create_event_filter(struct trace_array *tr,
1605                                struct trace_event_call *call,
1606                                char *filter_str, bool set_str,
1607                                struct event_filter **filterp);
1608 extern void free_event_filter(struct event_filter *filter);
1609 
1610 struct ftrace_event_field *
1611 trace_find_event_field(struct trace_event_call *call, char *name);
1612 
1613 extern void trace_event_enable_cmd_record(bool enable);
1614 extern void trace_event_enable_tgid_record(bool enable);
1615 
1616 extern int event_trace_init(void);
1617 extern int init_events(void);
1618 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1619 extern int event_trace_del_tracer(struct trace_array *tr);
1620 extern void __trace_early_add_events(struct trace_array *tr);
1621 
1622 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1623                                                   const char *system,
1624                                                   const char *event);
1625 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1626                                                 const char *system,
1627                                                 const char *event);
1628 
1629 static inline void *event_file_data(struct file *filp)
1630 {
1631         return READ_ONCE(file_inode(filp)->i_private);
1632 }
1633 
1634 extern struct mutex event_mutex;
1635 extern struct list_head ftrace_events;
1636 
1637 /*
1638  * When the trace_event_file is the filp->i_private pointer,
1639  * it must be read under the event_mutex lock and then checked
1640  * for the EVENT_FILE_FL_FREED flag. If that flag is set, the
1641  * data pointed to by the trace_event_file cannot be trusted.
1642  *
1643  * Use event_file_file() to access the trace_event_file from the
1644  * filp the first time under the event_mutex, and check for NULL.
1645  * If it needs to be retrieved again while the event_mutex is
1646  * still held, event_file_data() can be used and the result is
1647  * guaranteed to be valid.
1648  */
1649 static inline struct trace_event_file *event_file_file(struct file *filp)
1650 {
1651         struct trace_event_file *file;
1652 
1653         lockdep_assert_held(&event_mutex);
1654         file = READ_ONCE(file_inode(filp)->i_private);
1655         if (!file || file->flags & EVENT_FILE_FL_FREED)
1656                 return NULL;
1657         return file;
1658 }
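
/*
 * Illustrative caller sketch (foo_read() and do_foo() are
 * hypothetical, not part of this header):
 *
 *     static ssize_t foo_read(struct file *filp, char __user *ubuf,
 *                             size_t cnt, loff_t *ppos)
 *     {
 *             struct trace_event_file *file;
 *             ssize_t ret;
 *
 *             mutex_lock(&event_mutex);
 *             file = event_file_file(filp);
 *             if (!file) {
 *                     mutex_unlock(&event_mutex);
 *                     return -ENODEV;
 *             }
 *             ret = do_foo(file, ubuf, cnt, ppos);  (still under event_mutex)
 *             mutex_unlock(&event_mutex);
 *             return ret;
 *     }
 */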
1659 
1660 extern const struct file_operations event_trigger_fops;
1661 extern const struct file_operations event_hist_fops;
1662 extern const struct file_operations event_hist_debug_fops;
1663 extern const struct file_operations event_inject_fops;
1664 
1665 #ifdef CONFIG_HIST_TRIGGERS
1666 extern int register_trigger_hist_cmd(void);
1667 extern int register_trigger_hist_enable_disable_cmds(void);
1668 #else
1669 static inline int register_trigger_hist_cmd(void) { return 0; }
1670 static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1671 #endif
1672 
1673 extern int register_trigger_cmds(void);
1674 extern void clear_event_triggers(struct trace_array *tr);
1675 
1676 enum {
1677         EVENT_TRIGGER_FL_PROBE          = BIT(0),
1678 };
1679 
1680 struct event_trigger_data {
1681         unsigned long                   count;
1682         int                             ref;
1683         int                             flags;
1684         struct event_trigger_ops        *ops;
1685         struct event_command            *cmd_ops;
1686         struct event_filter __rcu       *filter;
1687         char                            *filter_str;
1688         void                            *private_data;
1689         bool                            paused;
1690         bool                            paused_tmp;
1691         struct list_head                list;
1692         char                            *name;
1693         struct list_head                named_list;
1694         struct event_trigger_data       *named_data;
1695 };
1696 
1697 /* Avoid typos */
1698 #define ENABLE_EVENT_STR        "enable_event"
1699 #define DISABLE_EVENT_STR       "disable_event"
1700 #define ENABLE_HIST_STR         "enable_hist"
1701 #define DISABLE_HIST_STR        "disable_hist"
1702 
1703 struct enable_trigger_data {
1704         struct trace_event_file         *file;
1705         bool                            enable;
1706         bool                            hist;
1707 };
1708 
1709 extern int event_enable_trigger_print(struct seq_file *m,
1710                                       struct event_trigger_data *data);
1711 extern void event_enable_trigger_free(struct event_trigger_data *data);
1712 extern int event_enable_trigger_parse(struct event_command *cmd_ops,
1713                                       struct trace_event_file *file,
1714                                       char *glob, char *cmd,
1715                                       char *param_and_filter);
1716 extern int event_enable_register_trigger(char *glob,
1717                                          struct event_trigger_data *data,
1718                                          struct trace_event_file *file);
1719 extern void event_enable_unregister_trigger(char *glob,
1720                                             struct event_trigger_data *test,
1721                                             struct trace_event_file *file);
1722 extern void trigger_data_free(struct event_trigger_data *data);
1723 extern int event_trigger_init(struct event_trigger_data *data);
1724 extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1725                                               int trigger_enable);
1726 extern void update_cond_flag(struct trace_event_file *file);
1727 extern int set_trigger_filter(char *filter_str,
1728                               struct event_trigger_data *trigger_data,
1729                               struct trace_event_file *file);
1730 extern struct event_trigger_data *find_named_trigger(const char *name);
1731 extern bool is_named_trigger(struct event_trigger_data *test);
1732 extern int save_named_trigger(const char *name,
1733                               struct event_trigger_data *data);
1734 extern void del_named_trigger(struct event_trigger_data *data);
1735 extern void pause_named_trigger(struct event_trigger_data *data);
1736 extern void unpause_named_trigger(struct event_trigger_data *data);
1737 extern void set_named_trigger_data(struct event_trigger_data *data,
1738                                    struct event_trigger_data *named_data);
1739 extern struct event_trigger_data *
1740 get_named_trigger_data(struct event_trigger_data *data);
1741 extern int register_event_command(struct event_command *cmd);
1742 extern int unregister_event_command(struct event_command *cmd);
1743 extern int register_trigger_hist_enable_disable_cmds(void);
1744 extern bool event_trigger_check_remove(const char *glob);
1745 extern bool event_trigger_empty_param(const char *param);
1746 extern int event_trigger_separate_filter(char *param_and_filter, char **param,
1747                                          char **filter, bool param_required);
1748 extern struct event_trigger_data *
1749 event_trigger_alloc(struct event_command *cmd_ops,
1750                     char *cmd,
1751                     char *param,
1752                     void *private_data);
1753 extern int event_trigger_parse_num(char *trigger,
1754                                    struct event_trigger_data *trigger_data);
1755 extern int event_trigger_set_filter(struct event_command *cmd_ops,
1756                                     struct trace_event_file *file,
1757                                     char *param,
1758                                     struct event_trigger_data *trigger_data);
1759 extern void event_trigger_reset_filter(struct event_command *cmd_ops,
1760                                        struct event_trigger_data *trigger_data);
1761 extern int event_trigger_register(struct event_command *cmd_ops,
1762                                   struct trace_event_file *file,
1763                                   char *glob,
1764                                   struct event_trigger_data *trigger_data);
1765 extern void event_trigger_unregister(struct event_command *cmd_ops,
1766                                      struct trace_event_file *file,
1767                                      char *glob,
1768                                      struct event_trigger_data *trigger_data);
1769 
1770 extern void event_file_get(struct trace_event_file *file);
1771 extern void event_file_put(struct trace_event_file *file);
1772 
1773 /**
1774  * struct event_trigger_ops - callbacks for trace event triggers
1775  *
1776  * The methods in this structure provide per-event trigger hooks for
1777  * various trigger operations.
1778  *
1779  * The @init and @free methods are used during trigger setup and
1780  * teardown, typically called from an event_command's @parse()
1781  * function implementation.
1782  *
1783  * The @print method is used to print the trigger spec.
1784  *
1785  * The @trigger method is the function that actually implements the
1786  * trigger and is called in the context of the triggering event
1787  * whenever that event occurs.
1788  *
1789  * All the methods below, except for @init() and @free(), must be
1790  * implemented.
1791  *
1792  * @trigger: The trigger 'probe' function called when the triggering
1793  *      event occurs.  The data passed into this callback is the data
1794  *      that was supplied to the event_command @reg() function that
1795  *      registered the trigger (see struct event_command) along with
1796  *      the trace record, rec.
1797  *
1798  * @init: An optional initialization function called for the trigger
1799  *      when the trigger is registered (via the event_command reg()
1800  *      function).  This can be used to perform per-trigger
1801  *      initialization such as incrementing a per-trigger reference
1802  *      count, for instance.  This is usually implemented by the
1803  *      generic utility function @event_trigger_init() (see
1804  *      trace_event_triggers.c).
1805  *
1806  * @free: An optional de-initialization function called for the
1807  *      trigger when the trigger is unregistered (via the
1808  *      event_command @reg() function).  This can be used to perform
1809  *      per-trigger de-initialization such as decrementing a
1810  *      per-trigger reference count and freeing corresponding trigger
1811  *      data, for instance.  This is usually implemented by the
1812  *      generic utility function @event_trigger_free() (see
1813  *      trace_event_triggers.c).
1814  *
1815  * @print: The callback function invoked to have the trigger print
1816  *      itself.  This is usually implemented by a wrapper function
1817  *      that calls the generic utility function @event_trigger_print()
1818  *      (see trace_event_triggers.c).
1819  */
1820 struct event_trigger_ops {
1821         void                    (*trigger)(struct event_trigger_data *data,
1822                                            struct trace_buffer *buffer,
1823                                            void *rec,
1824                                            struct ring_buffer_event *rbe);
1825         int                     (*init)(struct event_trigger_data *data);
1826         void                    (*free)(struct event_trigger_data *data);
1827         int                     (*print)(struct seq_file *m,
1828                                          struct event_trigger_data *data);
1829 };
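
/*
 * Minimal implementation sketch wired to these ops. All "my_*" names
 * are hypothetical; the real instances (traceon, traceoff, snapshot,
 * stacktrace, ...) live in trace_events_trigger.c:
 *
 *     static void my_trigger(struct event_trigger_data *data,
 *                            struct trace_buffer *buffer, void *rec,
 *                            struct ring_buffer_event *rbe)
 *     {
 *             if (!data->count)
 *                     return;
 *             if (data->count != -1)
 *                     (data->count)--;
 *             ... act on the triggering event here ...
 *     }
 *
 *     static int my_trigger_print(struct seq_file *m,
 *                                 struct event_trigger_data *data)
 *     {
 *             seq_puts(m, "my_trigger");
 *             if (data->count != -1)
 *                     seq_printf(m, ":count=%lu", data->count);
 *             seq_putc(m, '\n');
 *             return 0;
 *     }
 *
 *     static struct event_trigger_ops my_trigger_ops = {
 *             .trigger = my_trigger,
 *             .print   = my_trigger_print,
 *             .init    = event_trigger_init,  (generic helper declared above)
 *             .free    = my_trigger_free,     (pairs with @init)
 *     };
 */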
1830 
1831 /**
1832  * struct event_command - callbacks and data members for event commands
1833  *
1834  * Event commands are invoked by users by writing the command name
1835  * into the 'trigger' file associated with a trace event.  The
1836  * parameters associated with a specific invocation of an event
1837  * command are used to create an event trigger instance, which is
1838  * added to the list of trigger instances associated with that trace
1839  * event.  When the event is hit, the set of triggers associated with
1840  * that event is invoked.
1841  *
1842  * The data members in this structure provide per-event command data
1843  * for various event commands.
1844  *
1845  * All the data members below, except for @post_trigger, must be set
1846  * for each event command.
1847  *
1848  * @name: The unique name that identifies the event command.  This is
1849  *      the name used when setting triggers via trigger files.
1850  *
1851  * @trigger_type: A unique id that identifies the event command
1852  *      'type'.  This value has two purposes, the first to ensure that
1853  *      only one trigger of the same type can be set at a given time
1854  *      for a particular event e.g. it doesn't make sense to have both
1855  *      a traceon and traceoff trigger attached to a single event at
1856  *      the same time, so traceon and traceoff have the same type
1857  *      though they have different names.  The @trigger_type value is
1858  *      also used as a bit value for deferring the actual trigger
1859  *      action until after the current event is finished.  Some
1860  *      commands need to do this if they themselves log to the trace
1861  *      buffer (see the @post_trigger() member below).  @trigger_type
1862  *      values are defined by adding new values to the trigger_type
1863  *      enum in include/linux/trace_events.h.
1864  *
1865  * @flags: See the enum event_command_flags below.
1866  *
1867  * All the methods below, except for @set_filter() and @unreg_all(),
1868  * must be implemented.
1869  *
1870  * @parse: The callback function responsible for parsing and
1871  *      registering the trigger written to the 'trigger' file by the
1872  *      user.  It allocates the trigger instance and registers it with
1873  *      the appropriate trace event.  It makes use of the other
1874  *      event_command callback functions to orchestrate this, and is
1875  *      usually implemented by the generic utility function
1876  *      @event_trigger_callback() (see trace_event_triggers.c).
1877  *
1878  * @reg: Adds the trigger to the list of triggers associated with the
1879  *      event, and enables the event trigger itself, after
1880  *      initializing it (via the event_trigger_ops @init() function).
1881  *      This is also where commands can use the @trigger_type value to
1882  *      make the decision as to whether or not multiple instances of
1883  *      the trigger should be allowed.  This is usually implemented by
1884  *      the generic utility function @register_trigger() (see
1885  *      trace_event_triggers.c).
1886  *
1887  * @unreg: Removes the trigger from the list of triggers associated
1888  *      with the event, and disables the event trigger itself, then
1889  *      frees it (via the event_trigger_ops @free() function).
1890  *      This is usually implemented by the generic utility function
1891  *      @unregister_trigger() (see trace_event_triggers.c).
1892  *
1893  * @unreg_all: An optional function called to remove all the triggers
1894  *      from the list of triggers associated with the event.  Called
1895  *      when a trigger file is opened in truncate mode.
1896  *
1897  * @set_filter: An optional function called to parse and set a filter
1898  *      for the trigger.  If no @set_filter() method is set for the
1899  *      event command, filters set by the user for the command will be
1900  *      ignored.  This is usually implemented by the generic utility
1901  *      function @set_trigger_filter() (see trace_event_triggers.c).
1902  *
1903  * @get_trigger_ops: The callback function invoked to retrieve the
1904  *      event_trigger_ops implementation associated with the command.
1905  *      This callback function allows a single event_command to
1906  *      support multiple trigger implementations via different sets of
1907  *      event_trigger_ops, depending on the value of the @param
1908  *      string.
1909  */
1910 struct event_command {
1911         struct list_head        list;
1912         char                    *name;
1913         enum event_trigger_type trigger_type;
1914         int                     flags;
1915         int                     (*parse)(struct event_command *cmd_ops,
1916                                          struct trace_event_file *file,
1917                                          char *glob, char *cmd,
1918                                          char *param_and_filter);
1919         int                     (*reg)(char *glob,
1920                                        struct event_trigger_data *data,
1921                                        struct trace_event_file *file);
1922         void                    (*unreg)(char *glob,
1923                                          struct event_trigger_data *data,
1924                                          struct trace_event_file *file);
1925         void                    (*unreg_all)(struct trace_event_file *file);
1926         int                     (*set_filter)(char *filter_str,
1927                                               struct event_trigger_data *data,
1928                                               struct trace_event_file *file);
1929         struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1930 };
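
/*
 * Minimal registration sketch. "my_cmd" and its callbacks are
 * hypothetical; the real commands are defined and registered from
 * trace_events_trigger.c and trace_events_hist.c, and a new command
 * would also need its own ETT_* bit in enum event_trigger_type:
 *
 *     static struct event_command my_cmd = {
 *             .name            = "my_cmd",
 *             .trigger_type    = ETT_MY_CMD,         (hypothetical bit)
 *             .flags           = EVENT_CMD_FL_NEEDS_REC,
 *             .parse           = my_cmd_parse,
 *             .reg             = register_trigger,   (generic, see @reg above)
 *             .unreg           = unregister_trigger, (generic, see @unreg above)
 *             .set_filter      = set_trigger_filter,
 *             .get_trigger_ops = my_cmd_get_trigger_ops,
 *     };
 *
 *     ret = register_event_command(&my_cmd);
 */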
1931 
1932 /**
1933  * enum event_command_flags - flags for struct event_command
1934  *
1935  * @POST_TRIGGER: A flag that says whether or not this command needs
1936  *      to have its action delayed until after the current event has
1937  *      been closed.  Some triggers need to avoid being invoked while
1938  *      an event is currently in the process of being logged, since
1939  *      the trigger may itself log data into the trace buffer.  Thus
1940  *      we make sure the current event is committed before invoking
1941  *      those triggers.  To do that, the trigger invocation is split
1942  *      in two - the first part checks the filter using the current
1943  *      trace record; if a command has the @post_trigger flag set, it
1944  *      sets a bit for itself in the return value, otherwise it
1945  *      directly invokes the trigger.  Once all commands have been
1946  *      either invoked or set their return flag, the current record is
1947  *      either committed or discarded.  At that point, if any commands
1948  *      have deferred their triggers, those commands are finally
1949  *      invoked following the close of the current event.  In other
1950  * words, if the event_trigger_ops @trigger() probe implementation
1951  *      itself logs to the trace buffer, this flag should be set,
1952  *      otherwise it can be left unspecified.
1953  *
1954  * @NEEDS_REC: A flag that says whether or not this command needs
1955  *      access to the trace record in order to perform its function,
1956  *      regardless of whether or not it has a filter associated with
1957  *      it (filters make a trigger require access to the trace record
1958  *      but are not always present).
1959  */
1960 enum event_command_flags {
1961         EVENT_CMD_FL_POST_TRIGGER       = 1,
1962         EVENT_CMD_FL_NEEDS_REC          = 2,
1963 };
1964 
1965 static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1966 {
1967         return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1968 }
1969 
1970 static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1971 {
1972         return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1973 }
1974 
1975 extern int trace_event_enable_disable(struct trace_event_file *file,
1976                                       int enable, int soft_disable);
1977 extern int tracing_alloc_snapshot(void);
1978 extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1979 extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1980 
1981 extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1982 extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1983 
1984 extern const char *__start___trace_bprintk_fmt[];
1985 extern const char *__stop___trace_bprintk_fmt[];
1986 
1987 extern const char *__start___tracepoint_str[];
1988 extern const char *__stop___tracepoint_str[];
1989 
1990 void trace_printk_control(bool enabled);
1991 void trace_printk_start_comm(void);
1992 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1993 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1994 
1995 /* Used from boot time tracer */
1996 extern int trace_set_options(struct trace_array *tr, char *option);
1997 extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
1998 extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
1999                                           unsigned long size, int cpu_id);
2000 extern int tracing_set_cpumask(struct trace_array *tr,
2001                                 cpumask_var_t tracing_cpumask_new);
2002 
2003 
2004 #define MAX_EVENT_NAME_LEN      64
2005 
2006 extern ssize_t trace_parse_run_command(struct file *file,
2007                 const char __user *buffer, size_t count, loff_t *ppos,
2008                 int (*createfn)(const char *));
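
/*
 * Illustrative sketch of the write() handler pattern this helper
 * serves (probes_write() and my_create_fn() are hypothetical; the
 * dynamic event interfaces, e.g. kprobe events, follow this shape):
 *
 *     static int my_create_fn(const char *raw_command)
 *     {
 *             ... parse one command line, create or delete an event ...
 *             return 0;
 *     }
 *
 *     static ssize_t probes_write(struct file *file,
 *                                 const char __user *buffer,
 *                                 size_t count, loff_t *ppos)
 *     {
 *             return trace_parse_run_command(file, buffer, count, ppos,
 *                                            my_create_fn);
 *     }
 */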
2009 
2010 extern unsigned int err_pos(char *cmd, const char *str);
2011 extern void tracing_log_err(struct trace_array *tr,
2012                             const char *loc, const char *cmd,
2013                             const char **errs, u8 type, u16 pos);
2014 
2015 /*
2016  * Normal trace_printk() and friends allocate special buffers
2017  * to do the manipulation, and also save the print formats
2018  * into sections for display. But the trace infrastructure wants
2019  * to use these without that added overhead, at the price of being
2020  * a bit slower (this is used mainly for warnings, where we don't
2021  * care about performance). internal_trace_puts() exists for such
2022  * a purpose.
2023  */
2024 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
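
/*
 * For example (illustrative), the snapshot code warns into the trace
 * buffer itself with:
 *
 *     internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
 */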
2025 
2026 #undef FTRACE_ENTRY
2027 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print)     \
2028         extern struct trace_event_call                                  \
2029         __aligned(4) event_##call;
2030 #undef FTRACE_ENTRY_DUP
2031 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
2032         FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
2033 #undef FTRACE_ENTRY_PACKED
2034 #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
2035         FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
2036 
2037 #include "trace_entries.h"
2038 
2039 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
2040 int perf_ftrace_event_register(struct trace_event_call *call,
2041                                enum trace_reg type, void *data);
2042 #else
2043 #define perf_ftrace_event_register NULL
2044 #endif
2045 
2046 #ifdef CONFIG_FTRACE_SYSCALLS
2047 void init_ftrace_syscalls(void);
2048 const char *get_syscall_name(int syscall);
2049 #else
2050 static inline void init_ftrace_syscalls(void) { }
2051 static inline const char *get_syscall_name(int syscall)
2052 {
2053         return NULL;
2054 }
2055 #endif
2056 
2057 #ifdef CONFIG_EVENT_TRACING
2058 void trace_event_init(void);
2059 void trace_event_eval_update(struct trace_eval_map **map, int len);
2060 /* Used from boot time tracer */
2061 extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
2062 extern int trigger_process_regex(struct trace_event_file *file, char *buff);
2063 #else
2064 static inline void __init trace_event_init(void) { }
2065 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
2066 #endif
2067 
2068 #ifdef CONFIG_TRACER_SNAPSHOT
2069 void tracing_snapshot_instance(struct trace_array *tr);
2070 int tracing_alloc_snapshot_instance(struct trace_array *tr);
2071 int tracing_arm_snapshot(struct trace_array *tr);
2072 void tracing_disarm_snapshot(struct trace_array *tr);
2073 #else
2074 static inline void tracing_snapshot_instance(struct trace_array *tr) { }
2075 static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
2076 {
2077         return 0;
2078 }
2079 static inline int tracing_arm_snapshot(struct trace_array *tr) { return 0; }
2080 static inline void tracing_disarm_snapshot(struct trace_array *tr) { }
2081 #endif
2082 
2083 #ifdef CONFIG_PREEMPT_TRACER
2084 void tracer_preempt_on(unsigned long a0, unsigned long a1);
2085 void tracer_preempt_off(unsigned long a0, unsigned long a1);
2086 #else
2087 static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
2088 static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
2089 #endif
2090 #ifdef CONFIG_IRQSOFF_TRACER
2091 void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
2092 void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
2093 #else
2094 static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
2095 static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
2096 #endif
2097 
2098 /*
2099  * Reset the state of the trace_iterator so that it can read consumed data.
2100  * Normally, the trace_iterator is used for reading the data when it is not
2101  * consumed, and must retain state.
2102  */
2103 static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
2104 {
2105         memset_startat(iter, 0, seq);
2106         iter->pos = -1;
2107 }
2108 
2109 /* Check the name is good for event/group/fields */
2110 static inline bool __is_good_name(const char *name, bool hash_ok)
2111 {
2112         if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-'))
2113                 return false;
2114         while (*++name != '\0') {
2115                 if (!isalpha(*name) && !isdigit(*name) && *name != '_' &&
2116                     (!hash_ok || *name != '-'))
2117                         return false;
2118         }
2119         return true;
2120 }
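
/*
 * Examples (illustrative): "sched_switch" and "_probe1" pass;
 * "1probe" and "foo.bar" do not; "foo-bar" passes only when
 * @hash_ok is true, as it is for is_good_system_name() below.
 */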
2121 
2122 /* Check the name is good for event/group/fields */
2123 static inline bool is_good_name(const char *name)
2124 {
2125         return __is_good_name(name, false);
2126 }
2127 
2128 /* Check the name is good for system */
2129 static inline bool is_good_system_name(const char *name)
2130 {
2131         return __is_good_name(name, true);
2132 }
2133 
2134 /* Convert certain expected symbols into '_' when generating event names */
2135 static inline void sanitize_event_name(char *name)
2136 {
2137         while (*name++ != '\0')
2138                 if (*name == ':' || *name == '.')
2139                         *name = '_';
2140 }
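
/* e.g. (illustrative) "foo.bar:baz" is rewritten to "foo_bar_baz" */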
2141 
2142 /*
2143  * This is a generic way to read and write a u64 value from a file in tracefs.
2144  *
2145  * The value is stored in the variable pointed to by *val. The value
2146  * needs to be at least *min and at most *max. Writes are protected
2147  * by an existing *lock.
2148  */
2149 struct trace_min_max_param {
2150         struct mutex    *lock;
2151         u64             *val;
2152         u64             *min;
2153         u64             *max;
2154 };
2155 
2156 #define U64_STR_SIZE            24      /* 20 digits max */
2157 
2158 extern const struct file_operations trace_min_max_fops;
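
/*
 * Illustrative setup sketch (all "my_*" names are hypothetical; the
 * osnoise/timerlat tunable files use this pattern):
 *
 *     static u64 my_val = 50, my_min = 1, my_max = 1000000;
 *     static DEFINE_MUTEX(my_lock);
 *
 *     static struct trace_min_max_param my_param = {
 *             .lock = &my_lock,
 *             .val  = &my_val,
 *             .min  = &my_min,
 *             .max  = &my_max,
 *     };
 *
 *     trace_create_file("my_value_us", TRACE_MODE_WRITE, parent,
 *                       &my_param, &trace_min_max_fops);
 */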
2159 
2160 #ifdef CONFIG_RV
2161 extern int rv_init_interface(void);
2162 #else
2163 static inline int rv_init_interface(void)
2164 {
2165         return 0;
2166 }
2167 #endif
2168 
2169 #endif /* _LINUX_KERNEL_TRACE_H */
2170 
