
TOMOYO Linux Cross Reference
Linux/kernel/trace/trace_selftest.c


// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}

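/*
 * Consume every event queued on the given CPU and verify that each entry
 * is of a known trace type. Consuming more events than trace_buf_size
 * means the ring buffer itself is corrupted.
 */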
static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer is of size trace_buf_size; if we loop
                 * more times than that, something is wrong with the
                 * ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&buf->tr->max_lock);

        cnt = ring_buffer_entries(buf->buffer);

        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(buf, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&buf->tr->max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
                trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip,
                                         struct ftrace_ops *op,
                                         struct ftrace_regs *fregs)
{
        trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
        .func                   = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
        .func                   = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
        .func                   = trace_selftest_test_probe3_func,
};

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
               trace_selftest_test_probe1_cnt,
               trace_selftest_test_probe2_cnt,
               trace_selftest_test_probe3_cnt,
               trace_selftest_test_global_cnt,
               trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}

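/*
 * Register several ftrace_ops with different filters on the same two test
 * functions: probe1 traces only function 1, probe2 only function 2, and
 * probe3 both. Each call to the test functions below must then bump
 * exactly the counters whose filters match.
 */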
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
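        /*
         * The last argument of ftrace_set_filter() is "reset": a non-zero
         * value clears any previously set filter before adding the new
         * one, which is why probe3 appends its second function with 0.
         */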
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        /* First time we are running with main function */
        if (cnt > 1) {
                ftrace_init_array_ops(tr, trace_selftest_test_global_func);
                register_ftrace_function(tr->ops);
        }

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out;
        }

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out_free;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        /* Remove trace function from probe 3 */
        func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
        len1 = strlen(func1_name);

        ftrace_set_filter(&test_probe3, func1_name, len1, 0);

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out_free;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 5)
                goto out_free;

        ret = 0;
 out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

 out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        if (cnt > 1)
                unregister_ftrace_function(tr->ops);
        ftrace_reset_array_ops(tr);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                                  struct trace_array *tr,
                                                  int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* passed in by parameter to keep gcc from optimizing it out */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        ftrace_enabled = 1;
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(tr, 1);
        trace->reset(tr);

 out:
        ftrace_enabled = save_ftrace_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(tr, 2);

        return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
                                               unsigned long pip,
                                               struct ftrace_ops *op,
                                               struct ftrace_regs *fregs)
{
        /*
         * This function is registered without the recursion safe flag.
         * The ftrace infrastructure should provide the recursion
         * protection. If not, this will crash the kernel!
         */
        if (trace_selftest_recursion_cnt++ > 10)
                return;
        DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                                                    unsigned long pip,
                                                    struct ftrace_ops *op,
                                                    struct ftrace_regs *fregs)
{
        /*
         * We said we would provide our own recursion. By calling
         * this function again, we should recurse back into this function
         * and count again. But this only happens if the arch supports
         * all of ftrace features and nothing else is using the function
         * tracing utility.
         */
        if (trace_selftest_recursion_cnt++)
                return;
        DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
        .func                   = trace_selftest_test_recursion_func,
        .flags                  = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
        .func                   = trace_selftest_test_recursion_safe_func,
};

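/*
 * Call a traced function from inside an ftrace callback and make sure the
 * kernel neither crashes nor recurses without bound: first with ftrace
 * supplying the recursion protection, then with the callback providing
 * its own.
 */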
static int
trace_selftest_function_recursion(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion: ");


        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_rec_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_rec_probe);

        ret = -1;
        /*
         * Recursion allows for transitions between contexts,
         * and may call the callback twice.
         */
        if (trace_selftest_recursion_cnt != 1 &&
            trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called once (or twice) (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        trace_selftest_recursion_cnt = 1;

        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion safe: ");

        ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_recsafe_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_recsafe_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called expected 2 times (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

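/* Records whether the regs-saving callback actually received a pt_regs */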
static enum {
        TRACE_SELFTEST_REGS_START,
        TRACE_SELFTEST_REGS_FOUND,
        TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
                                          unsigned long pip,
                                          struct ftrace_ops *op,
                                          struct ftrace_regs *fregs)
{
        struct pt_regs *regs = ftrace_get_regs(fregs);

        if (regs)
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
        else
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
        .func           = trace_selftest_test_regs_func,
        .flags          = FTRACE_OPS_FL_SAVE_REGS,
};

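/*
 * FTRACE_OPS_FL_SAVE_REGS asks ftrace to hand the callback a full pt_regs;
 * on architectures without support, registration must fail, and the
 * SAVE_REGS_IF_SUPPORTED variant must fall back to registering without
 * regs. A minimal sketch of a regs-consuming callback (illustrative only,
 * my_handler is hypothetical):
 *
 *        static void my_handler(unsigned long ip, unsigned long parent_ip,
 *                               struct ftrace_ops *op, struct ftrace_regs *fregs)
 *        {
 *                struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *                if (regs)
 *                        ;        // full register state is available here
 *        }
 */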
static int
trace_selftest_function_regs(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;
        int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        supported = 1;
#endif

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace regs%s: ",
                !supported ? "(no arch support)" : "");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
        /*
         * If DYNAMIC_FTRACE is not set, then we just trace all functions.
         * This test really doesn't care.
         */
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_regs_probe);
        /*
         * Now if the arch does not support passing regs, then this should
         * have failed.
         */
        if (!supported) {
                if (!ret) {
                        pr_cont("*registered save-regs without arch support* ");
                        goto out;
                }
                test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
                ret = register_ftrace_function(&test_regs_probe);
        }
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }


        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_regs_probe);

        ret = -1;

        switch (trace_selftest_regs_stat) {
        case TRACE_SELFTEST_REGS_START:
                pr_cont("*callback never called* ");
                goto out;

        case TRACE_SELFTEST_REGS_FOUND:
                if (supported)
                        break;
                pr_cont("*callback received regs without arch support* ");
                goto out;

        case TRACE_SELFTEST_REGS_NOT_FOUND:
                if (!supported)
                        break;
                pr_cont("*callback received NULL regs* ");
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        ftrace_enabled = 1;
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);
        if (ret)
                goto out;

        ret = trace_selftest_function_recursion();
        if (ret)
                goto out;

        ret = trace_selftest_function_regs();
 out:
        ftrace_enabled = save_ftrace_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define CHAR_NUMBER 123
#define SHORT_NUMBER 12345
#define WORD_NUMBER 1234567890
#define LONG_NUMBER 1234567890123456789LL
#define ERRSTR_BUFLEN 128

struct fgraph_fixture {
        struct fgraph_ops gops;
        int store_size;
        const char *store_type_name;
        char error_str_buf[ERRSTR_BUFLEN];
        char *error_str;
};

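/*
 * The entry handler reserves per-call storage with fgraph_reserve_data()
 * and writes a known pattern; the matching return handler reads it back
 * with fgraph_retrieve_data() and checks that the value survived across
 * the traced call.
 */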
static __init int store_entry(struct ftrace_graph_ent *trace,
                              struct fgraph_ops *gops)
{
        struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
        const char *type = fixture->store_type_name;
        int size = fixture->store_size;
        void *p;

        p = fgraph_reserve_data(gops->idx, size);
        if (!p) {
                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
                         "Failed to reserve %s\n", type);
                return 0;
        }

        switch (size) {
        case 1:
                *(char *)p = CHAR_NUMBER;
                break;
        case 2:
                *(short *)p = SHORT_NUMBER;
                break;
        case 4:
                *(int *)p = WORD_NUMBER;
                break;
        case 8:
                *(long long *)p = LONG_NUMBER;
                break;
        }

        return 1;
}

static __init void store_return(struct ftrace_graph_ret *trace,
                                struct fgraph_ops *gops)
{
        struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
        const char *type = fixture->store_type_name;
        long long expect = 0;
        long long found = -1;
        int size;
        char *p;

        p = fgraph_retrieve_data(gops->idx, &size);
        if (!p) {
                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
                         "Failed to retrieve %s\n", type);
                return;
        }
        if (fixture->store_size > size) {
                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
                         "Retrieved size %d is smaller than expected %d\n",
                         size, (int)fixture->store_size);
                return;
        }

        switch (fixture->store_size) {
        case 1:
                expect = CHAR_NUMBER;
                found = *(char *)p;
                break;
        case 2:
                expect = SHORT_NUMBER;
                found = *(short *)p;
                break;
        case 4:
                expect = WORD_NUMBER;
                found = *(int *)p;
                break;
        case 8:
                expect = LONG_NUMBER;
                found = *(long long *)p;
                break;
        }

        if (found != expect) {
                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
                         "%s expected %lld but found %lld\n", type, expect, found);
                return;
        }
        fixture->error_str = NULL;
}

static int __init init_fgraph_fixture(struct fgraph_fixture *fixture)
{
        char *func_name;
        int len;

        snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
                 "Failed to execute storage %s\n", fixture->store_type_name);
        fixture->error_str = fixture->error_str_buf;

        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        return ftrace_set_filter(&fixture->gops.ops, func_name, len, 1);
}

/* Test fgraph storage for each size */
static int __init test_graph_storage_single(struct fgraph_fixture *fixture)
{
        int size = fixture->store_size;
        int ret;

        pr_cont("PASSED\n");
        pr_info("Testing fgraph storage of %d byte%s: ", size, str_plural(size));

        ret = init_fgraph_fixture(fixture);
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                return -1;
        }

        ret = register_ftrace_graph(&fixture->gops);
        if (ret) {
                pr_warn("Failed to init store_bytes fgraph tracing\n");
                return -1;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_graph(&fixture->gops);

        if (fixture->error_str) {
                pr_cont("*** %s ***", fixture->error_str);
                return -1;
        }

        return 0;
}

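/* One fixture per supported storage size: 1, 2, 4 and 8 bytes */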
static struct fgraph_fixture store_bytes[4] __initdata = {
        [0] = {
                .gops = {
                        .entryfunc              = store_entry,
                        .retfunc                = store_return,
                },
                .store_size = 1,
                .store_type_name = "byte",
        },
        [1] = {
                .gops = {
                        .entryfunc              = store_entry,
                        .retfunc                = store_return,
                },
                .store_size = 2,
                .store_type_name = "short",
        },
        [2] = {
                .gops = {
                        .entryfunc              = store_entry,
                        .retfunc                = store_return,
                },
                .store_size = 4,
                .store_type_name = "word",
        },
        [3] = {
                .gops = {
                        .entryfunc              = store_entry,
                        .retfunc                = store_return,
                },
                .store_size = 8,
                .store_type_name = "long long",
        },
};

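/*
 * Register all four fixtures at once so that each function entry carries
 * four independent pieces of fgraph storage, then verify each of them on
 * return. Teardown walks backwards so only the successfully registered
 * and filtered fixtures are undone.
 */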
static __init int test_graph_storage_multi(void)
{
        struct fgraph_fixture *fixture;
        bool printed = false;
        int i, j, ret;

        pr_cont("PASSED\n");
        pr_info("Testing multiple fgraph storage on a function: ");

        for (i = 0; i < ARRAY_SIZE(store_bytes); i++) {
                fixture = &store_bytes[i];
                ret = init_fgraph_fixture(fixture);
                if (ret && ret != -ENODEV) {
                        pr_cont("*Could not set filter* ");
                        printed = true;
                        goto out2;
                }
        }

        for (j = 0; j < ARRAY_SIZE(store_bytes); j++) {
                fixture = &store_bytes[j];
                ret = register_ftrace_graph(&fixture->gops);
                if (ret) {
                        pr_warn("Failed to init store_bytes fgraph tracing\n");
                        printed = true;
                        goto out1;
                }
        }

        DYN_FTRACE_TEST_NAME();
out1:
        while (--j >= 0) {
                fixture = &store_bytes[j];
                unregister_ftrace_graph(&fixture->gops);

                if (fixture->error_str && !printed) {
                        pr_cont("*** %s ***", fixture->error_str);
                        printed = true;
                }
        }
out2:
        while (--i >= 0) {
                fixture = &store_bytes[i];
                ftrace_free_filter(&fixture->gops.ops);

                if (fixture->error_str && !printed) {
                        pr_cont("*** %s ***", fixture->error_str);
                        printed = true;
                }
        }
        return printed ? -1 : 0;
}

/* Test the storage passed across function_graph entry and return */
static __init int test_graph_storage(void)
{
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(store_bytes); i++) {
                ret = test_graph_storage_single(&store_bytes[i]);
                if (ret)
                        return ret;
        }

        return test_graph_storage_multi();
}
#else
static inline int test_graph_storage(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace,
                                      struct fgraph_ops *gops)
{
        /* This is harmlessly racy; we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops_enabled()) {
                        ftrace_dump(DUMP_ALL);
                        /* ftrace_dump() disables tracing */
                        tracing_on();
                }
                return 0;
        }

        return trace_graph_entry(trace, gops);
}

static struct fgraph_ops fgraph_ops __initdata  = {
        .entryfunc              = &trace_graph_entry_watchdog,
        .retfunc                = &trace_graph_return,
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static struct ftrace_ops direct;
#endif

/*
 * Pretty much the same as the function tracer, from which this selftest
 * has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
                                        struct trace_array *tr)
{
        int ret;
        unsigned long count;
        char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /*
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
        tracing_reset_online_cpus(&tr->array_buffer);
        fgraph_ops.private = tr;
        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                disable_tracing_selftest("recovering from a hang");
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        /* Need to also simulate the tr->reset to remove this fgraph_ops */
        tracing_stop_cmdline_record();
        unregister_ftrace_graph(&fgraph_ops);

        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        /*
         * These tests can take some time to run. Make sure on non-PREEMPT
         * kernels, we do not trigger the softlockup detector.
         */
        cond_resched();

        tracing_reset_online_cpus(&tr->array_buffer);
        fgraph_ops.private = tr;

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /*
         * Register a direct function together with the graph tracer
         * and make sure we still get a graph trace.
         */
        ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
        ret = register_ftrace_direct(&direct,
                                     (unsigned long)ftrace_stub_direct_tramp);
        if (ret)
                goto out;

        cond_resched();

        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        count = 0;

        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        unregister_ftrace_graph(&fgraph_ops);

        ret = unregister_ftrace_direct(&direct,
                                       (unsigned long)ftrace_stub_direct_tramp,
                                       true);
        if (ret)
                goto out;

        cond_resched();

        tracing_start();

        if (!ret && !count) {
                ret = -1;
                goto out;
        }

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);
#endif

        ret = test_graph_storage();

        /* Don't test dynamic tracing, the function tracer already did */
out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
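/*
 * Open a known irqs-off window with udelay() between local_irq_disable()
 * and local_irq_enable(), then check that the max-latency buffer recorded
 * entries for it.
 */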
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent to buffer
         * flipping failure, because tracing_stop() disables the tr
         * and max buffers, making flipping impossible in case of
         * parallel max irqs-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning subsequent to buffer
         * flipping failure, because tracing_stop() disables the tr
         * and max buffers, making flipping impossible in case of
         * parallel max preempt-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent to buffer
         * flipping failure, because tracing_stop() disables the tr
         * and max buffers, making flipping impossible in case of
         * parallel max irqs/preempt-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tr->max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

out:
        tracing_start();
out_no_start:
        trace->reset(tr);
        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

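/* Handshake between the wakeup selftest and its -deadline helper thread */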
struct wakeup_test_data {
        struct completion       is_ready;
        int                     go;
};

static int trace_wakeup_test_thread(void *data)
{
        /* Make this a -deadline thread */
        static const struct sched_attr attr = {
                .sched_policy = SCHED_DEADLINE,
                .sched_runtime = 100000ULL,
                .sched_deadline = 10000000ULL,
                .sched_period = 10000000ULL
        };
        struct wakeup_test_data *x = data;

        sched_setattr(current, &attr);

        /* Let the test know we are running at the new policy */
        complete(&x->is_ready);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!x->go) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        complete(&x->is_ready);

        set_current_state(TASK_INTERRUPTIBLE);

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}
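/*
 * Wake the highest-priority (-deadline) thread from a lower-priority
 * context; the wakeup tracer must record the resulting wakeup latency
 * in the max buffer.
 */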
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        struct task_struct *p;
        struct wakeup_test_data data;
        unsigned long count;
        int ret;

        memset(&data, 0, sizeof(data));

        init_completion(&data.is_ready);

        /* create a -deadline thread */
        p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at -deadline policy */
        wait_for_completion(&data.is_ready);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        while (p->on_rq) {
                /*
                 * Sleep to make sure the -deadline thread is asleep too.
                 * On virtual machines we can't rely on timings,
                 * but we want to make sure this test still works.
                 */
                msleep(100);
        }

        init_completion(&data.is_ready);

        data.go = 1;
        /* memory barrier is in the wake_up_process() */

        wake_up_process(p);

        /* Wait for the task to wake up */
        wait_for_completion(&data.is_ready);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);


        trace->reset(tr);
        tracing_start();

        tr->max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
