1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * builtin-report.c 4 * 5 * Builtin report command: Analyze the perf.data input file, 6 * look up and read DSOs and symbol information and display 7 * a histogram of results, along various sorting keys. 8 */ 9 #include "builtin.h" 10 11 #include "util/config.h" 12 13 #include "util/annotate.h" 14 #include "util/color.h" 15 #include "util/dso.h" 16 #include <linux/list.h> 17 #include <linux/rbtree.h> 18 #include <linux/err.h> 19 #include <linux/zalloc.h> 20 #include "util/map.h" 21 #include "util/symbol.h" 22 #include "util/map_symbol.h" 23 #include "util/mem-events.h" 24 #include "util/branch.h" 25 #include "util/callchain.h" 26 #include "util/values.h" 27 28 #include "perf.h" 29 #include "util/debug.h" 30 #include "util/evlist.h" 31 #include "util/evsel.h" 32 #include "util/evswitch.h" 33 #include "util/header.h" 34 #include "util/mem-info.h" 35 #include "util/session.h" 36 #include "util/srcline.h" 37 #include "util/tool.h" 38 39 #include <subcmd/parse-options.h> 40 #include <subcmd/exec-cmd.h> 41 #include "util/parse-events.h" 42 43 #include "util/thread.h" 44 #include "util/sort.h" 45 #include "util/hist.h" 46 #include "util/data.h" 47 #include "arch/common.h" 48 #include "util/time-utils.h" 49 #include "util/auxtrace.h" 50 #include "util/units.h" 51 #include "util/util.h" // perf_tip() 52 #include "ui/ui.h" 53 #include "ui/progress.h" 54 #include "util/block-info.h" 55 56 #include <dlfcn.h> 57 #include <errno.h> 58 #include <inttypes.h> 59 #include <regex.h> 60 #include <linux/ctype.h> 61 #include <signal.h> 62 #include <linux/bitmap.h> 63 #include <linux/list_sort.h> 64 #include <linux/string.h> 65 #include <linux/stringify.h> 66 #include <linux/time64.h> 67 #include <sys/types.h> 68 #include <sys/stat.h> 69 #include <unistd.h> 70 #include <linux/mman.h> 71 72 #ifdef HAVE_LIBTRACEEVENT 73 #include <traceevent/event-parse.h> 74 #endif 75 76 struct report { 77 struct perf_tool tool; 78 struct perf_session *session; 79 struct evswitch evswitch; 80 #ifdef HAVE_SLANG_SUPPORT 81 bool use_tui; 82 #endif 83 #ifdef HAVE_GTK2_SUPPORT 84 bool use_gtk; 85 #endif 86 bool use_stdio; 87 bool show_full_info; 88 bool show_threads; 89 bool inverted_callchain; 90 bool mem_mode; 91 bool stats_mode; 92 bool tasks_mode; 93 bool mmaps_mode; 94 bool header; 95 bool header_only; 96 bool nonany_branch_mode; 97 bool group_set; 98 bool stitch_lbr; 99 bool disable_order; 100 bool skip_empty; 101 bool data_type; 102 int max_stack; 103 struct perf_read_values show_threads_values; 104 const char *pretty_printing_style; 105 const char *cpu_list; 106 const char *symbol_filter_str; 107 const char *time_str; 108 struct perf_time_interval *ptime_range; 109 int range_size; 110 int range_num; 111 float min_percent; 112 u64 nr_entries; 113 u64 queue_size; 114 u64 total_cycles; 115 int socket_filter; 116 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 117 struct branch_type_stat brtype_stat; 118 bool symbol_ipc; 119 bool total_cycles_mode; 120 struct block_report *block_reports; 121 int nr_block_reports; 122 }; 123 124 static int report__config(const char *var, const char *value, void *cb) 125 { 126 struct report *rep = cb; 127 128 if (!strcmp(var, "report.group")) { 129 symbol_conf.event_group = perf_config_bool(var, value); 130 return 0; 131 } 132 if (!strcmp(var, "report.percent-limit")) { 133 double pcnt = strtof(value, NULL); 134 135 rep->min_percent = pcnt; 136 callchain_param.min_percent = pcnt; 137 return 0; 138 } 139 if (!strcmp(var, "report.children")) { 140 
symbol_conf.cumulate_callchain = perf_config_bool(var, value); 141 return 0; 142 } 143 if (!strcmp(var, "report.queue-size")) 144 return perf_config_u64(&rep->queue_size, var, value); 145 146 if (!strcmp(var, "report.sort_order")) { 147 default_sort_order = strdup(value); 148 if (!default_sort_order) { 149 pr_err("Not enough memory for report.sort_order\n"); 150 return -1; 151 } 152 return 0; 153 } 154 155 if (!strcmp(var, "report.skip-empty")) { 156 rep->skip_empty = perf_config_bool(var, value); 157 return 0; 158 } 159 160 pr_debug("%s variable unknown, ignoring...", var); 161 return 0; 162 } 163 164 static int hist_iter__report_callback(struct hist_entry_iter *iter, 165 struct addr_location *al, bool single, 166 void *arg) 167 { 168 int err = 0; 169 struct report *rep = arg; 170 struct hist_entry *he = iter->he; 171 struct evsel *evsel = iter->evsel; 172 struct perf_sample *sample = iter->sample; 173 struct mem_info *mi; 174 struct branch_info *bi; 175 176 if (!ui__has_annotation() && !rep->symbol_ipc) 177 return 0; 178 179 if (sort__mode == SORT_MODE__BRANCH) { 180 bi = he->branch_info; 181 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel); 182 if (err) 183 goto out; 184 185 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel); 186 187 } else if (rep->mem_mode) { 188 mi = he->mem_info; 189 err = addr_map_symbol__inc_samples(mem_info__daddr(mi), sample, evsel); 190 if (err) 191 goto out; 192 193 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr); 194 195 } else if (symbol_conf.cumulate_callchain) { 196 if (single) 197 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr); 198 } else { 199 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr); 200 } 201 202 out: 203 return err; 204 } 205 206 static int hist_iter__branch_callback(struct hist_entry_iter *iter, 207 struct addr_location *al __maybe_unused, 208 bool single __maybe_unused, 209 void *arg) 210 { 211 struct hist_entry *he = iter->he; 212 struct report *rep = arg; 213 struct branch_info *bi = he->branch_info; 214 struct perf_sample *sample = iter->sample; 215 struct evsel *evsel = iter->evsel; 216 int err; 217 218 branch_type_count(&rep->brtype_stat, &bi->flags, 219 bi->from.addr, bi->to.addr); 220 221 if (!ui__has_annotation() && !rep->symbol_ipc) 222 return 0; 223 224 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel); 225 if (err) 226 goto out; 227 228 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel); 229 230 out: 231 return err; 232 } 233 234 static void setup_forced_leader(struct report *report, 235 struct evlist *evlist) 236 { 237 if (report->group_set) 238 evlist__force_leader(evlist); 239 } 240 241 static int process_feature_event(struct perf_session *session, 242 union perf_event *event) 243 { 244 struct report *rep = container_of(session->tool, struct report, tool); 245 246 if (event->feat.feat_id < HEADER_LAST_FEATURE) 247 return perf_event__process_feature(session, event); 248 249 if (event->feat.feat_id != HEADER_LAST_FEATURE) { 250 pr_err("failed: wrong feature ID: %" PRI_lu64 "\n", 251 event->feat.feat_id); 252 return -1; 253 } else if (rep->header_only) { 254 session_done = 1; 255 } 256 257 /* 258 * (feat_id = HEADER_LAST_FEATURE) is the end marker which 259 * means all features are received, now we can force the 260 * group if needed. 
261 */ 262 setup_forced_leader(rep, session->evlist); 263 return 0; 264 } 265 266 static int process_sample_event(struct perf_tool *tool, 267 union perf_event *event, 268 struct perf_sample *sample, 269 struct evsel *evsel, 270 struct machine *machine) 271 { 272 struct report *rep = container_of(tool, struct report, tool); 273 struct addr_location al; 274 struct hist_entry_iter iter = { 275 .evsel = evsel, 276 .sample = sample, 277 .hide_unresolved = symbol_conf.hide_unresolved, 278 .add_entry_cb = hist_iter__report_callback, 279 }; 280 int ret = 0; 281 282 if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num, 283 sample->time)) { 284 return 0; 285 } 286 287 if (evswitch__discard(&rep->evswitch, evsel)) 288 return 0; 289 290 addr_location__init(&al); 291 if (machine__resolve(machine, &al, sample) < 0) { 292 pr_debug("problem processing %d event, skipping it.\n", 293 event->header.type); 294 ret = -1; 295 goto out_put; 296 } 297 298 if (rep->stitch_lbr) 299 thread__set_lbr_stitch_enable(al.thread, true); 300 301 if (symbol_conf.hide_unresolved && al.sym == NULL) 302 goto out_put; 303 304 if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) 305 goto out_put; 306 307 if (sort__mode == SORT_MODE__BRANCH) { 308 /* 309 * A non-synthesized event might not have a branch stack if 310 * branch stacks have been synthesized (using itrace options). 311 */ 312 if (!sample->branch_stack) 313 goto out_put; 314 315 iter.add_entry_cb = hist_iter__branch_callback; 316 iter.ops = &hist_iter_branch; 317 } else if (rep->mem_mode) { 318 iter.ops = &hist_iter_mem; 319 } else if (symbol_conf.cumulate_callchain) { 320 iter.ops = &hist_iter_cumulative; 321 } else { 322 iter.ops = &hist_iter_normal; 323 } 324 325 if (al.map != NULL) 326 dso__set_hit(map__dso(al.map)); 327 328 if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) { 329 hist__account_cycles(sample->branch_stack, &al, sample, 330 rep->nonany_branch_mode, 331 &rep->total_cycles); 332 } 333 334 ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep); 335 if (ret < 0) 336 pr_debug("problem adding hist entry, skipping event\n"); 337 out_put: 338 addr_location__exit(&al); 339 return ret; 340 } 341 342 static int process_read_event(struct perf_tool *tool, 343 union perf_event *event, 344 struct perf_sample *sample __maybe_unused, 345 struct evsel *evsel, 346 struct machine *machine __maybe_unused) 347 { 348 struct report *rep = container_of(tool, struct report, tool); 349 350 if (rep->show_threads) { 351 const char *name = evsel__name(evsel); 352 int err = perf_read_values_add_value(&rep->show_threads_values, 353 event->read.pid, event->read.tid, 354 evsel->core.idx, 355 name, 356 event->read.value); 357 358 if (err) 359 return err; 360 } 361 362 return 0; 363 } 364 365 /* For pipe mode, sample_type is not currently set */ 366 static int report__setup_sample_type(struct report *rep) 367 { 368 struct perf_session *session = rep->session; 369 u64 sample_type = evlist__combined_sample_type(session->evlist); 370 bool is_pipe = perf_data__is_pipe(session->data); 371 struct evsel *evsel; 372 373 if (session->itrace_synth_opts->callchain || 374 session->itrace_synth_opts->add_callchain || 375 (!is_pipe && 376 perf_header__has_feat(&session->header, HEADER_AUXTRACE) && 377 !session->itrace_synth_opts->set)) 378 sample_type |= PERF_SAMPLE_CALLCHAIN; 379 380 if (session->itrace_synth_opts->last_branch || 381 session->itrace_synth_opts->add_last_branch) 382 sample_type |= PERF_SAMPLE_BRANCH_STACK; 383 384 if (!is_pipe && 
!(sample_type & PERF_SAMPLE_CALLCHAIN)) { 385 if (perf_hpp_list.parent) { 386 ui__error("Selected --sort parent, but no " 387 "callchain data. Did you call " 388 "'perf record' without -g?\n"); 389 return -EINVAL; 390 } 391 if (symbol_conf.use_callchain && 392 !symbol_conf.show_branchflag_count) { 393 ui__error("Selected -g or --branch-history.\n" 394 "But no callchain or branch data.\n" 395 "Did you call 'perf record' without -g or -b?\n"); 396 return -1; 397 } 398 } else if (!callchain_param.enabled && 399 callchain_param.mode != CHAIN_NONE && 400 !symbol_conf.use_callchain) { 401 symbol_conf.use_callchain = true; 402 if (callchain_register_param(&callchain_param) < 0) { 403 ui__error("Can't register callchain params.\n"); 404 return -EINVAL; 405 } 406 } 407 408 if (symbol_conf.cumulate_callchain) { 409 /* Silently ignore if callchain is missing */ 410 if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { 411 symbol_conf.cumulate_callchain = false; 412 perf_hpp__cancel_cumulate(); 413 } 414 } 415 416 if (sort__mode == SORT_MODE__BRANCH) { 417 if (!is_pipe && 418 !(sample_type & PERF_SAMPLE_BRANCH_STACK)) { 419 ui__error("Selected -b but no branch data. " 420 "Did you call perf record without -b?\n"); 421 return -1; 422 } 423 } 424 425 if (sort__mode == SORT_MODE__MEMORY) { 426 /* 427 * FIXUP: prior to kernel 5.18, Arm SPE missed to set 428 * PERF_SAMPLE_DATA_SRC bit in sample type. For backward 429 * compatibility, set the bit if it's an old perf data file. 430 */ 431 evlist__for_each_entry(session->evlist, evsel) { 432 if (strstr(evsel__name(evsel), "arm_spe") && 433 !(sample_type & PERF_SAMPLE_DATA_SRC)) { 434 evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC; 435 sample_type |= PERF_SAMPLE_DATA_SRC; 436 } 437 } 438 439 if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) { 440 ui__error("Selected --mem-mode but no mem data. " 441 "Did you call perf record without -d?\n"); 442 return -1; 443 } 444 } 445 446 callchain_param_setup(sample_type, perf_env__arch(&rep->session->header.env)); 447 448 if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) { 449 ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n" 450 "Please apply --call-graph lbr when recording.\n"); 451 rep->stitch_lbr = false; 452 } 453 454 /* ??? handle more cases than just ANY? 
*/ 455 if (!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY)) 456 rep->nonany_branch_mode = true; 457 458 #if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_DWARF_SUPPORT) 459 if (dwarf_callchain_users) { 460 ui__warning("Please install libunwind or libdw " 461 "development packages during the perf build.\n"); 462 } 463 #endif 464 465 return 0; 466 } 467 468 static void sig_handler(int sig __maybe_unused) 469 { 470 session_done = 1; 471 } 472 473 static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep, 474 const char *evname, FILE *fp) 475 { 476 size_t ret; 477 char unit; 478 unsigned long nr_samples = hists->stats.nr_samples; 479 u64 nr_events = hists->stats.total_period; 480 struct evsel *evsel = hists_to_evsel(hists); 481 char buf[512]; 482 size_t size = sizeof(buf); 483 int socked_id = hists->socket_filter; 484 485 if (quiet) 486 return 0; 487 488 if (symbol_conf.filter_relative) { 489 nr_samples = hists->stats.nr_non_filtered_samples; 490 nr_events = hists->stats.total_non_filtered_period; 491 } 492 493 if (evsel__is_group_event(evsel)) { 494 struct evsel *pos; 495 496 evsel__group_desc(evsel, buf, size); 497 evname = buf; 498 499 for_each_group_member(pos, evsel) { 500 const struct hists *pos_hists = evsel__hists(pos); 501 502 if (symbol_conf.filter_relative) { 503 nr_samples += pos_hists->stats.nr_non_filtered_samples; 504 nr_events += pos_hists->stats.total_non_filtered_period; 505 } else { 506 nr_samples += pos_hists->stats.nr_samples; 507 nr_events += pos_hists->stats.total_period; 508 } 509 } 510 } 511 512 nr_samples = convert_unit(nr_samples, &unit); 513 ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit); 514 if (evname != NULL) { 515 ret += fprintf(fp, " of event%s '%s'", 516 evsel->core.nr_members > 1 ? "s" : "", evname); 517 } 518 519 if (rep->time_str) 520 ret += fprintf(fp, " (time slices: %s)", rep->time_str); 521 522 if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) { 523 ret += fprintf(fp, ", show reference callgraph"); 524 } 525 526 if (rep->mem_mode) { 527 ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events); 528 ret += fprintf(fp, "\n# Sort order : %s", sort_order ? 
: default_mem_sort_order); 529 } else 530 ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events); 531 532 if (socked_id > -1) 533 ret += fprintf(fp, "\n# Processor Socket: %d", socked_id); 534 535 return ret + fprintf(fp, "\n#\n"); 536 } 537 538 static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *rep) 539 { 540 struct evsel *pos; 541 int i = 0, ret; 542 543 evlist__for_each_entry(evlist, pos) { 544 ret = report__browse_block_hists(&rep->block_reports[i++].hist, 545 rep->min_percent, pos, 546 &rep->session->header.env); 547 if (ret != 0) 548 return ret; 549 } 550 551 return 0; 552 } 553 554 static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, const char *help) 555 { 556 struct evsel *pos; 557 int i = 0; 558 559 if (!quiet) { 560 fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n", 561 evlist->stats.total_lost_samples); 562 } 563 564 evlist__for_each_entry(evlist, pos) { 565 struct hists *hists = evsel__hists(pos); 566 const char *evname = evsel__name(pos); 567 568 i++; 569 if (symbol_conf.event_group && !evsel__is_group_leader(pos)) 570 continue; 571 572 if (rep->skip_empty && !hists->stats.nr_samples) 573 continue; 574 575 hists__fprintf_nr_sample_events(hists, rep, evname, stdout); 576 577 if (rep->total_cycles_mode) { 578 report__browse_block_hists(&rep->block_reports[i - 1].hist, 579 rep->min_percent, pos, NULL); 580 continue; 581 } 582 583 hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout, 584 !(symbol_conf.use_callchain || 585 symbol_conf.show_branchflag_count)); 586 fprintf(stdout, "\n\n"); 587 } 588 589 if (!quiet) 590 fprintf(stdout, "#\n# (%s)\n#\n", help); 591 592 if (rep->show_threads) { 593 bool style = !strcmp(rep->pretty_printing_style, "raw"); 594 perf_read_values_display(stdout, &rep->show_threads_values, 595 style); 596 perf_read_values_destroy(&rep->show_threads_values); 597 } 598 599 if (sort__mode == SORT_MODE__BRANCH) 600 branch_type_stat_display(stdout, &rep->brtype_stat); 601 602 return 0; 603 } 604 605 static void report__warn_kptr_restrict(const struct report *rep) 606 { 607 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host); 608 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL; 609 610 if (evlist__exclude_kernel(rep->session->evlist)) 611 return; 612 613 if (kernel_map == NULL || 614 (dso__hit(map__dso(kernel_map)) && 615 (kernel_kmap->ref_reloc_sym == NULL || 616 kernel_kmap->ref_reloc_sym->addr == 0))) { 617 const char *desc = 618 "As no suitable kallsyms nor vmlinux was found, kernel samples\n" 619 "can't be resolved."; 620 621 if (kernel_map && map__has_symbols(kernel_map)) { 622 desc = "If some relocation was applied (e.g. 
" 623 "kexec) symbols may be misresolved."; 624 } 625 626 ui__warning( 627 "Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n" 628 "Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n" 629 "Samples in kernel modules can't be resolved as well.\n\n", 630 desc); 631 } 632 } 633 634 static int report__gtk_browse_hists(struct report *rep, const char *help) 635 { 636 int (*hist_browser)(struct evlist *evlist, const char *help, 637 struct hist_browser_timer *timer, float min_pcnt); 638 639 hist_browser = dlsym(perf_gtk_handle, "evlist__gtk_browse_hists"); 640 641 if (hist_browser == NULL) { 642 ui__error("GTK browser not found!\n"); 643 return -1; 644 } 645 646 return hist_browser(rep->session->evlist, help, NULL, rep->min_percent); 647 } 648 649 static int report__browse_hists(struct report *rep) 650 { 651 int ret; 652 struct perf_session *session = rep->session; 653 struct evlist *evlist = session->evlist; 654 char *help = NULL, *path = NULL; 655 656 path = system_path(TIPDIR); 657 if (perf_tip(&help, path) || help == NULL) { 658 /* fallback for people who don't install perf ;-) */ 659 free(path); 660 path = system_path(DOCDIR); 661 if (perf_tip(&help, path) || help == NULL) 662 help = strdup("Cannot load tips.txt file, please install perf!"); 663 } 664 free(path); 665 666 switch (use_browser) { 667 case 1: 668 if (rep->total_cycles_mode) { 669 ret = evlist__tui_block_hists_browse(evlist, rep); 670 break; 671 } 672 673 ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent, 674 &session->header.env, true); 675 /* 676 * Usually "ret" is the last pressed key, and we only 677 * care if the key notifies us to switch data file. 678 */ 679 if (ret != K_SWITCH_INPUT_DATA && ret != K_RELOAD) 680 ret = 0; 681 break; 682 case 2: 683 ret = report__gtk_browse_hists(rep, help); 684 break; 685 default: 686 ret = evlist__tty_browse_hists(evlist, rep, help); 687 break; 688 } 689 free(help); 690 return ret; 691 } 692 693 static int report__collapse_hists(struct report *rep) 694 { 695 struct perf_session *session = rep->session; 696 struct evlist *evlist = session->evlist; 697 struct ui_progress prog; 698 struct evsel *pos; 699 int ret = 0; 700 701 /* 702 * The pipe data needs to setup hierarchy hpp formats now, because it 703 * cannot know about evsels in the data before reading the data. The 704 * normal file data saves the event (attribute) info in the header 705 * section, but pipe does not have the luxury. 
706 */ 707 if (perf_data__is_pipe(session->data)) { 708 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) { 709 ui__error("Failed to setup hierarchy output formats\n"); 710 return -1; 711 } 712 } 713 714 ui_progress__init(&prog, rep->nr_entries, "Merging related events..."); 715 716 evlist__for_each_entry(rep->session->evlist, pos) { 717 struct hists *hists = evsel__hists(pos); 718 719 if (pos->core.idx == 0) 720 hists->symbol_filter_str = rep->symbol_filter_str; 721 722 hists->socket_filter = rep->socket_filter; 723 724 ret = hists__collapse_resort(hists, &prog); 725 if (ret < 0) 726 break; 727 728 /* Non-group events are considered as leader */ 729 if (symbol_conf.event_group && !evsel__is_group_leader(pos)) { 730 struct hists *leader_hists = evsel__hists(evsel__leader(pos)); 731 732 hists__match(leader_hists, hists); 733 hists__link(leader_hists, hists); 734 } 735 } 736 737 ui_progress__finish(); 738 return ret; 739 } 740 741 static int hists__resort_cb(struct hist_entry *he, void *arg) 742 { 743 struct report *rep = arg; 744 struct symbol *sym = he->ms.sym; 745 746 if (rep->symbol_ipc && sym && !sym->annotate2) { 747 struct evsel *evsel = hists_to_evsel(he->hists); 748 749 symbol__annotate2(&he->ms, evsel, NULL); 750 } 751 752 return 0; 753 } 754 755 static void report__output_resort(struct report *rep) 756 { 757 struct ui_progress prog; 758 struct evsel *pos; 759 760 ui_progress__init(&prog, rep->nr_entries, "Sorting events for output..."); 761 762 evlist__for_each_entry(rep->session->evlist, pos) { 763 evsel__output_resort_cb(pos, &prog, hists__resort_cb, rep); 764 } 765 766 ui_progress__finish(); 767 } 768 769 static int count_sample_event(struct perf_tool *tool __maybe_unused, 770 union perf_event *event __maybe_unused, 771 struct perf_sample *sample __maybe_unused, 772 struct evsel *evsel, 773 struct machine *machine __maybe_unused) 774 { 775 struct hists *hists = evsel__hists(evsel); 776 777 hists__inc_nr_events(hists); 778 return 0; 779 } 780 781 static int count_lost_samples_event(struct perf_tool *tool, 782 union perf_event *event, 783 struct perf_sample *sample, 784 struct machine *machine __maybe_unused) 785 { 786 struct report *rep = container_of(tool, struct report, tool); 787 struct evsel *evsel; 788 789 evsel = evlist__id2evsel(rep->session->evlist, sample->id); 790 if (evsel) { 791 hists__inc_nr_lost_samples(evsel__hists(evsel), 792 event->lost_samples.lost); 793 } 794 return 0; 795 } 796 797 static int process_attr(struct perf_tool *tool __maybe_unused, 798 union perf_event *event, 799 struct evlist **pevlist); 800 801 static void stats_setup(struct report *rep) 802 { 803 memset(&rep->tool, 0, sizeof(rep->tool)); 804 rep->tool.attr = process_attr; 805 rep->tool.sample = count_sample_event; 806 rep->tool.lost_samples = count_lost_samples_event; 807 rep->tool.no_warn = true; 808 } 809 810 static int stats_print(struct report *rep) 811 { 812 struct perf_session *session = rep->session; 813 814 perf_session__fprintf_nr_events(session, stdout); 815 evlist__fprintf_nr_events(session->evlist, stdout); 816 return 0; 817 } 818 819 static void tasks_setup(struct report *rep) 820 { 821 memset(&rep->tool, 0, sizeof(rep->tool)); 822 rep->tool.ordered_events = true; 823 if (rep->mmaps_mode) { 824 rep->tool.mmap = perf_event__process_mmap; 825 rep->tool.mmap2 = perf_event__process_mmap2; 826 } 827 rep->tool.attr = process_attr; 828 rep->tool.comm = perf_event__process_comm; 829 rep->tool.exit = perf_event__process_exit; 830 rep->tool.fork = perf_event__process_fork; 831 
rep->tool.no_warn = true; 832 } 833 834 struct maps__fprintf_task_args { 835 int indent; 836 FILE *fp; 837 size_t printed; 838 }; 839 840 static int maps__fprintf_task_cb(struct map *map, void *data) 841 { 842 struct maps__fprintf_task_args *args = data; 843 const struct dso *dso = map__dso(map); 844 u32 prot = map__prot(map); 845 int ret; 846 847 ret = fprintf(args->fp, 848 "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n", 849 args->indent, "", map__start(map), map__end(map), 850 prot & PROT_READ ? 'r' : '-', 851 prot & PROT_WRITE ? 'w' : '-', 852 prot & PROT_EXEC ? 'x' : '-', 853 map__flags(map) ? 's' : 'p', 854 map__pgoff(map), 855 dso__id_const(dso)->ino, dso__name(dso)); 856 857 if (ret < 0) 858 return ret; 859 860 args->printed += ret; 861 return 0; 862 } 863 864 static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp) 865 { 866 struct maps__fprintf_task_args args = { 867 .indent = indent, 868 .fp = fp, 869 .printed = 0, 870 }; 871 872 maps__for_each_map(maps, maps__fprintf_task_cb, &args); 873 874 return args.printed; 875 } 876 877 static int thread_level(struct machine *machine, const struct thread *thread) 878 { 879 struct thread *parent_thread; 880 int res; 881 882 if (thread__tid(thread) <= 0) 883 return 0; 884 885 if (thread__ppid(thread) <= 0) 886 return 1; 887 888 parent_thread = machine__find_thread(machine, -1, thread__ppid(thread)); 889 if (!parent_thread) { 890 pr_err("Missing parent thread of %d\n", thread__tid(thread)); 891 return 0; 892 } 893 res = 1 + thread_level(machine, parent_thread); 894 thread__put(parent_thread); 895 return res; 896 } 897 898 static void task__print_level(struct machine *machine, struct thread *thread, FILE *fp) 899 { 900 int level = thread_level(machine, thread); 901 int comm_indent = fprintf(fp, " %8d %8d %8d |%*s", 902 thread__pid(thread), thread__tid(thread), 903 thread__ppid(thread), level, ""); 904 905 fprintf(fp, "%s\n", thread__comm_str(thread)); 906 907 maps__fprintf_task(thread__maps(thread), comm_indent, fp); 908 } 909 910 /* 911 * Sort two thread list nodes such that they form a tree. The first node is the 912 * root of the tree, its children are ordered numerically after it. If a child 913 * has children itself then they appear immediately after their parent. For 914 * example, the 4 threads in the order they'd appear in the list: 915 * - init with a TID 1 and a parent of 0 916 * - systemd with a TID 3000 and a parent of init/1 917 * - systemd child thread with TID 4000, the parent is 3000 918 * - NetworkManager is a child of init with a TID of 3500. 919 */ 920 static int task_list_cmp(void *priv, const struct list_head *la, const struct list_head *lb) 921 { 922 struct machine *machine = priv; 923 struct thread_list *task_a = list_entry(la, struct thread_list, list); 924 struct thread_list *task_b = list_entry(lb, struct thread_list, list); 925 struct thread *a = task_a->thread; 926 struct thread *b = task_b->thread; 927 int level_a, level_b, res; 928 929 /* Same thread? */ 930 if (thread__tid(a) == thread__tid(b)) 931 return 0; 932 933 /* Compare a and b to root. */ 934 if (thread__tid(a) == 0) 935 return -1; 936 937 if (thread__tid(b) == 0) 938 return 1; 939 940 /* If parents match sort by tid. */ 941 if (thread__ppid(a) == thread__ppid(b)) 942 return thread__tid(a) < thread__tid(b) ? -1 : 1; 943 944 /* 945 * Find a and b such that if they are a child of each other a and b's 946 * tid's match, otherwise a and b have a common parent and distinct 947 * tid's to sort by. 
First make the depths of the threads match. 948 */ 949 level_a = thread_level(machine, a); 950 level_b = thread_level(machine, b); 951 a = thread__get(a); 952 b = thread__get(b); 953 for (int i = level_a; i > level_b; i--) { 954 struct thread *parent = machine__find_thread(machine, -1, thread__ppid(a)); 955 956 thread__put(a); 957 if (!parent) { 958 pr_err("Missing parent thread of %d\n", thread__tid(a)); 959 thread__put(b); 960 return -1; 961 } 962 a = parent; 963 } 964 for (int i = level_b; i > level_a; i--) { 965 struct thread *parent = machine__find_thread(machine, -1, thread__ppid(b)); 966 967 thread__put(b); 968 if (!parent) { 969 pr_err("Missing parent thread of %d\n", thread__tid(b)); 970 thread__put(a); 971 return 1; 972 } 973 b = parent; 974 } 975 /* Search up to a common parent. */ 976 while (thread__ppid(a) != thread__ppid(b)) { 977 struct thread *parent; 978 979 parent = machine__find_thread(machine, -1, thread__ppid(a)); 980 thread__put(a); 981 if (!parent) 982 pr_err("Missing parent thread of %d\n", thread__tid(a)); 983 a = parent; 984 parent = machine__find_thread(machine, -1, thread__ppid(b)); 985 thread__put(b); 986 if (!parent) 987 pr_err("Missing parent thread of %d\n", thread__tid(b)); 988 b = parent; 989 if (!a || !b) { 990 /* Handle missing parent (unexpected) with some sanity. */ 991 thread__put(a); 992 thread__put(b); 993 return !a && !b ? 0 : (!a ? -1 : 1); 994 } 995 } 996 if (thread__tid(a) == thread__tid(b)) { 997 /* a is a child of b or vice-versa, deeper levels appear later. */ 998 res = level_a < level_b ? -1 : (level_a > level_b ? 1 : 0); 999 } else { 1000 /* Sort by tid now the parent is the same. */ 1001 res = thread__tid(a) < thread__tid(b) ? -1 : 1; 1002 } 1003 thread__put(a); 1004 thread__put(b); 1005 return res; 1006 } 1007 1008 static int tasks_print(struct report *rep, FILE *fp) 1009 { 1010 struct machine *machine = &rep->session->machines.host; 1011 LIST_HEAD(tasks); 1012 int ret; 1013 1014 ret = machine__thread_list(machine, &tasks); 1015 if (!ret) { 1016 struct thread_list *task; 1017 1018 list_sort(machine, &tasks, task_list_cmp); 1019 1020 fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm"); 1021 1022 list_for_each_entry(task, &tasks, list) 1023 task__print_level(machine, task->thread, fp); 1024 } 1025 thread_list__delete(&tasks); 1026 return ret; 1027 } 1028 1029 static int __cmd_report(struct report *rep) 1030 { 1031 int ret; 1032 struct perf_session *session = rep->session; 1033 struct evsel *pos; 1034 struct perf_data *data = session->data; 1035 1036 signal(SIGINT, sig_handler); 1037 1038 if (rep->cpu_list) { 1039 ret = perf_session__cpu_bitmap(session, rep->cpu_list, 1040 rep->cpu_bitmap); 1041 if (ret) { 1042 ui__error("failed to set cpu bitmap\n"); 1043 return ret; 1044 } 1045 session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap; 1046 } 1047 1048 if (rep->show_threads) { 1049 ret = perf_read_values_init(&rep->show_threads_values); 1050 if (ret) 1051 return ret; 1052 } 1053 1054 ret = report__setup_sample_type(rep); 1055 if (ret) { 1056 /* report__setup_sample_type() already showed error message */ 1057 return ret; 1058 } 1059 1060 if (rep->stats_mode) 1061 stats_setup(rep); 1062 1063 if (rep->tasks_mode) 1064 tasks_setup(rep); 1065 1066 ret = perf_session__process_events(session); 1067 if (ret) { 1068 ui__error("failed to process sample\n"); 1069 return ret; 1070 } 1071 1072 evlist__check_mem_load_aux(session->evlist); 1073 1074 if (rep->stats_mode) 1075 return stats_print(rep); 1076 1077 if (rep->tasks_mode) 1078 return 
tasks_print(rep, stdout); 1079 1080 report__warn_kptr_restrict(rep); 1081 1082 evlist__for_each_entry(session->evlist, pos) 1083 rep->nr_entries += evsel__hists(pos)->nr_entries; 1084 1085 if (use_browser == 0) { 1086 if (verbose > 3) 1087 perf_session__fprintf(session, stdout); 1088 1089 if (verbose > 2) 1090 perf_session__fprintf_dsos(session, stdout); 1091 1092 if (dump_trace) { 1093 stats_print(rep); 1094 return 0; 1095 } 1096 } 1097 1098 ret = report__collapse_hists(rep); 1099 if (ret) { 1100 ui__error("failed to process hist entry\n"); 1101 return ret; 1102 } 1103 1104 if (session_done()) 1105 return 0; 1106 1107 /* 1108 * recalculate number of entries after collapsing since it 1109 * might be changed during the collapse phase. 1110 */ 1111 rep->nr_entries = 0; 1112 evlist__for_each_entry(session->evlist, pos) 1113 rep->nr_entries += evsel__hists(pos)->nr_entries; 1114 1115 if (rep->nr_entries == 0) { 1116 ui__error("The %s data has no samples!\n", data->path); 1117 return 0; 1118 } 1119 1120 report__output_resort(rep); 1121 1122 if (rep->total_cycles_mode) { 1123 int block_hpps[6] = { 1124 PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT, 1125 PERF_HPP_REPORT__BLOCK_LBR_CYCLES, 1126 PERF_HPP_REPORT__BLOCK_CYCLES_PCT, 1127 PERF_HPP_REPORT__BLOCK_AVG_CYCLES, 1128 PERF_HPP_REPORT__BLOCK_RANGE, 1129 PERF_HPP_REPORT__BLOCK_DSO, 1130 }; 1131 1132 rep->block_reports = block_info__create_report(session->evlist, 1133 rep->total_cycles, 1134 block_hpps, 6, 1135 &rep->nr_block_reports); 1136 if (!rep->block_reports) 1137 return -1; 1138 } 1139 1140 return report__browse_hists(rep); 1141 } 1142 1143 static int 1144 report_parse_callchain_opt(const struct option *opt, const char *arg, int unset) 1145 { 1146 struct callchain_param *callchain = opt->value; 1147 1148 callchain->enabled = !unset; 1149 /* 1150 * --no-call-graph 1151 */ 1152 if (unset) { 1153 symbol_conf.use_callchain = false; 1154 callchain->mode = CHAIN_NONE; 1155 return 0; 1156 } 1157 1158 return parse_callchain_report_opt(arg); 1159 } 1160 1161 static int 1162 parse_time_quantum(const struct option *opt, const char *arg, 1163 int unset __maybe_unused) 1164 { 1165 unsigned long *time_q = opt->value; 1166 char *end; 1167 1168 *time_q = strtoul(arg, &end, 0); 1169 if (end == arg) 1170 goto parse_err; 1171 if (*time_q == 0) { 1172 pr_err("time quantum cannot be 0"); 1173 return -1; 1174 } 1175 end = skip_spaces(end); 1176 if (*end == 0) 1177 return 0; 1178 if (!strcmp(end, "s")) { 1179 *time_q *= NSEC_PER_SEC; 1180 return 0; 1181 } 1182 if (!strcmp(end, "ms")) { 1183 *time_q *= NSEC_PER_MSEC; 1184 return 0; 1185 } 1186 if (!strcmp(end, "us")) { 1187 *time_q *= NSEC_PER_USEC; 1188 return 0; 1189 } 1190 if (!strcmp(end, "ns")) 1191 return 0; 1192 parse_err: 1193 pr_err("Cannot parse time quantum `%s'\n", arg); 1194 return -1; 1195 } 1196 1197 int 1198 report_parse_ignore_callees_opt(const struct option *opt __maybe_unused, 1199 const char *arg, int unset __maybe_unused) 1200 { 1201 if (arg) { 1202 int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED); 1203 if (err) { 1204 char buf[BUFSIZ]; 1205 regerror(err, &ignore_callees_regex, buf, sizeof(buf)); 1206 pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf); 1207 return -1; 1208 } 1209 have_ignore_callees = 1; 1210 } 1211 1212 return 0; 1213 } 1214 1215 static int 1216 parse_branch_mode(const struct option *opt, 1217 const char *str __maybe_unused, int unset) 1218 { 1219 int *branch_mode = opt->value; 1220 1221 *branch_mode = !unset; 1222 return 0; 1223 } 1224 1225 static int 1226 
parse_percent_limit(const struct option *opt, const char *str, 1227 int unset __maybe_unused) 1228 { 1229 struct report *rep = opt->value; 1230 double pcnt = strtof(str, NULL); 1231 1232 rep->min_percent = pcnt; 1233 callchain_param.min_percent = pcnt; 1234 return 0; 1235 } 1236 1237 static int process_attr(struct perf_tool *tool __maybe_unused, 1238 union perf_event *event, 1239 struct evlist **pevlist) 1240 { 1241 u64 sample_type; 1242 int err; 1243 1244 err = perf_event__process_attr(tool, event, pevlist); 1245 if (err) 1246 return err; 1247 1248 /* 1249 * Check if we need to enable callchains based 1250 * on events sample_type. 1251 */ 1252 sample_type = evlist__combined_sample_type(*pevlist); 1253 callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env)); 1254 return 0; 1255 } 1256 1257 int cmd_report(int argc, const char **argv) 1258 { 1259 struct perf_session *session; 1260 struct itrace_synth_opts itrace_synth_opts = { .set = 0, }; 1261 struct stat st; 1262 bool has_br_stack = false; 1263 int branch_mode = -1; 1264 int last_key = 0; 1265 bool branch_call_mode = false; 1266 #define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent" 1267 static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n" 1268 CALLCHAIN_REPORT_HELP 1269 "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT; 1270 char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT; 1271 const char * const report_usage[] = { 1272 "perf report [<options>]", 1273 NULL 1274 }; 1275 struct report report = { 1276 .tool = { 1277 .sample = process_sample_event, 1278 .mmap = perf_event__process_mmap, 1279 .mmap2 = perf_event__process_mmap2, 1280 .comm = perf_event__process_comm, 1281 .namespaces = perf_event__process_namespaces, 1282 .cgroup = perf_event__process_cgroup, 1283 .exit = perf_event__process_exit, 1284 .fork = perf_event__process_fork, 1285 .lost = perf_event__process_lost, 1286 .read = process_read_event, 1287 .attr = process_attr, 1288 #ifdef HAVE_LIBTRACEEVENT 1289 .tracing_data = perf_event__process_tracing_data, 1290 #endif 1291 .build_id = perf_event__process_build_id, 1292 .id_index = perf_event__process_id_index, 1293 .auxtrace_info = perf_event__process_auxtrace_info, 1294 .auxtrace = perf_event__process_auxtrace, 1295 .event_update = perf_event__process_event_update, 1296 .feature = process_feature_event, 1297 .ordered_events = true, 1298 .ordering_requires_timestamps = true, 1299 }, 1300 .max_stack = PERF_MAX_STACK_DEPTH, 1301 .pretty_printing_style = "normal", 1302 .socket_filter = -1, 1303 .skip_empty = true, 1304 }; 1305 char *sort_order_help = sort_help("sort by key(s):"); 1306 char *field_order_help = sort_help("output field(s): overhead period sample "); 1307 const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL; 1308 const struct option options[] = { 1309 OPT_STRING('i', "input", &input_name, "file", 1310 "input file name"), 1311 OPT_INCR('v', "verbose", &verbose, 1312 "be more verbose (show symbol address, etc)"), 1313 OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"), 1314 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 1315 "dump raw trace in ASCII"), 1316 OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"), 1317 OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"), 1318 OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"), 1319 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 1320 "file", "vmlinux pathname"), 1321 OPT_BOOLEAN(0, 
"ignore-vmlinux", &symbol_conf.ignore_vmlinux, 1322 "don't load vmlinux even if found"), 1323 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 1324 "file", "kallsyms pathname"), 1325 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"), 1326 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 1327 "load module symbols - WARNING: use only with -k and LIVE kernel"), 1328 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, 1329 "Show a column with the number of samples"), 1330 OPT_BOOLEAN('T', "threads", &report.show_threads, 1331 "Show per-thread event counters"), 1332 OPT_STRING(0, "pretty", &report.pretty_printing_style, "key", 1333 "pretty printing style key: normal raw"), 1334 #ifdef HAVE_SLANG_SUPPORT 1335 OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"), 1336 #endif 1337 #ifdef HAVE_GTK2_SUPPORT 1338 OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"), 1339 #endif 1340 OPT_BOOLEAN(0, "stdio", &report.use_stdio, 1341 "Use the stdio interface"), 1342 OPT_BOOLEAN(0, "header", &report.header, "Show data header."), 1343 OPT_BOOLEAN(0, "header-only", &report.header_only, 1344 "Show only data header."), 1345 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 1346 sort_order_help), 1347 OPT_STRING('F', "fields", &field_order, "key[,keys...]", 1348 field_order_help), 1349 OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization, 1350 "Show sample percentage for different cpu modes"), 1351 OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, 1352 "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN), 1353 OPT_STRING('p', "parent", &parent_pattern, "regex", 1354 "regex filter to identify parent, see: '--sort parent'"), 1355 OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, 1356 "Only display entries with parent-match"), 1357 OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param, 1358 "print_type,threshold[,print_limit],order,sort_key[,branch],value", 1359 report_callchain_help, &report_parse_callchain_opt, 1360 callchain_default_opt), 1361 OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain, 1362 "Accumulate callchains of children and show total overhead as well. " 1363 "Enabled by default, use --no-children to disable."), 1364 OPT_INTEGER(0, "max-stack", &report.max_stack, 1365 "Set the maximum stack depth when parsing the callchain, " 1366 "anything beyond the specified depth will be ignored. 
" 1367 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 1368 OPT_BOOLEAN('G', "inverted", &report.inverted_callchain, 1369 "alias for inverted call graph"), 1370 OPT_CALLBACK(0, "ignore-callees", NULL, "regex", 1371 "ignore callees of these functions in call graphs", 1372 report_parse_ignore_callees_opt), 1373 OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", 1374 "only consider symbols in these dsos"), 1375 OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", 1376 "only consider symbols in these comms"), 1377 OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]", 1378 "only consider symbols in these pids"), 1379 OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]", 1380 "only consider symbols in these tids"), 1381 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", 1382 "only consider these symbols"), 1383 OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter", 1384 "only show symbols that (partially) match with this filter"), 1385 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str, 1386 "width[,width...]", 1387 "don't try to adjust column width, use these fixed values"), 1388 OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator", 1389 "separator for columns, no spaces will be added between " 1390 "columns '.' is reserved."), 1391 OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved, 1392 "Only display entries resolved to a symbol"), 1393 OPT_CALLBACK(0, "symfs", NULL, "directory", 1394 "Look for files with symbols relative to this directory", 1395 symbol__config_symfs), 1396 OPT_STRING('C', "cpu", &report.cpu_list, "cpu", 1397 "list of cpus to profile"), 1398 OPT_BOOLEAN('I', "show-info", &report.show_full_info, 1399 "Display extended information about perf.data file"), 1400 OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src, 1401 "Interleave source code with assembly code (default)"), 1402 OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw, 1403 "Display raw encoding of assembly instructions (default)"), 1404 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", 1405 "Specify disassembler style (e.g. -M intel for intel syntax)"), 1406 OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix", 1407 "Add prefix to source file path names in programs (with --prefix-strip)"), 1408 OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N", 1409 "Strip first N entries of source file path name in programs (with --prefix)"), 1410 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, 1411 "Show a column with the sum of periods"), 1412 OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set, 1413 "Show event group information together"), 1414 OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx, 1415 "Sort the output by the event at the index n in group. " 1416 "If n is invalid, sort by the first event. 
" 1417 "WARNING: should be used on grouped events."), 1418 OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "", 1419 "use branch records for per branch histogram filling", 1420 parse_branch_mode), 1421 OPT_BOOLEAN(0, "branch-history", &branch_call_mode, 1422 "add last branch records to call history"), 1423 OPT_STRING(0, "objdump", &objdump_path, "path", 1424 "objdump binary to use for disassembly and annotations"), 1425 OPT_STRING(0, "addr2line", &addr2line_path, "path", 1426 "addr2line binary to use for line numbers"), 1427 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle, 1428 "Disable symbol demangling"), 1429 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel, 1430 "Enable kernel symbol demangling"), 1431 OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"), 1432 OPT_INTEGER(0, "samples", &symbol_conf.res_sample, 1433 "Number of samples to save per histogram entry for individual browsing"), 1434 OPT_CALLBACK(0, "percent-limit", &report, "percent", 1435 "Don't show entries under that percent", parse_percent_limit), 1436 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute", 1437 "how to display percentage of filtered entries", parse_filter_percentage), 1438 OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts", 1439 "Instruction Tracing options\n" ITRACE_HELP, 1440 itrace_parse_synth_opts), 1441 OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename, 1442 "Show full source file name path for source lines"), 1443 OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph, 1444 "Show callgraph from reference event"), 1445 OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr, 1446 "Enable LBR callgraph stitching approach"), 1447 OPT_INTEGER(0, "socket-filter", &report.socket_filter, 1448 "only show processor socket that match with this filter"), 1449 OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace, 1450 "Show raw trace event output (do not use print fmt or plugins)"), 1451 OPT_BOOLEAN('H', "hierarchy", &symbol_conf.report_hierarchy, 1452 "Show entries in a hierarchy"), 1453 OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode", 1454 "'always' (default), 'never' or 'auto' only applicable to --stdio mode", 1455 stdio__config_color, "always"), 1456 OPT_STRING(0, "time", &report.time_str, "str", 1457 "Time span of interest (start,stop)"), 1458 OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name, 1459 "Show inline function"), 1460 OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period", 1461 "Set percent type local/global-period/hits", 1462 annotate_parse_percent_type), 1463 OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"), 1464 OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)", 1465 "Set time quantum for time sort key (default 100ms)", 1466 parse_time_quantum), 1467 OPTS_EVSWITCH(&report.evswitch), 1468 OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode, 1469 "Sort all blocks by 'Sampled Cycles%'"), 1470 OPT_BOOLEAN(0, "disable-order", &report.disable_order, 1471 "Disable raw trace ordering"), 1472 OPT_BOOLEAN(0, "skip-empty", &report.skip_empty, 1473 "Do not display empty (or dummy) events in the output"), 1474 OPT_END() 1475 }; 1476 struct perf_data data = { 1477 .mode = PERF_DATA_MODE_READ, 1478 }; 1479 int ret = hists__init(); 1480 char sort_tmp[128]; 1481 1482 if (ret < 0) 1483 goto exit; 1484 1485 /* 1486 * tasks_mode require access to exited threads to list those that are in 1487 * the data file. 
Off-cpu events are synthesized after other events and 1488 * reference exited threads. 1489 */ 1490 symbol_conf.keep_exited_threads = true; 1491 1492 annotation_options__init(); 1493 1494 ret = perf_config(report__config, &report); 1495 if (ret) 1496 goto exit; 1497 1498 argc = parse_options(argc, argv, options, report_usage, 0); 1499 if (argc) { 1500 /* 1501 * Special case: if there's an argument left then assume that 1502 * it's a symbol filter: 1503 */ 1504 if (argc > 1) 1505 usage_with_options(report_usage, options); 1506 1507 report.symbol_filter_str = argv[0]; 1508 } 1509 1510 if (disassembler_style) { 1511 annotate_opts.disassembler_style = strdup(disassembler_style); 1512 if (!annotate_opts.disassembler_style) 1513 return -ENOMEM; 1514 } 1515 if (objdump_path) { 1516 annotate_opts.objdump_path = strdup(objdump_path); 1517 if (!annotate_opts.objdump_path) 1518 return -ENOMEM; 1519 } 1520 if (addr2line_path) { 1521 symbol_conf.addr2line_path = strdup(addr2line_path); 1522 if (!symbol_conf.addr2line_path) 1523 return -ENOMEM; 1524 } 1525 1526 if (annotate_check_args() < 0) { 1527 ret = -EINVAL; 1528 goto exit; 1529 } 1530 1531 if (report.mmaps_mode) 1532 report.tasks_mode = true; 1533 1534 if (dump_trace && report.disable_order) 1535 report.tool.ordered_events = false; 1536 1537 if (quiet) 1538 perf_quiet_option(); 1539 1540 ret = symbol__validate_sym_arguments(); 1541 if (ret) 1542 goto exit; 1543 1544 if (report.inverted_callchain) 1545 callchain_param.order = ORDER_CALLER; 1546 if (symbol_conf.cumulate_callchain && !callchain_param.order_set) 1547 callchain_param.order = ORDER_CALLER; 1548 1549 if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) && 1550 (int)itrace_synth_opts.callchain_sz > report.max_stack) 1551 report.max_stack = itrace_synth_opts.callchain_sz; 1552 1553 if (!input_name || !strlen(input_name)) { 1554 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 1555 input_name = "-"; 1556 else 1557 input_name = "perf.data"; 1558 } 1559 1560 data.path = input_name; 1561 data.force = symbol_conf.force; 1562 1563 symbol_conf.skip_empty = report.skip_empty; 1564 1565 repeat: 1566 session = perf_session__new(&data, &report.tool); 1567 if (IS_ERR(session)) { 1568 ret = PTR_ERR(session); 1569 goto exit; 1570 } 1571 1572 ret = evswitch__init(&report.evswitch, session->evlist, stderr); 1573 if (ret) 1574 goto exit; 1575 1576 if (zstd_init(&(session->zstd_data), 0) < 0) 1577 pr_warning("Decompression initialization failed. 
Reported data may be incomplete.\n"); 1578 1579 if (report.queue_size) { 1580 ordered_events__set_alloc_size(&session->ordered_events, 1581 report.queue_size); 1582 } 1583 1584 session->itrace_synth_opts = &itrace_synth_opts; 1585 1586 report.session = session; 1587 1588 has_br_stack = perf_header__has_feat(&session->header, 1589 HEADER_BRANCH_STACK); 1590 if (evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER) 1591 has_br_stack = false; 1592 1593 setup_forced_leader(&report, session->evlist); 1594 1595 if (symbol_conf.group_sort_idx && evlist__nr_groups(session->evlist) == 0) { 1596 parse_options_usage(NULL, options, "group-sort-idx", 0); 1597 ret = -EINVAL; 1598 goto error; 1599 } 1600 1601 if (itrace_synth_opts.last_branch || itrace_synth_opts.add_last_branch) 1602 has_br_stack = true; 1603 1604 if (has_br_stack && branch_call_mode) 1605 symbol_conf.show_branchflag_count = true; 1606 1607 memset(&report.brtype_stat, 0, sizeof(struct branch_type_stat)); 1608 1609 /* 1610 * Branch mode is a tristate: 1611 * -1 means default, so decide based on the file having branch data. 1612 * 0/1 means the user chose a mode. 1613 */ 1614 if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) && 1615 !branch_call_mode) { 1616 sort__mode = SORT_MODE__BRANCH; 1617 symbol_conf.cumulate_callchain = false; 1618 } 1619 if (branch_call_mode) { 1620 callchain_param.key = CCKEY_ADDRESS; 1621 callchain_param.branch_callstack = true; 1622 symbol_conf.use_callchain = true; 1623 callchain_register_param(&callchain_param); 1624 if (sort_order == NULL) 1625 sort_order = "srcline,symbol,dso"; 1626 } 1627 1628 if (report.mem_mode) { 1629 if (sort__mode == SORT_MODE__BRANCH) { 1630 pr_err("branch and mem mode incompatible\n"); 1631 goto error; 1632 } 1633 sort__mode = SORT_MODE__MEMORY; 1634 symbol_conf.cumulate_callchain = false; 1635 } 1636 1637 if (symbol_conf.report_hierarchy) { 1638 /* disable incompatible options */ 1639 symbol_conf.cumulate_callchain = false; 1640 1641 if (field_order) { 1642 pr_err("Error: --hierarchy and --fields options cannot be used together\n"); 1643 parse_options_usage(report_usage, options, "F", 1); 1644 parse_options_usage(NULL, options, "hierarchy", 0); 1645 goto error; 1646 } 1647 1648 perf_hpp_list.need_collapse = true; 1649 } 1650 1651 if (report.use_stdio) 1652 use_browser = 0; 1653 #ifdef HAVE_SLANG_SUPPORT 1654 else if (report.use_tui) 1655 use_browser = 1; 1656 #endif 1657 #ifdef HAVE_GTK2_SUPPORT 1658 else if (report.use_gtk) 1659 use_browser = 2; 1660 #endif 1661 1662 /* Force tty output for header output and per-thread stat. 
*/ 1663 if (report.header || report.header_only || report.show_threads) 1664 use_browser = 0; 1665 if (report.header || report.header_only) 1666 report.tool.show_feat_hdr = SHOW_FEAT_HEADER; 1667 if (report.show_full_info) 1668 report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO; 1669 if (report.stats_mode || report.tasks_mode) 1670 use_browser = 0; 1671 if (report.stats_mode && report.tasks_mode) { 1672 pr_err("Error: --tasks and --mmaps can't be used together with --stats\n"); 1673 goto error; 1674 } 1675 1676 if (report.total_cycles_mode) { 1677 if (sort__mode != SORT_MODE__BRANCH) 1678 report.total_cycles_mode = false; 1679 else 1680 sort_order = NULL; 1681 } 1682 1683 if (sort_order && strstr(sort_order, "type")) { 1684 report.data_type = true; 1685 annotate_opts.annotate_src = false; 1686 1687 #ifndef HAVE_DWARF_GETLOCATIONS_SUPPORT 1688 pr_err("Error: Data type profiling is disabled due to missing DWARF support\n"); 1689 goto error; 1690 #endif 1691 } 1692 1693 if (strcmp(input_name, "-") != 0) 1694 setup_browser(true); 1695 else 1696 use_browser = 0; 1697 1698 if (report.data_type && use_browser == 1) { 1699 symbol_conf.annotate_data_member = true; 1700 symbol_conf.annotate_data_sample = true; 1701 } 1702 1703 if (sort_order && strstr(sort_order, "ipc")) { 1704 parse_options_usage(report_usage, options, "s", 1); 1705 goto error; 1706 } 1707 1708 if (sort_order && strstr(sort_order, "symbol")) { 1709 if (sort__mode == SORT_MODE__BRANCH) { 1710 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s", 1711 sort_order, "ipc_lbr"); 1712 report.symbol_ipc = true; 1713 } else { 1714 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s", 1715 sort_order, "ipc_null"); 1716 } 1717 1718 sort_order = sort_tmp; 1719 } 1720 1721 if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) && 1722 (setup_sorting(session->evlist) < 0)) { 1723 if (sort_order) 1724 parse_options_usage(report_usage, options, "s", 1); 1725 if (field_order) 1726 parse_options_usage(sort_order ? NULL : report_usage, 1727 options, "F", 1); 1728 goto error; 1729 } 1730 1731 if ((report.header || report.header_only) && !quiet) { 1732 perf_session__fprintf_info(session, stdout, 1733 report.show_full_info); 1734 if (report.header_only) { 1735 if (data.is_pipe) { 1736 /* 1737 * we need to process first few records 1738 * which contains PERF_RECORD_HEADER_FEATURE. 1739 */ 1740 perf_session__process_events(session); 1741 } 1742 ret = 0; 1743 goto error; 1744 } 1745 } else if (use_browser == 0 && !quiet && 1746 !report.stats_mode && !report.tasks_mode) { 1747 fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n", 1748 stdout); 1749 } 1750 1751 /* 1752 * Only in the TUI browser we are doing integrated annotation, 1753 * so don't allocate extra space that won't be used in the stdio 1754 * implementation. 1755 */ 1756 if (ui__has_annotation() || report.symbol_ipc || report.data_type || 1757 report.total_cycles_mode) { 1758 ret = symbol__annotation_init(); 1759 if (ret < 0) 1760 goto error; 1761 /* 1762 * For searching by name on the "Browse map details". 1763 * providing it only in verbose mode not to bloat too 1764 * much struct symbol. 1765 */ 1766 if (verbose > 0) { 1767 /* 1768 * XXX: Need to provide a less kludgy way to ask for 1769 * more space per symbol, the u32 is for the index on 1770 * the ui browser. 1771 * See symbol__browser_index. 
1772 */ 1773 symbol_conf.priv_size += sizeof(u32); 1774 } 1775 annotation_config__init(); 1776 } 1777 1778 if (symbol__init(&session->header.env) < 0) 1779 goto error; 1780 1781 if (report.time_str) { 1782 ret = perf_time__parse_for_ranges(report.time_str, session, 1783 &report.ptime_range, 1784 &report.range_size, 1785 &report.range_num); 1786 if (ret < 0) 1787 goto error; 1788 1789 itrace_synth_opts__set_time_range(&itrace_synth_opts, 1790 report.ptime_range, 1791 report.range_num); 1792 } 1793 1794 #ifdef HAVE_LIBTRACEEVENT 1795 if (session->tevent.pevent && 1796 tep_set_function_resolver(session->tevent.pevent, 1797 machine__resolve_kernel_addr, 1798 &session->machines.host) < 0) { 1799 pr_err("%s: failed to set libtraceevent function resolver\n", 1800 __func__); 1801 return -1; 1802 } 1803 #endif 1804 sort__setup_elide(stdout); 1805 1806 ret = __cmd_report(&report); 1807 if (ret == K_SWITCH_INPUT_DATA || ret == K_RELOAD) { 1808 perf_session__delete(session); 1809 last_key = K_SWITCH_INPUT_DATA; 1810 goto repeat; 1811 } else 1812 ret = 0; 1813 1814 if (!use_browser && (verbose > 2 || debug_kmaps)) 1815 perf_session__dump_kmaps(session); 1816 error: 1817 if (report.ptime_range) { 1818 itrace_synth_opts__clear_time_range(&itrace_synth_opts); 1819 zfree(&report.ptime_range); 1820 } 1821 1822 if (report.block_reports) { 1823 block_info__free_report(report.block_reports, 1824 report.nr_block_reports); 1825 report.block_reports = NULL; 1826 } 1827 1828 zstd_fini(&(session->zstd_data)); 1829 perf_session__delete(session); 1830 exit: 1831 annotation_options__exit(); 1832 free(sort_order_help); 1833 free(field_order_help); 1834 return ret; 1835 } 1836