// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "kvm-stat.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "mem-info.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->block_info)
		return;
	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(map__dso(h->ms.map));
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
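
	/*
	 * Branch entries carry a from/to map-symbol pair; both sides get
	 * the same width treatment as the top-level symbol and DSO
	 * columns.
	 */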
	if (h->branch_info) {
		if (h->branch_info->from.ms.sym) {
			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.ms.sym) {
			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (mem_info__daddr(h->mem_info)->ms.sym) {
			symlen = (int)mem_info__daddr(h->mem_info)->ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (mem_info__iaddr(h->mem_info)->ms.sym) {
			symlen = (int)mem_info__iaddr(h->mem_info)->ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (mem_info__daddr(h->mem_info)->ms.map) {
			symlen = dso__name_len(map__dso(mem_info__daddr(h->mem_info)->ms.map));
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}
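
	/* Fixed minimum widths for the remaining columns. */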
	hists__new_col_len(hists, HISTC_CGROUP, 6);
	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);

	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);
	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

	if (h->cgroup) {
		const char *cgrp_name = "unknown";
		struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
						   h->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;

		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
	}
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;

	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
	he_stat->period += period;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->weight1 += src->weight1;
	dest->weight2 += src->weight2;
	dest->weight3 += src->weight3;
	dest->nr_events += src->nr_events;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	he_stat->weight1 = (he_stat->weight1 * 7) / 8;
	he_stat->weight2 = (he_stat->weight2 * 7) / 8;
	he_stat->weight3 = (he_stat->weight3 * 7) / 8;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;
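
	/*
	 * Only top-level entries (depth 0) feed the hists-wide totals;
	 * adjusting lower hierarchy levels as well would subtract the
	 * same period twice.
	 */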
	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */
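
/*
 * hist_entry__init() makes the new entry self-contained: it takes its
 * own maps/map/thread references and duplicates the branch info, raw
 * data and srcline that are owned by the incoming sample.
 */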
static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	he->ms.maps = maps__get(he->ms.maps);
	he->ms.map = map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
		he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(symbol_conf.res_sample,
					 sizeof(struct res_sample));
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	he->thread = thread__get(he->thread);
	he->hroot_in = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map_symbol__exit(&he->branch_info->from.ms);
		map_symbol__exit(&he->branch_info->to.ms);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
		map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
	}
err:
	map_symbol__exit(&he->ms);
	zfree(&he->stat_acc);
	return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new = hist_entry__zalloc,
	.free = hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}
	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
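
/*
 * Find 'entry' in the current input tree and merge its stats into the
 * existing node, or allocate and insert a new copy if no entry matches
 * on the sort keys.
 */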
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       const struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);
		if (!cmp) {
			if (sample_self) {
				he_stat__add_stat(&he->stat, &entry->stat);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__delete(entry->block_info);

			kvm_info__zput(entry->kvm_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (hists__has(hists, sym) && he->ms.map != entry->ms.map) {
				if (he->ms.sym) {
					u64 addr = he->ms.sym->start;

					he->ms.sym = map__find_symbol(entry->ms.map, addr);
				}

				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;

	for (;;) {
		unsigned r = random();

		if (r >= thresh)
			return r % high;
	}
}

static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}
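
/*
 * Fill a template hist_entry on the stack from the resolved sample;
 * hists__findnew_entry() deep-copies it only if a new entry has to be
 * created.
 */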
static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct kvm_info *ki,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.cgroup = sample->cgroup,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.code_page_size = sample->code_page_size,
		.stat = {
			.nr_events = 1,
			.period = sample->period,
			.weight1 = sample->weight,
			.weight2 = sample->ins_lat,
			.weight3 = sample->p_stage_cyc,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mem_info__get(mi),
		.kvm_info = ki,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
		.weight = sample->weight,
		.ins_lat = sample->ins_lat,
		.p_stage_cyc = sample->p_stage_cyc,
		.simd_flags = sample->simd_flags,
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct kvm_info *ki,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct kvm_info *ki,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}
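
/*
 * hist_entry_iter machinery: each sample flavor (mem, branch, normal,
 * cumulative) supplies prepare/add_single/next/add_next/finish
 * callbacks, driven by hist_entry_iter__add() below.
 */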
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->mi = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->mi;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight and this is indirectly achieved by
	 * passing period=weight here and the he_stat__add_period()
	 * function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	mem_info__zput(iter->mi);

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->bi = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->bi;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	maps__put(al->maps);
	al->maps = maps__get(bi[i].to.ms.maps);
	map__put(al->map);
	al->map = map__get(bi[i].to.ms.map);
	al->sym = bi[i].to.ms.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->bi;

	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->bi);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
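
/*
 * 'Normal' samples produce exactly one entry, so the next/add_next
 * hooks are the nop callbacks above.
 */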
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      NULL, sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;
	struct callchain_cursor *cursor = get_tls_callchain_cursor();

	if (cursor == NULL)
		return -ENOMEM;

	callchain_cursor_commit(cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only once, preventing entries from exceeding 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->he_cache = he_cache;
	iter->curr = 0;

	return 0;
}
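
/*
 * The first (self) entry gets the full period and seeds the he_cache
 * used below to detect cycles in the callchain.
 */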
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->he_cache;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(get_tls_callchain_cursor());

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(get_tls_callchain_cursor());
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
			   struct hist_entry *right)
{
	struct symbol *sym_l = left->ms.sym;
	struct symbol *sym_r = right->ms.sym;

	if (!sym_l && !sym_r)
		return left->ip != right->ip;

	return !!_sort__sym_cmp(sym_l, sym_r);
}
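
/*
 * Add one cumulated entry per callchain node: the period is accounted
 * to the accumulated stats only (sample_self == false), and cycles or
 * recursion are skipped via the he_cache check.
 */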
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->he_cache;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
	bool fast = hists__has(he_tmp.hists, sym);

	if (tls_cursor == NULL)
		return -ENOMEM;

	callchain_cursor_snapshot(&cursor, tls_cursor);

	callchain_cursor_advance(tls_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		/*
		 * For most cases, there are no duplicate entries in the
		 * callchain.  The symbols are usually different.  Do a
		 * quick check for symbols first.
		 */
		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
			continue;

		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      NULL, sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	mem_info__zput(iter->mi);
	zfree(&iter->bi);
	zfree(&iter->he_cache);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}
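
/*
 * Compare two entries with every sort key's ->cmp callback, stopping
 * at the first difference; dynamic entries not defined for this hists
 * instance are skipped.
 */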
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
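
/*
 * Drop every reference and buffer owned by the entry, then hand the
 * memory back through the entry's allocation ops.
 */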
void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map_symbol__exit(&he->ms);

	if (he->branch_info) {
		map_symbol__exit(&he->branch_info->from.ms);
		map_symbol__exit(&he->branch_info->to.ms);
		zfree_srcline(&he->branch_info->srcline_from);
		zfree_srcline(&he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
		map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__delete(he->block_info);

	if (he->kvm_info)
		kvm_info__zput(he->kvm_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	zfree_srcline(&he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for current level entry, propagate
		 * filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries.  It will be
		 * cleared if any of its lower level entries are not
		 * filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}
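
/*
 * Insert 'he' at one level of the hierarchy: entries that are equal on
 * this level's sort keys are merged, otherwise a new copy is linked
 * below parent_he.
 */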
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}
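
/*
 * Build the whole hierarchy for one collapsed entry: a copy is
 * inserted for each non-skipped format level and the deepest copy
 * becomes the leaf, keeping the merged callchain.
 */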
static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			struct callchain_cursor *cursor = get_tls_callchain_cursor();

			if (cursor == NULL)
				return -1;

			callchain_cursor_reset(cursor);
			if (callchain_merge(cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}

static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				struct callchain_cursor *cursor = get_tls_callchain_cursor();

				if (cursor != NULL) {
					callchain_cursor_reset(cursor);
					if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
						ret = -1;
				} else {
					ret = 0;
				}
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}

struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}
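
/*
 * Merge every entry of the current input tree into the collapsed tree
 * (or the hierarchy): entries that compare equal on the collapse keys
 * are combined into one.
 */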
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (fmt->init)
			fmt->init(fmt, he);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}
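
/*
 * Non-hierarchy output insertion: sort the entry's callchain if
 * requested, then place it in the output tree by the output sort keys.
 */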
static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	/* update column width of dynamic entries */
	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (fmt->init)
			fmt->init(fmt, he);
	}
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
			     hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}
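
/*
 * Hierarchy tree walkers: traverse the output trees depth-first,
 * descending into children only when can_goto_child() allows it.
 */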
struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || !RC_CHK_EQUAL(map__dso(he->ms.map), hists->dso_filter))) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    !RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
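
/*
 * Re-evaluate a single filter type over all output entries,
 * re-accounting the filtered statistics for entries that pass.
 */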
static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}
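
/*
 * Hierarchy-mode filtering: every level of the tree is visited and
 * hist_entry__filter() classifies each entry as non-matching type
 * (case 1: descend into children), filtered out (case 2), or surviving
 * (case 3: un-filter and propagate periods to the parents). A final
 * pass re-sorts the output tree since periods may have changed.
 */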
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since a filter in a
	 * lower hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

static void hists_stats__inc(struct hists_stats *stats)
{
	++stats->nr_samples;
}

void hists__inc_nr_events(struct hists *hists)
{
	hists_stats__inc(&hists->stats);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	hists_stats__inc(&hists->stats);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
{
	hists->stats.nr_lost_samples += lost;
}
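
/*
 * Insert a zero-period placeholder mirroring 'pair' so that a bucket
 * present in another hists also exists in this one; used by
 * hists__link() below. (Presumably this is what keeps per-event
 * histograms aligned for side-by-side output, e.g. event groups; the
 * callers are outside this file.)
 */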
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
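
/*
 * Hierarchy counterpart of hists__link() below: recursively walk the
 * other hists' collapsed tree and add dummy entries, with parent
 * pointers into the leader hierarchy, for any bucket the leader lacks.
 */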
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* point the parent into the leader hierarchy, not into pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

int hists__unlink(struct hists *hists)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);
	}

	return 0;
}
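
/*
 * Account per-branch cycle counts from a sample's branch stack to the
 * annotation histograms. The reverse walk below matters: perf stores
 * the branch stack most-recent-first, so iterating from bs->nr - 1 down
 * to 0 replays branches in program order, letting 'prev' carry the
 * previous branch target for IPC accounting.
 */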
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode,
			  u64 *total_cycles)
{
	struct branch_info *bi;
	struct branch_entry *entries = perf_sample__branch_entries(sample);

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && entries[0].flags.cycles) {
		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (int i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
								nonany_branch_mode ? NULL : prev,
								bi[i].flags.cycles);
				prev = &bi[i].to;

				if (total_cycles)
					*total_cycles += bi[i].flags.cycles;
			}
			for (unsigned int i = 0; i < bs->nr; i++) {
				map_symbol__exit(&bi[i].to.ms);
				map_symbol__exit(&bi[i].from.ms);
			}
			free(bi);
		}
	}
}

size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		if (symbol_conf.skip_empty && !hists->stats.nr_samples &&
		    !hists->stats.nr_lost_samples)
			continue;

		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
		if (hists->stats.nr_samples)
			ret += fprintf(fp, "%16s events: %10d\n",
				       "SAMPLE", hists->stats.nr_samples);
		if (hists->stats.nr_lost_samples)
			ret += fprintf(fp, "%16s events: %10d\n",
				       "LOST_SAMPLES", hists->stats.nr_lost_samples);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
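
/*
 * Format the summary/title line for the current hists. With show_freq
 * set and no filters active the result looks roughly like this
 * (illustrative values only):
 *
 *	Samples: 4K of event 'cycles', 4000 Hz, Event count (approx.): 3086733841
 *
 * Active UID/thread/DSO/socket filters append ", UID: ...",
 * ", Thread: ...", ", DSO: ..." and ", Processor Socket: ..." suffixes.
 */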
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_samples;
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;

		evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_samples;
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += snprintf(bf + printed, size - printed,
				    ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread__comm_set(thread) ?
					      thread__comm_str(thread) : ""),
					     thread__tid(thread));
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso__short_name(dso));
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	mutex_init(&hists->lock);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = evsel__object_config(sizeof(struct hists_evsel),
				       hists_evsel__init, hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}