// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "namespaces.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "vdso.h"

static inline int is_android_lib(const char *filename)
{
	return strstarts(filename, "/data/app-lib/") ||
	       strstarts(filename, "/system/lib/");
}

static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (strstarts(filename, "/data/app-lib/")) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	if (strstarts(filename, "/system/lib/")) {
		char *ndk, *app;
		const char *arch;
		int ndk_length, app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

		if (!arch)
			return false;

		new_length = 27 + ndk_length +
			     app_length + lib_length
			     + strlen(arch);

		if (new_length > PATH_MAX)
			return false;
		snprintf(newfilename, new_length,
			 "%.*s/platforms/%.*s/arch-%s/usr/lib/%s",
			 ndk_length, ndk, app_length, app, arch, libname);

		return true;
	}
	return false;
}

void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map__set_start(map, start);
	map__set_end(map, end);
	map__set_pgoff(map, pgoff);
	map__set_reloc(map, 0);
	map__set_dso(map, dso__get(dso));
	map__set_mapping_type(map, MAPPING_TYPE__DSO);
	map__set_erange_warned(map, false);
	refcount_set(map__refcnt(map), 1);
}

struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, struct dso_id *id,
		     u32 prot, u32 flags, struct build_id *bid,
		     char *filename, struct thread *thread)
{
	struct map *result;
	RC_STRUCT(map) *map;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;

	map = malloc(sizeof(*map));
	if (ADD_RC_CHK(result, map)) {
		char newfilename[PATH_MAX];
		struct dso *dso, *header_bid_dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename) || flags & MAP_HUGETLB;
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);
		map->prot = prot;
		map->flags = flags;
		nsi = nsinfo__get(thread__nsinfo(thread));

		if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
			snprintf(newfilename, sizeof(newfilename),
				 "/tmp/perf-%d.map", nsinfo__pid(nsi));
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			/* The vdso maps are always on the host and not the
			 * container. Ensure that we don't use setns to look
			 * them up.
			 */
			nnsi = nsinfo__copy(nsi);
			if (nnsi) {
				nsinfo__put(nsi);
				nsinfo__clear_need_setns(nnsi);
				nsi = nnsi;
			}
			pgoff = 0;
			dso = machine__findnew_vdso(machine, thread);
		} else
			dso = machine__findnew_dso_id(machine, filename, id);

		if (dso == NULL)
			goto out_delete;

		assert(!dso__kernel(dso));
		map__init(result, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->mapping_type = MAPPING_TYPE__IDENTITY;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (!(prot & PROT_EXEC))
				dso__set_loaded(dso);
		}
		mutex_lock(dso__lock(dso));
		dso__set_nsinfo(dso, nsi);
		mutex_unlock(dso__lock(dso));

		if (build_id__is_defined(bid)) {
			dso__set_build_id(dso, bid);
		} else {
			/*
			 * If the mmap event had no build ID, search for an existing dso from the
			 * build ID header by name. Otherwise only the dso loaded at the time of
			 * reading the header will have the build ID set and all future mmaps will
			 * have it missing.
			 */
			header_bid_dso = dsos__find(&machine->dsos, filename, false);
			if (header_bid_dso && dso__header_build_id(header_bid_dso)) {
				dso__set_build_id(dso, dso__bid(header_bid_dso));
				dso__set_header_build_id(dso, 1);
			}
			dso__put(header_bid_dso);
		}
		dso__put(dso);
	}
	return result;
out_delete:
	nsinfo__put(nsi);
	RC_CHK_FREE(result);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso)
{
	struct map *result;
	RC_STRUCT(map) *map;

	map = calloc(1, sizeof(*map) + (dso__kernel(dso) ? sizeof(struct kmap) : 0));
	if (ADD_RC_CHK(result, map)) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(result, start, 0, 0, dso);
	}

	return result;
}

bool __map__is_kernel(const struct map *map)
{
	if (!dso__kernel(map__dso(map)))
		return false;
	return machine__kernel_map(maps__machine(map__kmaps((struct map *)map))) == map;
}

bool __map__is_extra_kernel_map(const struct map *map)
{
	struct kmap *kmap = __map__kmap((struct map *)map);

	return kmap && kmap->name[0];
}

bool __map__is_bpf_prog(const struct map *map)
{
	const char *name;
	struct dso *dso = map__dso(map);

	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return true;

	/*
	 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
	 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
	 * guess the type based on name.
	 */
	name = dso__short_name(dso);
	return name && (strstr(name, "bpf_prog_") == name);
}

bool __map__is_bpf_image(const struct map *map)
{
	const char *name;
	struct dso *dso = map__dso(map);

	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE)
		return true;

	/*
	 * If PERF_RECORD_KSYMBOL is not included, the dso will not have
	 * type of DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can
	 * guess the type based on name.
	 */
	name = dso__short_name(dso);
	return name && is_bpf_image(name);
}

bool __map__is_ool(const struct map *map)
{
	const struct dso *dso = map__dso(map);

	return dso && dso__binary_type(dso) == DSO_BINARY_TYPE__OOL;
}

bool map__has_symbols(const struct map *map)
{
	return dso__has_symbols(map__dso(map));
}

static void map__exit(struct map *map)
{
	BUG_ON(refcount_read(map__refcnt(map)) != 0);
	dso__zput(RC_CHK_ACCESS(map)->dso);
}

void map__delete(struct map *map)
{
	map__exit(map);
	RC_CHK_FREE(map);
}

void map__put(struct map *map)
{
	if (map && refcount_dec_and_test(map__refcnt(map)))
		map__delete(map);
	else
		RC_CHK_PUT(map);
}

void map__fixup_start(struct map *map)
{
	struct dso *dso = map__dso(map);
	struct rb_root_cached *symbols = dso__symbols(dso);
	struct rb_node *nd = rb_first_cached(symbols);

	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);

		map__set_start(map, sym->start);
	}
}

void map__fixup_end(struct map *map)
{
	struct dso *dso = map__dso(map);
	struct rb_root_cached *symbols = dso__symbols(dso);
	struct rb_node *nd = rb_last(&symbols->rb_root);

	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map__set_end(map, sym->end);
	}
}

#define DSO__DELETED "(deleted)"

int map__load(struct map *map)
{
	struct dso *dso = map__dso(map);
	const char *name = dso__long_name(dso);
	int nr;

	if (dso__loaded(dso))
		return 0;

	nr = dso__load(dso, map);
	if (nr < 0) {
		if (dso__has_build_id(dso)) {
			char sbuild_id[SBUILD_ID_SIZE];

			build_id__sprintf(dso__bid(dso), sbuild_id);
			pr_debug("%s with build id %s not found", name, sbuild_id);
		} else
			pr_debug("Failed to open %s", name);

		pr_debug(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_debug("%.*s was updated (is prelink enabled?). "
				 "Restart the long running apps that use it!\n",
				 (int)real_len, name);
		} else {
			pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}

struct symbol *map__find_symbol(struct map *map, u64 addr)
{
	if (map__load(map) < 0)
		return NULL;

	return dso__find_symbol(map__dso(map), addr);
}

struct symbol *map__find_symbol_by_name_idx(struct map *map, const char *name, size_t *idx)
{
	struct dso *dso;

	if (map__load(map) < 0)
		return NULL;

	dso = map__dso(map);
	dso__sort_by_name(dso);

	return dso__find_symbol_by_name(dso, name, idx);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
{
	size_t idx;

	return map__find_symbol_by_name_idx(map, name, &idx);
}

struct map *map__clone(struct map *from)
{
	struct map *result;
	RC_STRUCT(map) *map;
	size_t size = sizeof(RC_STRUCT(map));
	struct dso *dso = map__dso(from);

	if (dso && dso__kernel(dso))
		size += sizeof(struct kmap);

	map = memdup(RC_CHK_ACCESS(from), size);
	if (ADD_RC_CHK(result, map)) {
		refcount_set(&map->refcnt, 1);
		map->dso = dso__get(dso);
	}

	return result;
}

size_t map__fprintf(struct map *map, FILE *fp)
{
	const struct dso *dso = map__dso(map);

	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map__start(map), map__end(map), map__pgoff(map), dso__name(dso));
}

static bool prefer_dso_long_name(const struct dso *dso, bool print_off)
{
	return dso__long_name(dso) &&
	       (symbol_conf.show_kernel_path ||
		(print_off && (dso__name(dso)[0] == '[' || dso__is_kcore(dso))));
}

static size_t __map__fprintf_dsoname(struct map *map, bool print_off, FILE *fp)
{
	char buf[symbol_conf.pad_output_len_dso + 1];
	const char *dsoname = "[unknown]";
	const struct dso *dso = map ? map__dso(map) : NULL;

	if (dso) {
		if (prefer_dso_long_name(dso, print_off))
			dsoname = dso__long_name(dso);
		else
			dsoname = dso__name(dso);
	}

	if (symbol_conf.pad_output_len_dso) {
		scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname);
		dsoname = buf;
	}

	return fprintf(fp, "%s", dsoname);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	return __map__fprintf_dsoname(map, false, fp);
}

size_t map__fprintf_dsoname_dsoff(struct map *map, bool print_off, u64 addr, FILE *fp)
{
	const struct dso *dso = map ? map__dso(map) : NULL;
	int printed = 0;

	if (print_off && (!dso || !dso__is_object_file(dso)))
		print_off = false;
	printed += fprintf(fp, " (");
	printed += __map__fprintf_dsoname(map, print_off, fp);
	if (print_off)
		printed += fprintf(fp, "+0x%" PRIx64, addr);
	printed += fprintf(fp, ")");

	return printed;
}

char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
{
	if (map == NULL)
		return SRCLINE_UNKNOWN;

	return get_srcline(map__dso(map), map__rip_2objdump(map, addr), sym, true, true, addr);
}

int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
			 FILE *fp)
{
	const struct dso *dso = map ? map__dso(map) : NULL;
	int ret = 0;

	if (dso) {
		char *srcline = map__srcline(map, addr, NULL);
		if (srcline != SRCLINE_UNKNOWN)
			ret = fprintf(fp, "%s%s", prefix, srcline);
		zfree_srcline(&srcline);
	}
	return ret;
}

void srccode_state_free(struct srccode_state *state)
{
	zfree(&state->srcfile);
	state->line = 0;
}

/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	struct kmap *kmap = __map__kmap(map);
	const struct dso *dso = map__dso(map);

	/*
	 * vmlinux does not have program headers for PTI entry trampolines and
	 * kcore may not either. However the trampoline object code is on the
	 * main kernel map, so just use that instead.
	 */
	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps) {
		struct machine *machine = maps__machine(kmap->kmaps);

		if (machine) {
			struct map *kernel_map = machine__kernel_map(machine);

			if (kernel_map)
				map = kernel_map;
		}
	}

	if (!dso__adjust_symbols(dso))
		return rip;

	if (dso__rel(dso))
		return rip - map__pgoff(map);

	if (dso__kernel(dso) == DSO_SPACE__USER)
		return rip + dso__text_offset(dso);

	return map__unmap_ip(map, rip) - map__reloc(map);
}

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map__map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	const struct dso *dso = map__dso(map);

	if (!dso__adjust_symbols(dso))
		return map__unmap_ip(map, ip);

	if (dso__rel(dso))
		return map__unmap_ip(map, ip + map__pgoff(map));

	if (dso__kernel(dso) == DSO_SPACE__USER)
		return map__unmap_ip(map, ip - dso__text_offset(dso));

	return ip + map__reloc(map);
}

/* convert objdump address to relative address.  (To be removed) */
u64 map__objdump_2rip(struct map *map, u64 ip)
{
	const struct dso *dso = map__dso(map);

	if (!dso__adjust_symbols(dso))
		return ip;

	if (dso__rel(dso))
		return ip + map__pgoff(map);

	if (dso__kernel(dso) == DSO_SPACE__USER)
		return ip - dso__text_offset(dso);

	return map__map_ip(map, ip + map__reloc(map));
}

bool map__contains_symbol(const struct map *map, const struct symbol *sym)
{
	u64 ip = map__unmap_ip(map, sym->start);

	return ip >= map__start(map) && ip < map__end(map);
}

struct kmap *__map__kmap(struct map *map)
{
	const struct dso *dso = map__dso(map);

	if (!dso || !dso__kernel(dso))
		return NULL;
	return (struct kmap *)(&RC_CHK_ACCESS(map)[1]);
}

struct kmap *map__kmap(struct map *map)
{
	struct kmap *kmap = __map__kmap(map);

	if (!kmap)
		pr_err("Internal error: map__kmap with a non-kernel map\n");
	return kmap;
}

struct maps *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}