// SPDX-License-Identifier: GPL-2.0

#include <unistd.h>
#include <pthread.h>
#include <test_progs.h>
#include "uprobe_multi.skel.h"
#include "uprobe_multi_bench.skel.h"
#include "uprobe_multi_usdt.skel.h"
#include "bpf/libbpf_internal.h"
#include "testing_helpers.h"
#include "../sdt.h"

static char test_data[] = "test_data";

noinline void uprobe_multi_func_1(void)
{
	asm volatile ("");
}

noinline void uprobe_multi_func_2(void)
{
	asm volatile ("");
}

noinline void uprobe_multi_func_3(void)
{
	asm volatile ("");
}

noinline void usdt_trigger(void)
{
	STAP_PROBE(test, pid_filter_usdt);
}

struct child {
	int go[2];
	int c2p[2]; /* child -> parent channel */
	int pid;
	int tid;
	pthread_t thread;
};

static void release_child(struct child *child)
{
	int child_status;

	if (!child)
		return;
	close(child->go[1]);
	close(child->go[0]);
	if (child->thread)
		pthread_join(child->thread, NULL);
	close(child->c2p[0]);
	close(child->c2p[1]);
	if (child->pid > 0)
		waitpid(child->pid, &child_status, 0);
}

static void kick_child(struct child *child)
{
	char c = 1;

	if (child) {
		write(child->go[1], &c, 1);
		release_child(child);
	}
	fflush(NULL);
}

static struct child *spawn_child(void)
{
	static struct child child;
	int err;
	int c;

	/* pipe to notify child to execute the trigger functions */
	if (pipe(child.go))
		return NULL;

	child.pid = child.tid = fork();
	if (child.pid < 0) {
		release_child(&child);
		errno = EINVAL;
		return NULL;
	}

	/* child */
	if (child.pid == 0) {
		close(child.go[1]);

		/* wait for parent's kick */
		err = read(child.go[0], &c, 1);
		if (err != 1)
			exit(err);

		uprobe_multi_func_1();
		uprobe_multi_func_2();
		uprobe_multi_func_3();
		usdt_trigger();

		exit(errno);
	}

	return &child;
}

static void *child_thread(void *ctx)
{
	struct child *child = ctx;
	int c = 0, err;

	child->tid = syscall(SYS_gettid);

	/* let parent know we are ready */
	err = write(child->c2p[1], &c, 1);
	if (err != 1)
		pthread_exit(&err);

	/* wait for parent's kick */
	err = read(child->go[0], &c, 1);
	if (err != 1)
		pthread_exit(&err);

	uprobe_multi_func_1();
	uprobe_multi_func_2();
	uprobe_multi_func_3();
	usdt_trigger();

	err = 0;
	pthread_exit(&err);
}

static struct child *spawn_thread(void)
{
	static struct child child;
	int c, err;

	/* pipe to notify child to execute the trigger functions */
	if (pipe(child.go))
		return NULL;
	/* pipe to notify parent that child thread is ready */
	if (pipe(child.c2p)) {
		close(child.go[0]);
		close(child.go[1]);
		return NULL;
	}

	child.pid = getpid();

	err = pthread_create(&child.thread, NULL, child_thread, &child);
	if (err) {
		err = -errno;
		close(child.go[0]);
		close(child.go[1]);
		close(child.c2p[0]);
		close(child.c2p[1]);
		errno = -err;
		return NULL;
	}

	err = read(child.c2p[0], &c, 1);
	if (!ASSERT_EQ(err, 1, "child_thread_ready"))
		return NULL;

	return &child;
}

static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
{
	skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
	skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
	skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;

	skel->bss->user_ptr = test_data;

	/*
	 * Disable the pid check in the BPF program when running the pid
	 * filter test, because the probes should be executed only by
	 * child->pid, which is passed at probe attach time.
	 */
	skel->bss->pid = child ? 0 : getpid();
	skel->bss->expect_pid = child ? child->pid : 0;

	/* trigger all probes, if we are testing child *process*, just to make
	 * sure that PID filtering doesn't let through activations from wrong
	 * PIDs; when we test a child *thread*, we skip this to avoid double
	 * counting the number of trigger invocations
	 */
	if (!child || !child->thread) {
		uprobe_multi_func_1();
		uprobe_multi_func_2();
		uprobe_multi_func_3();
		usdt_trigger();
	}

	if (child)
		kick_child(child);

	/*
	 * There are 2 entry and 2 exit probes called for each uprobe_multi_func_[123]
	 * function, and each sleepable probe hit (6 in total) increments
	 * uprobe_multi_sleep_result.
	 */
	ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result");

	ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result");

	ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");

	ASSERT_FALSE(skel->bss->bad_pid_seen, "bad_pid_seen");

	if (child) {
		ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
		ASSERT_EQ(skel->bss->child_tid, child->tid, "uprobe_multi_child_tid");
	}
}

static void test_skel_api(void)
{
	struct uprobe_multi *skel = NULL;
	int err;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

	err = uprobe_multi__attach(skel);
	if (!ASSERT_OK(err, "uprobe_multi__attach"))
		goto cleanup;

	uprobe_multi_test_run(skel, NULL);

cleanup:
	uprobe_multi__destroy(skel);
}

static void
__test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts,
		  struct child *child)
{
	pid_t pid = child ? child->pid : -1;
	struct uprobe_multi *skel = NULL;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

	opts->retprobe = false;
	skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, pid,
							      binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = true;
	skel->links.uretprobe = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, pid,
								 binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uretprobe, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = false;
	skel->links.uprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uprobe_sleep, pid,
								     binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe_sleep, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = true;
	skel->links.uretprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uretprobe_sleep,
									pid, binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uretprobe_sleep, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = false;
	skel->links.uprobe_extra = bpf_program__attach_uprobe_multi(skel->progs.uprobe_extra, -1,
								     binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	/* Attach (uprobe-backed) USDTs */
	skel->links.usdt_pid = bpf_program__attach_usdt(skel->progs.usdt_pid, pid, binary,
							"test", "pid_filter_usdt", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt_pid, "attach_usdt_pid"))
		goto cleanup;

	skel->links.usdt_extra = bpf_program__attach_usdt(skel->progs.usdt_extra, -1, binary,
							  "test", "pid_filter_usdt", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt_extra, "attach_usdt_extra"))
		goto cleanup;

	uprobe_multi_test_run(skel, child);

	ASSERT_FALSE(skel->bss->bad_pid_seen_usdt, "bad_pid_seen_usdt");
	if (child) {
		ASSERT_EQ(skel->bss->child_pid_usdt, child->pid, "usdt_multi_child_pid");
		ASSERT_EQ(skel->bss->child_tid_usdt, child->tid, "usdt_multi_child_tid");
	}
cleanup:
	uprobe_multi__destroy(skel);
}

static void
test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts)
{
	struct child *child;

	/* no pid filter */
	__test_attach_api(binary, pattern, opts, NULL);

	/* pid filter */
	child = spawn_child();
	if (!ASSERT_OK_PTR(child, "spawn_child"))
		return;

	__test_attach_api(binary, pattern, opts, child);

	/* pid filter (thread) */
	child = spawn_thread();
	if (!ASSERT_OK_PTR(child, "spawn_thread"))
		return;

	__test_attach_api(binary, pattern, opts, child);
}

static void test_attach_api_pattern(void)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);

	test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts);
	test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts);
}

static void test_attach_api_syms(void)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	const char *syms[3] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
	};

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);
	test_attach_api("/proc/self/exe", NULL, &opts);
}

static void test_attach_api_fails(void)
{
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	const char *path = "/proc/self/exe";
	struct uprobe_multi *skel = NULL;
	int prog_fd, link_fd = -1;
	unsigned long offset = 0;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

	prog_fd = bpf_program__fd(skel->progs.uprobe_extra);

	/* abnormal cnt */
	opts.uprobe_multi.path = path;
	opts.uprobe_multi.offsets = &offset;
	opts.uprobe_multi.cnt = INT_MAX;
	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -E2BIG, "big cnt"))
		goto cleanup;

	/* cnt is 0 */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.offsets = (unsigned long *) &offset,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EINVAL, "cnt_is_zero"))
		goto cleanup;

	/* negative offset */
	offset = -1;
	opts.uprobe_multi.path = path;
	opts.uprobe_multi.offsets = (unsigned long *) &offset;
	opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EINVAL, "offset_is_negative"))
		goto cleanup;

	/* offsets is NULL */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EINVAL, "offsets_is_null"))
		goto cleanup;

	/* wrong offsets pointer */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.offsets = (unsigned long *) 1,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EFAULT, "offsets_is_wrong"))
		goto cleanup;

	/* path is NULL */
	offset = 1;
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EINVAL, "path_is_null"))
		goto cleanup;

	/* wrong path pointer */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = (const char *) 1,
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EFAULT, "path_is_wrong"))
		goto cleanup;

	/* wrong path type */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = "/",
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EBADF, "path_is_wrong_type"))
		goto cleanup;

	/* wrong cookies pointer */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cookies = (__u64 *) 1ULL,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EFAULT, "cookies_is_wrong"))
		goto cleanup;

	/* wrong ref_ctr_offsets pointer */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cookies = (__u64 *) &offset,
		.uprobe_multi.ref_ctr_offsets = (unsigned long *) 1,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EFAULT, "ref_ctr_offsets_is_wrong"))
		goto cleanup;

	/* wrong flags */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.flags = 1 << 31,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EINVAL, "wrong_flags"))
		goto cleanup;

	/* wrong pid */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cnt = 1,
		.uprobe_multi.pid = -2,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");

cleanup:
	if (link_fd >= 0)
		close(link_fd);
	uprobe_multi__destroy(skel);
}

static void __test_link_api(struct child *child)
{
	int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1;
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	const char *path = "/proc/self/exe";
	struct uprobe_multi *skel = NULL;
	unsigned long *offsets = NULL;
	const char *syms[3] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
	};
	int link_extra_fd = -1;
	int err;

	err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets, STT_FUNC);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets"))
		return;

	opts.uprobe_multi.path = path;
	opts.uprobe_multi.offsets = offsets;
	opts.uprobe_multi.cnt = ARRAY_SIZE(syms);
	opts.uprobe_multi.pid = child ? child->pid : 0;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

	opts.uprobe_multi.flags = 0;
	prog_fd = bpf_program__fd(skel->progs.uprobe);
	link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
		goto cleanup;

	opts.uprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
	prog_fd = bpf_program__fd(skel->progs.uretprobe);
	link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
		goto cleanup;

	opts.uprobe_multi.flags = 0;
	prog_fd = bpf_program__fd(skel->progs.uprobe_sleep);
	link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link3_fd, 0, "link3_fd"))
		goto cleanup;

	opts.uprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
	prog_fd = bpf_program__fd(skel->progs.uretprobe_sleep);
	link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link4_fd, 0, "link4_fd"))
		goto cleanup;

	opts.uprobe_multi.flags = 0;
	opts.uprobe_multi.pid = 0;
	prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
	link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd"))
		goto cleanup;

	uprobe_multi_test_run(skel, child);

cleanup:
	if (link1_fd >= 0)
		close(link1_fd);
	if (link2_fd >= 0)
		close(link2_fd);
	if (link3_fd >= 0)
		close(link3_fd);
	if (link4_fd >= 0)
		close(link4_fd);
	if (link_extra_fd >= 0)
		close(link_extra_fd);

	uprobe_multi__destroy(skel);
	free(offsets);
}

static void test_link_api(void)
{
	struct child *child;

	/* no pid filter */
	__test_link_api(NULL);

	/* pid filter */
	child = spawn_child();
	if (!ASSERT_OK_PTR(child, "spawn_child"))
		return;

	__test_link_api(child);

	/* pid filter (thread) */
	child = spawn_thread();
	if (!ASSERT_OK_PTR(child, "spawn_thread"))
		return;

	__test_link_api(child);
}

static void test_bench_attach_uprobe(void)
{
	long attach_start_ns = 0, attach_end_ns = 0;
	struct uprobe_multi_bench *skel = NULL;
	long detach_start_ns, detach_end_ns;
	double attach_delta, detach_delta;
	int err;

	skel = uprobe_multi_bench__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_bench__open_and_load"))
		goto cleanup;

	attach_start_ns = get_time_ns();

	err = uprobe_multi_bench__attach(skel);
	if (!ASSERT_OK(err, "uprobe_multi_bench__attach"))
		goto cleanup;

	attach_end_ns = get_time_ns();

	system("./uprobe_multi bench");

	ASSERT_EQ(skel->bss->count, 50000, "uprobes_count");

cleanup:
	detach_start_ns = get_time_ns();
	uprobe_multi_bench__destroy(skel);
	detach_end_ns = get_time_ns();

	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;

	printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
	printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
}

static void test_bench_attach_usdt(void)
{
	long attach_start_ns = 0, attach_end_ns = 0;
	struct uprobe_multi_usdt *skel = NULL;
	long detach_start_ns, detach_end_ns;
	double attach_delta, detach_delta;

	skel = uprobe_multi_usdt__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_usdt__open_and_load"))
		goto cleanup;

	attach_start_ns = get_time_ns();

	skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./uprobe_multi",
						     "test", "usdt", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt"))
		goto cleanup;

	attach_end_ns = get_time_ns();

	system("./uprobe_multi usdt");

	ASSERT_EQ(skel->bss->count, 50000, "usdt_count");

cleanup:
	detach_start_ns = get_time_ns();
	uprobe_multi_usdt__destroy(skel);
	detach_end_ns = get_time_ns();

	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;

	printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
	printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
}

void test_uprobe_multi_test(void)
{
	if (test__start_subtest("skel_api"))
		test_skel_api();
	if (test__start_subtest("attach_api_pattern"))
		test_attach_api_pattern();
	if (test__start_subtest("attach_api_syms"))
		test_attach_api_syms();
	if (test__start_subtest("link_api"))
		test_link_api();
	if (test__start_subtest("bench_uprobe"))
		test_bench_attach_uprobe();
	if (test__start_subtest("bench_usdt"))
		test_bench_attach_usdt();
	if (test__start_subtest("attach_api_fails"))
		test_attach_api_fails();
}
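
/*
 * Usage sketch, assuming the BPF selftests have been built under
 * tools/testing/selftests/bpf and the test is run from that directory
 * via the standard test_progs runner:
 *
 *   ./test_progs -t uprobe_multi_test            # run all subtests above
 *   ./test_progs -t uprobe_multi_test/link_api   # run a single subtest
 *
 * The bench_uprobe and bench_usdt subtests additionally expect the
 * ./uprobe_multi helper binary (invoked via system() above) to be
 * present in the working directory.
 */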