// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */

#include <string.h>
#include <linux/bpf.h>
#include <linux/limits.h>
#include <test_progs.h>
#include "trace_helpers.h"
#include "test_fill_link_info.skel.h"
#include "bpf/libbpf_internal.h"

#define TP_CAT "sched"
#define TP_NAME "sched_switch"

static const char *kmulti_syms[] = {
	"bpf_fentry_test2",
	"bpf_fentry_test1",
	"bpf_fentry_test3",
};
#define KMULTI_CNT ARRAY_SIZE(kmulti_syms)
static __u64 kmulti_addrs[KMULTI_CNT];
static __u64 kmulti_cookies[] = { 3, 1, 2 };

#define KPROBE_FUNC "bpf_fentry_test1"
static __u64 kprobe_addr;

#define UPROBE_FILE "/proc/self/exe"
static ssize_t uprobe_offset;
/* uprobe attach point */
static noinline void uprobe_func(void)
{
	asm volatile ("");
}

#define PERF_EVENT_COOKIE 0xdeadbeef

static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
				 ssize_t offset, ssize_t entry_offset)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	char buf[PATH_MAX];
	int err;

	memset(&info, 0, sizeof(info));
	buf[0] = '\0';

again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (!ASSERT_OK(err, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type"))
		return -1;
	if (!ASSERT_EQ(info.perf_event.type, type, "perf_type_match"))
		return -1;

	switch (info.perf_event.type) {
	case BPF_PERF_EVENT_KPROBE:
	case BPF_PERF_EVENT_KRETPROBE:
		ASSERT_EQ(info.perf_event.kprobe.offset, offset, "kprobe_offset");

		/* In case kernel.kptr_restrict is not permitted or MAX_SYMS is reached */
		if (addr)
			ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,
				  "kprobe_addr");

		ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie");

		if (!info.perf_event.kprobe.func_name) {
			ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
			info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
			info.perf_event.kprobe.name_len = sizeof(buf);
			goto again;
		}

		err = strncmp(u64_to_ptr(info.perf_event.kprobe.func_name), KPROBE_FUNC,
			      strlen(KPROBE_FUNC));
		ASSERT_EQ(err, 0, "cmp_kprobe_func_name");
		break;
	case BPF_PERF_EVENT_TRACEPOINT:
		if (!info.perf_event.tracepoint.tp_name) {
			ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len");
			info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
			info.perf_event.tracepoint.name_len = sizeof(buf);
			goto again;
		}

		ASSERT_EQ(info.perf_event.tracepoint.cookie, PERF_EVENT_COOKIE, "tracepoint_cookie");

		err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
			      strlen(TP_NAME));
		ASSERT_EQ(err, 0, "cmp_tp_name");
		break;
	case BPF_PERF_EVENT_UPROBE:
	case BPF_PERF_EVENT_URETPROBE:
		ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");

		if (!info.perf_event.uprobe.file_name) {
			ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len");
			info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
			info.perf_event.uprobe.name_len = sizeof(buf);
			goto again;
		}

		ASSERT_EQ(info.perf_event.uprobe.cookie, PERF_EVENT_COOKIE, "uprobe_cookie");

		err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
			      strlen(UPROBE_FILE));
		ASSERT_EQ(err, 0, "cmp_file_name");
		break;
	case BPF_PERF_EVENT_EVENT:
		ASSERT_EQ(info.perf_event.event.type, PERF_TYPE_SOFTWARE, "event_type");
		ASSERT_EQ(info.perf_event.event.config, PERF_COUNT_SW_PAGE_FAULTS, "event_config");
		ASSERT_EQ(info.perf_event.event.cookie, PERF_EVENT_COOKIE, "event_cookie");
		break;
	default:
		err = -1;
		break;
	}
	return err;
}
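
/*
 * verify_perf_link_info() above uses the two-call convention of
 * bpf_link_get_info_by_fd(): the first call passes no name buffer
 * (func_name/tp_name/file_name == NULL, name_len == 0), and once that
 * succeeds the test points the pointer at a local buffer, sets name_len
 * and retries via the "again" label to fetch the name.
 *
 * The helper below drives the negative cases instead: a buffer pointer
 * without a length, or a length without a buffer, must fail with -EINVAL,
 * a bogus user-space pointer must fail with -EFAULT, and on failure the
 * kernel must not have filled in any of the fields.
 */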
"event_type"); 114 ASSERT_EQ(info.perf_event.event.config, PERF_COUNT_SW_PAGE_FAULTS, "event_config"); 115 ASSERT_EQ(info.perf_event.event.cookie, PERF_EVENT_COOKIE, "event_cookie"); 116 break; 117 default: 118 err = -1; 119 break; 120 } 121 return err; 122 } 123 124 static void kprobe_fill_invalid_user_buffer(int fd) 125 { 126 struct bpf_link_info info; 127 __u32 len = sizeof(info); 128 int err; 129 130 memset(&info, 0, sizeof(info)); 131 132 info.perf_event.kprobe.func_name = 0x1; /* invalid address */ 133 err = bpf_link_get_info_by_fd(fd, &info, &len); 134 ASSERT_EQ(err, -EINVAL, "invalid_buff_and_len"); 135 136 info.perf_event.kprobe.name_len = 64; 137 err = bpf_link_get_info_by_fd(fd, &info, &len); 138 ASSERT_EQ(err, -EFAULT, "invalid_buff"); 139 140 info.perf_event.kprobe.func_name = 0; 141 err = bpf_link_get_info_by_fd(fd, &info, &len); 142 ASSERT_EQ(err, -EINVAL, "invalid_len"); 143 144 ASSERT_EQ(info.perf_event.kprobe.addr, 0, "func_addr"); 145 ASSERT_EQ(info.perf_event.kprobe.offset, 0, "func_offset"); 146 ASSERT_EQ(info.perf_event.type, 0, "type"); 147 } 148 149 static void test_kprobe_fill_link_info(struct test_fill_link_info *skel, 150 enum bpf_perf_event_type type, 151 bool invalid) 152 { 153 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, 154 .attach_mode = PROBE_ATTACH_MODE_LINK, 155 .retprobe = type == BPF_PERF_EVENT_KRETPROBE, 156 .bpf_cookie = PERF_EVENT_COOKIE, 157 ); 158 ssize_t entry_offset = 0; 159 struct bpf_link *link; 160 int link_fd, err; 161 162 link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run, KPROBE_FUNC, &opts); 163 if (!ASSERT_OK_PTR(link, "attach_kprobe")) 164 return; 165 166 link_fd = bpf_link__fd(link); 167 if (!invalid) { 168 /* See also arch_adjust_kprobe_addr(). */ 169 if (skel->kconfig->CONFIG_X86_KERNEL_IBT) 170 entry_offset = 4; 171 err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset); 172 ASSERT_OK(err, "verify_perf_link_info"); 173 } else { 174 kprobe_fill_invalid_user_buffer(link_fd); 175 } 176 bpf_link__destroy(link); 177 } 178 179 static void test_tp_fill_link_info(struct test_fill_link_info *skel) 180 { 181 DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts, 182 .bpf_cookie = PERF_EVENT_COOKIE, 183 ); 184 struct bpf_link *link; 185 int link_fd, err; 186 187 link = bpf_program__attach_tracepoint_opts(skel->progs.tp_run, TP_CAT, TP_NAME, &opts); 188 if (!ASSERT_OK_PTR(link, "attach_tp")) 189 return; 190 191 link_fd = bpf_link__fd(link); 192 err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0); 193 ASSERT_OK(err, "verify_perf_link_info"); 194 bpf_link__destroy(link); 195 } 196 197 static void test_event_fill_link_info(struct test_fill_link_info *skel) 198 { 199 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts, 200 .bpf_cookie = PERF_EVENT_COOKIE, 201 ); 202 struct bpf_link *link; 203 int link_fd, err, pfd; 204 struct perf_event_attr attr = { 205 .type = PERF_TYPE_SOFTWARE, 206 .config = PERF_COUNT_SW_PAGE_FAULTS, 207 .freq = 1, 208 .sample_freq = 1, 209 .size = sizeof(struct perf_event_attr), 210 }; 211 212 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu 0 */, 213 -1 /* group id */, 0 /* flags */); 214 if (!ASSERT_GE(pfd, 0, "perf_event_open")) 215 return; 216 217 link = bpf_program__attach_perf_event_opts(skel->progs.event_run, pfd, &opts); 218 if (!ASSERT_OK_PTR(link, "attach_event")) 219 goto error; 220 221 link_fd = bpf_link__fd(link); 222 err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_EVENT, 0, 0, 0); 223 ASSERT_OK(err, "verify_perf_link_info"); 224 bpf_link__destroy(link); 225 

static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts,
		.retprobe = type == BPF_PERF_EVENT_URETPROBE,
		.bpf_cookie = PERF_EVENT_COOKIE,
	);
	struct bpf_link *link;
	int link_fd, err;

	link = bpf_program__attach_uprobe_opts(skel->progs.uprobe_run,
					       0, /* self pid */
					       UPROBE_FILE, uprobe_offset,
					       &opts);
	if (!ASSERT_OK_PTR(link, "attach_uprobe"))
		return;

	link_fd = bpf_link__fd(link);
	err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0);
	ASSERT_OK(err, "verify_perf_link_info");
	bpf_link__destroy(link);
}

static int verify_kmulti_link_info(int fd, bool retprobe, bool has_cookies)
{
	__u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT];
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	int flags, i, err;

	memset(&info, 0, sizeof(info));

again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (!ASSERT_OK(err, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_KPROBE_MULTI, "kmulti_type"))
		return -1;

	ASSERT_EQ(info.kprobe_multi.count, KMULTI_CNT, "func_cnt");
	flags = info.kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN;
	if (!retprobe)
		ASSERT_EQ(flags, 0, "kmulti_flags");
	else
		ASSERT_NEQ(flags, 0, "kretmulti_flags");

	if (!info.kprobe_multi.addrs) {
		info.kprobe_multi.addrs = ptr_to_u64(addrs);
		info.kprobe_multi.cookies = ptr_to_u64(cookies);
		goto again;
	}
	for (i = 0; i < KMULTI_CNT; i++) {
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
		ASSERT_EQ(cookies[i], has_cookies ? kmulti_cookies[i] : 0,
			  "kmulti_cookies_value");
	}
	return 0;
}

static void verify_kmulti_invalid_user_buffer(int fd)
{
	__u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT];
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	int err, i;

	memset(&info, 0, sizeof(info));

	info.kprobe_multi.count = KMULTI_CNT;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "no_addr");

	info.kprobe_multi.addrs = ptr_to_u64(addrs);
	info.kprobe_multi.count = 0;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "no_cnt");

	for (i = 0; i < KMULTI_CNT; i++)
		addrs[i] = 0;
	info.kprobe_multi.count = KMULTI_CNT - 1;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -ENOSPC, "smaller_cnt");
	for (i = 0; i < KMULTI_CNT - 1; i++)
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
	ASSERT_EQ(addrs[i], 0, "kmulti_addrs");

	for (i = 0; i < KMULTI_CNT; i++)
		addrs[i] = 0;
	info.kprobe_multi.count = KMULTI_CNT + 1;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, 0, "bigger_cnt");
	for (i = 0; i < KMULTI_CNT; i++)
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");

	info.kprobe_multi.count = KMULTI_CNT;
	info.kprobe_multi.addrs = 0x1; /* invalid addr */
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "invalid_buff_addrs");

	info.kprobe_multi.count = KMULTI_CNT;
	info.kprobe_multi.addrs = ptr_to_u64(addrs);
	info.kprobe_multi.cookies = 0x1; /* invalid addr */
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "invalid_buff_cookies");

	/* cookies && !count */
	info.kprobe_multi.count = 0;
	info.kprobe_multi.addrs = ptr_to_u64(NULL);
	info.kprobe_multi.cookies = ptr_to_u64(cookies);
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "invalid_cookies_count");
}
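
/*
 * kprobe_multi links keep their addresses sorted internally (cookies are
 * looked up by address), so test_fill_link_info() qsort()s kmulti_syms
 * with the comparator below before resolving kmulti_addrs, keeping the
 * expected addrs[]/cookies[] arrays aligned with what the kernel reports.
 */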

static int symbols_cmp_r(const void *a, const void *b)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
					     bool retprobe, bool cookies,
					     bool invalid)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	struct bpf_link *link;
	int link_fd, err;

	opts.syms = kmulti_syms;
	opts.cookies = cookies ? kmulti_cookies : NULL;
	opts.cnt = KMULTI_CNT;
	opts.retprobe = retprobe;
	link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts);
	if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
		return;

	link_fd = bpf_link__fd(link);
	if (!invalid) {
		err = verify_kmulti_link_info(link_fd, retprobe, cookies);
		ASSERT_OK(err, "verify_kmulti_link_info");
	} else {
		verify_kmulti_invalid_user_buffer(link_fd);
	}
	bpf_link__destroy(link);
}

#define SEC(name) __attribute__((section(name), used))

static short uprobe_link_info_sema_1 SEC(".probes");
static short uprobe_link_info_sema_2 SEC(".probes");
static short uprobe_link_info_sema_3 SEC(".probes");

noinline void uprobe_link_info_func_1(void)
{
	asm volatile ("");
	uprobe_link_info_sema_1++;
}

noinline void uprobe_link_info_func_2(void)
{
	asm volatile ("");
	uprobe_link_info_sema_2++;
}

noinline void uprobe_link_info_func_3(void)
{
	asm volatile ("");
	uprobe_link_info_sema_3++;
}

static int
verify_umulti_link_info(int fd, bool retprobe, __u64 *offsets,
			__u64 *cookies, __u64 *ref_ctr_offsets)
{
	char path[PATH_MAX], path_buf[PATH_MAX];
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 ref_ctr_offsets_buf[3];
	__u64 offsets_buf[3];
	__u64 cookies_buf[3];
	int i, err, bit;
	__u32 count = 0;

	memset(path, 0, sizeof(path));
	err = readlink("/proc/self/exe", path, sizeof(path));
	if (!ASSERT_NEQ(err, -1, "readlink"))
		return -1;

	for (bit = 0; bit < 8; bit++) {
		memset(&info, 0, sizeof(info));
		info.uprobe_multi.path = ptr_to_u64(path_buf);
		info.uprobe_multi.path_size = sizeof(path_buf);
		info.uprobe_multi.count = count;

		if (bit & 0x1)
			info.uprobe_multi.offsets = ptr_to_u64(offsets_buf);
		if (bit & 0x2)
			info.uprobe_multi.cookies = ptr_to_u64(cookies_buf);
		if (bit & 0x4)
			info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets_buf);

		err = bpf_link_get_info_by_fd(fd, &info, &len);
		if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
			return -1;

		if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_UPROBE_MULTI, "info.type"))
			return -1;

		ASSERT_EQ(info.uprobe_multi.pid, getpid(), "info.uprobe_multi.pid");
		ASSERT_EQ(info.uprobe_multi.count, 3, "info.uprobe_multi.count");
		ASSERT_EQ(info.uprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN,
			  retprobe, "info.uprobe_multi.flags.retprobe");
		ASSERT_EQ(info.uprobe_multi.path_size, strlen(path) + 1, "info.uprobe_multi.path_size");
		ASSERT_STREQ(path_buf, path, "info.uprobe_multi.path");

		for (i = 0; i < info.uprobe_multi.count; i++) {
			if (info.uprobe_multi.offsets)
				ASSERT_EQ(offsets_buf[i], offsets[i], "info.uprobe_multi.offsets");
			if (info.uprobe_multi.cookies)
				ASSERT_EQ(cookies_buf[i], cookies[i], "info.uprobe_multi.cookies");
			if (info.uprobe_multi.ref_ctr_offsets) {
				ASSERT_EQ(ref_ctr_offsets_buf[i], ref_ctr_offsets[i],
					  "info.uprobe_multi.ref_ctr_offsets");
			}
		}
		count = count ?: info.uprobe_multi.count;
	}

	return 0;
}
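
/*
 * verify_umulti_link_info() above walks all eight combinations of the
 * optional offsets/cookies/ref_ctr_offsets output buffers (one bit per
 * buffer); the first pass runs with count == 0 and the kernel-reported
 * count seeds the remaining passes.
 *
 * The helper below feeds the same link deliberately inconsistent buffers
 * and expects -EINVAL, -ENOSPC or -EFAULT depending on the mismatch.
 */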

static void verify_umulti_invalid_user_buffer(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 buf[3];
	int err;

	/* upath_size defined, not path */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.path_size = 3;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "failed_upath_size");

	/* path defined, but small */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.path = ptr_to_u64(buf);
	info.uprobe_multi.path_size = 3;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_LT(err, 0, "failed_upath_small");

	/* path has wrong pointer */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.path_size = PATH_MAX;
	info.uprobe_multi.path = 123;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "failed_bad_path_ptr");

	/* count zero, with offsets */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.offsets = ptr_to_u64(buf);
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "failed_count");

	/* offsets not big enough */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.offsets = ptr_to_u64(buf);
	info.uprobe_multi.count = 2;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -ENOSPC, "failed_small_count");

	/* offsets has wrong pointer */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.offsets = 123;
	info.uprobe_multi.count = 3;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "failed_wrong_offsets");
}

static void test_uprobe_multi_fill_link_info(struct test_fill_link_info *skel,
					     bool retprobe, bool invalid)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
		.retprobe = retprobe,
	);
	const char *syms[3] = {
		"uprobe_link_info_func_1",
		"uprobe_link_info_func_2",
		"uprobe_link_info_func_3",
	};
	__u64 cookies[3] = {
		0xdead,
		0xbeef,
		0xcafe,
	};
	const char *sema[3] = {
		"uprobe_link_info_sema_1",
		"uprobe_link_info_sema_2",
		"uprobe_link_info_sema_3",
	};
	__u64 *offsets = NULL, *ref_ctr_offsets;
	struct bpf_link *link;
	int link_fd, err;

	err = elf_resolve_syms_offsets("/proc/self/exe", 3, sema,
				       (unsigned long **) &ref_ctr_offsets, STT_OBJECT);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_object"))
		return;

	err = elf_resolve_syms_offsets("/proc/self/exe", 3, syms,
				       (unsigned long **) &offsets, STT_FUNC);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
		goto out;

	opts.syms = syms;
	opts.cookies = &cookies[0];
	opts.ref_ctr_offsets = (unsigned long *) &ref_ctr_offsets[0];
	opts.cnt = ARRAY_SIZE(syms);

	link = bpf_program__attach_uprobe_multi(skel->progs.umulti_run, 0,
						"/proc/self/exe", NULL, &opts);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
		goto out;

	link_fd = bpf_link__fd(link);
	if (invalid)
		verify_umulti_invalid_user_buffer(link_fd);
	else
		verify_umulti_link_info(link_fd, retprobe, offsets, cookies, ref_ctr_offsets);

	bpf_link__destroy(link);
out:
	free(ref_ctr_offsets);
	free(offsets);
}
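
/*
 * Test entry point. kallsyms is loaded up front so the addresses the
 * kernel reports can be compared against ksym_get_addr(); when a symbol
 * cannot be resolved (e.g. kernel.kptr_restrict), kprobe_addr stays 0 and
 * verify_perf_link_info() skips the address comparison.
 */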

void test_fill_link_info(void)
{
	struct test_fill_link_info *skel;
	int i;

	skel = test_fill_link_info__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* load kallsyms to compare the addr */
	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
		goto cleanup;

	kprobe_addr = ksym_get_addr(KPROBE_FUNC);
	if (test__start_subtest("kprobe_link_info"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false);
	if (test__start_subtest("kretprobe_link_info"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KRETPROBE, false);
	if (test__start_subtest("kprobe_invalid_ubuff"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true);
	if (test__start_subtest("tracepoint_link_info"))
		test_tp_fill_link_info(skel);
	if (test__start_subtest("event_link_info"))
		test_event_fill_link_info(skel);

	uprobe_offset = get_uprobe_offset(&uprobe_func);
	if (test__start_subtest("uprobe_link_info"))
		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_UPROBE);
	if (test__start_subtest("uretprobe_link_info"))
		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_URETPROBE);

	qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r);
	for (i = 0; i < KMULTI_CNT; i++)
		kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]);
	if (test__start_subtest("kprobe_multi_link_info")) {
		test_kprobe_multi_fill_link_info(skel, false, false, false);
		test_kprobe_multi_fill_link_info(skel, false, true, false);
	}
	if (test__start_subtest("kretprobe_multi_link_info")) {
		test_kprobe_multi_fill_link_info(skel, true, false, false);
		test_kprobe_multi_fill_link_info(skel, true, true, false);
	}
	if (test__start_subtest("kprobe_multi_invalid_ubuff"))
		test_kprobe_multi_fill_link_info(skel, true, true, true);

	if (test__start_subtest("uprobe_multi_link_info"))
		test_uprobe_multi_fill_link_info(skel, false, false);
	if (test__start_subtest("uretprobe_multi_link_info"))
		test_uprobe_multi_fill_link_info(skel, true, false);
	if (test__start_subtest("uprobe_multi_invalid"))
		test_uprobe_multi_fill_link_info(skel, false, true);

cleanup:
	test_fill_link_info__destroy(skel);
}