1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2023 Isovalent */ 3 #include <uapi/linux/if_link.h> 4 #include <uapi/linux/pkt_sched.h> 5 #include <net/if.h> 6 #include <test_progs.h> 7 8 #define loopback 1 9 #define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null" 10 11 #include "test_tc_link.skel.h" 12 13 #include "netlink_helpers.h" 14 #include "tc_helpers.h" 15 16 void serial_test_tc_links_basic(void) 17 { 18 LIBBPF_OPTS(bpf_prog_query_opts, optq); 19 LIBBPF_OPTS(bpf_tcx_opts, optl); 20 __u32 prog_ids[2], link_ids[2]; 21 __u32 pid1, pid2, lid1, lid2; 22 struct test_tc_link *skel; 23 struct bpf_link *link; 24 int err; 25 26 skel = test_tc_link__open_and_load(); 27 if (!ASSERT_OK_PTR(skel, "skel_load")) 28 goto cleanup; 29 30 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 31 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 32 33 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 34 35 assert_mprog_count(BPF_TCX_INGRESS, 0); 36 assert_mprog_count(BPF_TCX_EGRESS, 0); 37 38 ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); 39 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); 40 41 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 42 if (!ASSERT_OK_PTR(link, "link_attach")) 43 goto cleanup; 44 45 skel->links.tc1 = link; 46 47 lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); 48 49 assert_mprog_count(BPF_TCX_INGRESS, 1); 50 assert_mprog_count(BPF_TCX_EGRESS, 0); 51 52 optq.prog_ids = prog_ids; 53 optq.link_ids = link_ids; 54 55 memset(prog_ids, 0, sizeof(prog_ids)); 56 memset(link_ids, 0, sizeof(link_ids)); 57 optq.count = ARRAY_SIZE(prog_ids); 58 59 err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq); 60 if (!ASSERT_OK(err, "prog_query")) 61 goto cleanup; 62 63 ASSERT_EQ(optq.count, 1, "count"); 64 ASSERT_EQ(optq.revision, 2, "revision"); 65 ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); 66 ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); 67 ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); 68 ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); 69 70 tc_skel_reset_all_seen(skel); 71 ASSERT_OK(system(ping_cmd), ping_cmd); 72 73 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 74 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); 75 76 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 77 if (!ASSERT_OK_PTR(link, "link_attach")) 78 goto cleanup; 79 80 skel->links.tc2 = link; 81 82 lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); 83 ASSERT_NEQ(lid1, lid2, "link_ids_1_2"); 84 85 assert_mprog_count(BPF_TCX_INGRESS, 1); 86 assert_mprog_count(BPF_TCX_EGRESS, 1); 87 88 memset(prog_ids, 0, sizeof(prog_ids)); 89 memset(link_ids, 0, sizeof(link_ids)); 90 optq.count = ARRAY_SIZE(prog_ids); 91 92 err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq); 93 if (!ASSERT_OK(err, "prog_query")) 94 goto cleanup; 95 96 ASSERT_EQ(optq.count, 1, "count"); 97 ASSERT_EQ(optq.revision, 2, "revision"); 98 ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); 99 ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); 100 ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); 101 ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); 102 103 tc_skel_reset_all_seen(skel); 104 ASSERT_OK(system(ping_cmd), ping_cmd); 105 106 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 107 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 108 cleanup: 109 test_tc_link__destroy(skel); 110 111 assert_mprog_count(BPF_TCX_INGRESS, 0); 112 assert_mprog_count(BPF_TCX_EGRESS, 0); 113 } 114 115 static void test_tc_links_before_target(int target) 116 { 117 LIBBPF_OPTS(bpf_prog_query_opts, optq); 118 LIBBPF_OPTS(bpf_tcx_opts, optl); 119 __u32 prog_ids[5], link_ids[5]; 120 __u32 pid1, pid2, pid3, pid4; 121 __u32 lid1, lid2, lid3, lid4; 122 struct test_tc_link *skel; 123 struct bpf_link *link; 124 int err; 125 126 skel = test_tc_link__open(); 127 if (!ASSERT_OK_PTR(skel, "skel_open")) 128 goto cleanup; 129 130 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 131 0, "tc1_attach_type"); 132 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 133 0, "tc2_attach_type"); 134 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), 135 0, "tc3_attach_type"); 136 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), 137 0, "tc4_attach_type"); 138 139 err = test_tc_link__load(skel); 140 if (!ASSERT_OK(err, "skel_load")) 141 goto cleanup; 142 143 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 144 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 145 pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); 146 pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); 147 148 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 149 ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); 150 ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); 151 152 assert_mprog_count(target, 0); 153 154 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 155 if (!ASSERT_OK_PTR(link, "link_attach")) 156 goto cleanup; 157 158 skel->links.tc1 = link; 159 160 lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); 161 162 assert_mprog_count(target, 1); 163 164 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 165 if (!ASSERT_OK_PTR(link, "link_attach")) 166 goto cleanup; 167 168 skel->links.tc2 = link; 169 170 lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); 171 172 assert_mprog_count(target, 2); 173 174 optq.prog_ids = prog_ids; 175 optq.link_ids = link_ids; 176 177 memset(prog_ids, 0, sizeof(prog_ids)); 178 memset(link_ids, 0, sizeof(link_ids)); 179 optq.count = ARRAY_SIZE(prog_ids); 180 181 err = bpf_prog_query_opts(loopback, target, &optq); 182 if (!ASSERT_OK(err, "prog_query")) 183 goto cleanup; 184 185 ASSERT_EQ(optq.count, 2, "count"); 186 ASSERT_EQ(optq.revision, 3, "revision"); 187 ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); 188 ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); 189 ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); 190 ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); 191 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 192 ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); 193 194 tc_skel_reset_all_seen(skel); 195 ASSERT_OK(system(ping_cmd), ping_cmd); 196 197 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 198 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 199 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 200 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 201 202 LIBBPF_OPTS_RESET(optl, 203 .flags = BPF_F_BEFORE, 204 .relative_fd = bpf_program__fd(skel->progs.tc2), 205 ); 206 207 link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); 208 if (!ASSERT_OK_PTR(link, "link_attach")) 209 goto cleanup; 210 211 skel->links.tc3 = link; 212 213 lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); 214 215 LIBBPF_OPTS_RESET(optl, 216 .flags = BPF_F_BEFORE | BPF_F_LINK, 217 .relative_id = lid1, 218 ); 219 220 link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); 221 if (!ASSERT_OK_PTR(link, "link_attach")) 222 goto cleanup; 223 224 skel->links.tc4 = link; 225 226 lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); 227 228 assert_mprog_count(target, 4); 229 230 memset(prog_ids, 0, sizeof(prog_ids)); 231 memset(link_ids, 0, sizeof(link_ids)); 232 optq.count = ARRAY_SIZE(prog_ids); 233 234 err = bpf_prog_query_opts(loopback, target, &optq); 235 if (!ASSERT_OK(err, "prog_query")) 236 goto cleanup; 237 238 ASSERT_EQ(optq.count, 4, "count"); 239 ASSERT_EQ(optq.revision, 5, "revision"); 240 ASSERT_EQ(optq.prog_ids[0], pid4, "prog_ids[0]"); 241 ASSERT_EQ(optq.link_ids[0], lid4, "link_ids[0]"); 242 ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); 243 ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); 244 ASSERT_EQ(optq.prog_ids[2], pid3, "prog_ids[2]"); 245 ASSERT_EQ(optq.link_ids[2], lid3, "link_ids[2]"); 246 ASSERT_EQ(optq.prog_ids[3], pid2, "prog_ids[3]"); 247 ASSERT_EQ(optq.link_ids[3], lid2, "link_ids[3]"); 248 ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); 249 ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); 250 251 tc_skel_reset_all_seen(skel); 252 ASSERT_OK(system(ping_cmd), ping_cmd); 253 254 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 255 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 256 ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); 257 ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); 258 cleanup: 259 test_tc_link__destroy(skel); 260 assert_mprog_count(target, 0); 261 } 262 263 void serial_test_tc_links_before(void) 264 { 265 test_tc_links_before_target(BPF_TCX_INGRESS); 266 test_tc_links_before_target(BPF_TCX_EGRESS); 267 } 268 269 static void test_tc_links_after_target(int target) 270 { 271 LIBBPF_OPTS(bpf_prog_query_opts, optq); 272 LIBBPF_OPTS(bpf_tcx_opts, optl); 273 __u32 prog_ids[5], link_ids[5]; 274 __u32 pid1, pid2, pid3, pid4; 275 __u32 lid1, lid2, lid3, lid4; 276 struct test_tc_link *skel; 277 struct bpf_link *link; 278 int err; 279 280 skel = test_tc_link__open(); 281 if (!ASSERT_OK_PTR(skel, "skel_open")) 282 goto cleanup; 283 284 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 285 0, "tc1_attach_type"); 286 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 287 0, "tc2_attach_type"); 288 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), 289 0, "tc3_attach_type"); 290 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), 291 0, "tc4_attach_type"); 292 293 err = test_tc_link__load(skel); 294 if (!ASSERT_OK(err, "skel_load")) 295 goto cleanup; 296 297 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 298 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 299 pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); 300 pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); 301 302 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 303 ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); 304 ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); 305 306 assert_mprog_count(target, 0); 307 308 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 309 if (!ASSERT_OK_PTR(link, "link_attach")) 310 goto cleanup; 311 312 skel->links.tc1 = link; 313 314 lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); 315 316 assert_mprog_count(target, 1); 317 318 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 319 if (!ASSERT_OK_PTR(link, "link_attach")) 320 goto cleanup; 321 322 skel->links.tc2 = link; 323 324 lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); 325 326 assert_mprog_count(target, 2); 327 328 optq.prog_ids = prog_ids; 329 optq.link_ids = link_ids; 330 331 memset(prog_ids, 0, sizeof(prog_ids)); 332 memset(link_ids, 0, sizeof(link_ids)); 333 optq.count = ARRAY_SIZE(prog_ids); 334 335 err = bpf_prog_query_opts(loopback, target, &optq); 336 if (!ASSERT_OK(err, "prog_query")) 337 goto cleanup; 338 339 ASSERT_EQ(optq.count, 2, "count"); 340 ASSERT_EQ(optq.revision, 3, "revision"); 341 ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); 342 ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); 343 ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); 344 ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); 345 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 346 ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); 347 348 tc_skel_reset_all_seen(skel); 349 ASSERT_OK(system(ping_cmd), ping_cmd); 350 351 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 352 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 353 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 354 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 355 356 LIBBPF_OPTS_RESET(optl, 357 .flags = BPF_F_AFTER, 358 .relative_fd = bpf_program__fd(skel->progs.tc1), 359 ); 360 361 link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); 362 if (!ASSERT_OK_PTR(link, "link_attach")) 363 goto cleanup; 364 365 skel->links.tc3 = link; 366 367 lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); 368 369 LIBBPF_OPTS_RESET(optl, 370 .flags = BPF_F_AFTER | BPF_F_LINK, 371 .relative_fd = bpf_link__fd(skel->links.tc2), 372 ); 373 374 link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); 375 if (!ASSERT_OK_PTR(link, "link_attach")) 376 goto cleanup; 377 378 skel->links.tc4 = link; 379 380 lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); 381 382 assert_mprog_count(target, 4); 383 384 memset(prog_ids, 0, sizeof(prog_ids)); 385 memset(link_ids, 0, sizeof(link_ids)); 386 optq.count = ARRAY_SIZE(prog_ids); 387 388 err = bpf_prog_query_opts(loopback, target, &optq); 389 if (!ASSERT_OK(err, "prog_query")) 390 goto cleanup; 391 392 ASSERT_EQ(optq.count, 4, "count"); 393 ASSERT_EQ(optq.revision, 5, "revision"); 394 ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); 395 ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); 396 ASSERT_EQ(optq.prog_ids[1], pid3, "prog_ids[1]"); 397 ASSERT_EQ(optq.link_ids[1], lid3, "link_ids[1]"); 398 ASSERT_EQ(optq.prog_ids[2], pid2, "prog_ids[2]"); 399 ASSERT_EQ(optq.link_ids[2], lid2, "link_ids[2]"); 400 ASSERT_EQ(optq.prog_ids[3], pid4, "prog_ids[3]"); 401 ASSERT_EQ(optq.link_ids[3], lid4, "link_ids[3]"); 402 ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); 403 ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); 404 405 tc_skel_reset_all_seen(skel); 406 ASSERT_OK(system(ping_cmd), ping_cmd); 407 408 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 409 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 410 ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); 411 ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); 412 cleanup: 413 test_tc_link__destroy(skel); 414 assert_mprog_count(target, 0); 415 } 416 417 void serial_test_tc_links_after(void) 418 { 419 test_tc_links_after_target(BPF_TCX_INGRESS); 420 test_tc_links_after_target(BPF_TCX_EGRESS); 421 } 422 423 static void test_tc_links_revision_target(int target) 424 { 425 LIBBPF_OPTS(bpf_prog_query_opts, optq); 426 LIBBPF_OPTS(bpf_tcx_opts, optl); 427 __u32 prog_ids[3], link_ids[3]; 428 __u32 pid1, pid2, lid1, lid2; 429 struct test_tc_link *skel; 430 struct bpf_link *link; 431 int err; 432 433 skel = test_tc_link__open(); 434 if (!ASSERT_OK_PTR(skel, "skel_open")) 435 goto cleanup; 436 437 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 438 0, "tc1_attach_type"); 439 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 440 0, "tc2_attach_type"); 441 442 err = test_tc_link__load(skel); 443 if (!ASSERT_OK(err, "skel_load")) 444 goto cleanup; 445 446 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 447 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 448 449 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 450 451 assert_mprog_count(target, 0); 452 453 optl.expected_revision = 1; 454 455 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 456 if (!ASSERT_OK_PTR(link, "link_attach")) 457 goto cleanup; 458 459 skel->links.tc1 = link; 460 461 lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); 462 463 assert_mprog_count(target, 1); 464 465 optl.expected_revision = 1; 466 467 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 468 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 469 bpf_link__destroy(link); 470 goto cleanup; 471 } 472 473 assert_mprog_count(target, 1); 474 475 optl.expected_revision = 2; 476 477 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 478 if (!ASSERT_OK_PTR(link, "link_attach")) 479 goto cleanup; 480 481 skel->links.tc2 = link; 482 483 lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); 484 485 assert_mprog_count(target, 2); 486 487 optq.prog_ids = prog_ids; 488 optq.link_ids = link_ids; 489 490 memset(prog_ids, 0, sizeof(prog_ids)); 491 memset(link_ids, 0, sizeof(link_ids)); 492 optq.count = ARRAY_SIZE(prog_ids); 493 494 err = bpf_prog_query_opts(loopback, target, &optq); 495 if (!ASSERT_OK(err, "prog_query")) 496 goto cleanup; 497 498 ASSERT_EQ(optq.count, 2, "count"); 499 ASSERT_EQ(optq.revision, 3, "revision"); 500 ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); 501 ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); 502 ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); 503 ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); 504 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 505 ASSERT_EQ(optq.link_ids[2], 0, "prog_ids[2]"); 506 507 tc_skel_reset_all_seen(skel); 508 ASSERT_OK(system(ping_cmd), ping_cmd); 509 510 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 511 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 512 cleanup: 513 test_tc_link__destroy(skel); 514 assert_mprog_count(target, 0); 515 } 516 517 void serial_test_tc_links_revision(void) 518 { 519 test_tc_links_revision_target(BPF_TCX_INGRESS); 520 test_tc_links_revision_target(BPF_TCX_EGRESS); 521 } 522 523 static void test_tc_chain_classic(int target, bool chain_tc_old) 524 { 525 LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); 526 LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); 527 bool hook_created = false, tc_attached = false; 528 LIBBPF_OPTS(bpf_tcx_opts, optl); 529 __u32 pid1, pid2, pid3; 530 struct test_tc_link *skel; 531 struct bpf_link *link; 532 int err; 533 534 skel = test_tc_link__open(); 535 if (!ASSERT_OK_PTR(skel, "skel_open")) 536 goto cleanup; 537 538 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 539 0, "tc1_attach_type"); 540 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 541 0, "tc2_attach_type"); 542 543 err = test_tc_link__load(skel); 544 if (!ASSERT_OK(err, "skel_load")) 545 goto cleanup; 546 547 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 548 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 549 pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); 550 551 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 552 ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); 553 554 assert_mprog_count(target, 0); 555 556 if (chain_tc_old) { 557 tc_hook.attach_point = target == BPF_TCX_INGRESS ? 558 BPF_TC_INGRESS : BPF_TC_EGRESS; 559 err = bpf_tc_hook_create(&tc_hook); 560 if (err == 0) 561 hook_created = true; 562 err = err == -EEXIST ? 0 : err; 563 if (!ASSERT_OK(err, "bpf_tc_hook_create")) 564 goto cleanup; 565 566 tc_opts.prog_fd = bpf_program__fd(skel->progs.tc3); 567 err = bpf_tc_attach(&tc_hook, &tc_opts); 568 if (!ASSERT_OK(err, "bpf_tc_attach")) 569 goto cleanup; 570 tc_attached = true; 571 } 572 573 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 574 if (!ASSERT_OK_PTR(link, "link_attach")) 575 goto cleanup; 576 577 skel->links.tc1 = link; 578 579 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 580 if (!ASSERT_OK_PTR(link, "link_attach")) 581 goto cleanup; 582 583 skel->links.tc2 = link; 584 585 assert_mprog_count(target, 2); 586 587 tc_skel_reset_all_seen(skel); 588 ASSERT_OK(system(ping_cmd), ping_cmd); 589 590 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 591 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 592 ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); 593 594 err = bpf_link__detach(skel->links.tc2); 595 if (!ASSERT_OK(err, "prog_detach")) 596 goto cleanup; 597 598 assert_mprog_count(target, 1); 599 600 tc_skel_reset_all_seen(skel); 601 ASSERT_OK(system(ping_cmd), ping_cmd); 602 603 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 604 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); 605 ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); 606 cleanup: 607 if (tc_attached) { 608 tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; 609 err = bpf_tc_detach(&tc_hook, &tc_opts); 610 ASSERT_OK(err, "bpf_tc_detach"); 611 } 612 if (hook_created) { 613 tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; 614 bpf_tc_hook_destroy(&tc_hook); 615 } 616 assert_mprog_count(target, 1); 617 test_tc_link__destroy(skel); 618 assert_mprog_count(target, 0); 619 } 620 621 void serial_test_tc_links_chain_classic(void) 622 { 623 test_tc_chain_classic(BPF_TCX_INGRESS, false); 624 test_tc_chain_classic(BPF_TCX_EGRESS, false); 625 test_tc_chain_classic(BPF_TCX_INGRESS, true); 626 test_tc_chain_classic(BPF_TCX_EGRESS, true); 627 } 628 629 static void test_tc_links_replace_target(int target) 630 { 631 LIBBPF_OPTS(bpf_prog_query_opts, optq); 632 LIBBPF_OPTS(bpf_tcx_opts, optl); 633 __u32 pid1, pid2, pid3, lid1, lid2; 634 __u32 prog_ids[4], link_ids[4]; 635 struct test_tc_link *skel; 636 struct bpf_link *link; 637 int err; 638 639 skel = test_tc_link__open(); 640 if (!ASSERT_OK_PTR(skel, "skel_open")) 641 goto cleanup; 642 643 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 644 0, "tc1_attach_type"); 645 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 646 0, "tc2_attach_type"); 647 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), 648 0, "tc3_attach_type"); 649 650 err = test_tc_link__load(skel); 651 if (!ASSERT_OK(err, "skel_load")) 652 goto cleanup; 653 654 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 655 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 656 pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); 657 658 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 659 ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); 660 661 assert_mprog_count(target, 0); 662 663 optl.expected_revision = 1; 664 665 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 666 if (!ASSERT_OK_PTR(link, "link_attach")) 667 goto cleanup; 668 669 skel->links.tc1 = link; 670 671 lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); 672 673 assert_mprog_count(target, 1); 674 675 LIBBPF_OPTS_RESET(optl, 676 .flags = BPF_F_BEFORE, 677 .relative_id = pid1, 678 .expected_revision = 2, 679 ); 680 681 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 682 if (!ASSERT_OK_PTR(link, "link_attach")) 683 goto cleanup; 684 685 skel->links.tc2 = link; 686 687 lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); 688 689 assert_mprog_count(target, 2); 690 691 optq.prog_ids = prog_ids; 692 optq.link_ids = link_ids; 693 694 memset(prog_ids, 0, sizeof(prog_ids)); 695 memset(link_ids, 0, sizeof(link_ids)); 696 optq.count = ARRAY_SIZE(prog_ids); 697 698 err = bpf_prog_query_opts(loopback, target, &optq); 699 if (!ASSERT_OK(err, "prog_query")) 700 goto cleanup; 701 702 ASSERT_EQ(optq.count, 2, "count"); 703 ASSERT_EQ(optq.revision, 3, "revision"); 704 ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); 705 ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); 706 ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); 707 ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); 708 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 709 ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); 710 711 tc_skel_reset_all_seen(skel); 712 ASSERT_OK(system(ping_cmd), ping_cmd); 713 714 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 715 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 716 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 717 718 LIBBPF_OPTS_RESET(optl, 719 .flags = BPF_F_REPLACE, 720 .relative_fd = bpf_program__fd(skel->progs.tc2), 721 .expected_revision = 3, 722 ); 723 724 link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); 725 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 726 bpf_link__destroy(link); 727 goto cleanup; 728 } 729 730 assert_mprog_count(target, 2); 731 732 LIBBPF_OPTS_RESET(optl, 733 .flags = BPF_F_REPLACE | BPF_F_LINK, 734 .relative_fd = bpf_link__fd(skel->links.tc2), 735 .expected_revision = 3, 736 ); 737 738 link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); 739 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 740 bpf_link__destroy(link); 741 goto cleanup; 742 } 743 744 assert_mprog_count(target, 2); 745 746 LIBBPF_OPTS_RESET(optl, 747 .flags = BPF_F_REPLACE | BPF_F_LINK | BPF_F_AFTER, 748 .relative_id = lid2, 749 ); 750 751 link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); 752 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 753 bpf_link__destroy(link); 754 goto cleanup; 755 } 756 757 assert_mprog_count(target, 2); 758 759 err = bpf_link__update_program(skel->links.tc2, skel->progs.tc3); 760 if (!ASSERT_OK(err, "link_update")) 761 goto cleanup; 762 763 assert_mprog_count(target, 2); 764 765 memset(prog_ids, 0, sizeof(prog_ids)); 766 memset(link_ids, 0, sizeof(link_ids)); 767 optq.count = ARRAY_SIZE(prog_ids); 768 769 err = bpf_prog_query_opts(loopback, target, &optq); 770 if (!ASSERT_OK(err, "prog_query")) 771 goto cleanup; 772 773 ASSERT_EQ(optq.count, 2, "count"); 774 ASSERT_EQ(optq.revision, 4, "revision"); 775 ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]"); 776 ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); 777 ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); 778 ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); 779 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 780 ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); 781 782 tc_skel_reset_all_seen(skel); 783 ASSERT_OK(system(ping_cmd), ping_cmd); 784 785 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 786 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); 787 ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); 788 789 err = bpf_link__detach(skel->links.tc2); 790 if (!ASSERT_OK(err, "link_detach")) 791 goto cleanup; 792 793 assert_mprog_count(target, 1); 794 795 memset(prog_ids, 0, sizeof(prog_ids)); 796 memset(link_ids, 0, sizeof(link_ids)); 797 optq.count = ARRAY_SIZE(prog_ids); 798 799 err = bpf_prog_query_opts(loopback, target, &optq); 800 if (!ASSERT_OK(err, "prog_query")) 801 goto cleanup; 802 803 ASSERT_EQ(optq.count, 1, "count"); 804 ASSERT_EQ(optq.revision, 5, "revision"); 805 ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); 806 ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); 807 ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); 808 ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); 809 810 tc_skel_reset_all_seen(skel); 811 ASSERT_OK(system(ping_cmd), ping_cmd); 812 813 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 814 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); 815 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 816 817 err = bpf_link__update_program(skel->links.tc1, skel->progs.tc1); 818 if (!ASSERT_OK(err, "link_update_self")) 819 goto cleanup; 820 821 assert_mprog_count(target, 1); 822 823 memset(prog_ids, 0, sizeof(prog_ids)); 824 memset(link_ids, 0, sizeof(link_ids)); 825 optq.count = ARRAY_SIZE(prog_ids); 826 827 err = bpf_prog_query_opts(loopback, target, &optq); 828 if (!ASSERT_OK(err, "prog_query")) 829 goto cleanup; 830 831 ASSERT_EQ(optq.count, 1, "count"); 832 ASSERT_EQ(optq.revision, 5, "revision"); 833 ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); 834 ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); 835 ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); 836 ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); 837 838 tc_skel_reset_all_seen(skel); 839 ASSERT_OK(system(ping_cmd), ping_cmd); 840 841 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 842 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); 843 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 844 cleanup: 845 test_tc_link__destroy(skel); 846 assert_mprog_count(target, 0); 847 } 848 849 void serial_test_tc_links_replace(void) 850 { 851 test_tc_links_replace_target(BPF_TCX_INGRESS); 852 test_tc_links_replace_target(BPF_TCX_EGRESS); 853 } 854 855 static void test_tc_links_invalid_target(int target) 856 { 857 LIBBPF_OPTS(bpf_prog_query_opts, optq); 858 LIBBPF_OPTS(bpf_tcx_opts, optl); 859 __u32 pid1, pid2, lid1; 860 struct test_tc_link *skel; 861 struct bpf_link *link; 862 int err; 863 864 skel = test_tc_link__open(); 865 if (!ASSERT_OK_PTR(skel, "skel_open")) 866 goto cleanup; 867 868 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 869 0, "tc1_attach_type"); 870 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 871 0, "tc2_attach_type"); 872 873 err = test_tc_link__load(skel); 874 if (!ASSERT_OK(err, "skel_load")) 875 goto cleanup; 876 877 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 878 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 879 880 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 881 882 assert_mprog_count(target, 0); 883 884 optl.flags = BPF_F_BEFORE | BPF_F_AFTER; 885 886 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 887 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 888 bpf_link__destroy(link); 889 goto cleanup; 890 } 891 892 assert_mprog_count(target, 0); 893 894 LIBBPF_OPTS_RESET(optl, 895 .flags = BPF_F_BEFORE | BPF_F_ID, 896 ); 897 898 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 899 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 900 bpf_link__destroy(link); 901 goto cleanup; 902 } 903 904 assert_mprog_count(target, 0); 905 906 LIBBPF_OPTS_RESET(optl, 907 .flags = BPF_F_AFTER | BPF_F_ID, 908 ); 909 910 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 911 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 912 bpf_link__destroy(link); 913 goto cleanup; 914 } 915 916 assert_mprog_count(target, 0); 917 918 LIBBPF_OPTS_RESET(optl, 919 .flags = BPF_F_ID, 920 ); 921 922 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 923 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 924 bpf_link__destroy(link); 925 goto cleanup; 926 } 927 928 assert_mprog_count(target, 0); 929 930 LIBBPF_OPTS_RESET(optl, 931 .flags = BPF_F_LINK, 932 .relative_fd = bpf_program__fd(skel->progs.tc2), 933 ); 934 935 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 936 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 937 bpf_link__destroy(link); 938 goto cleanup; 939 } 940 941 assert_mprog_count(target, 0); 942 943 LIBBPF_OPTS_RESET(optl, 944 .flags = BPF_F_LINK, 945 ); 946 947 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 948 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 949 bpf_link__destroy(link); 950 goto cleanup; 951 } 952 953 assert_mprog_count(target, 0); 954 955 LIBBPF_OPTS_RESET(optl, 956 .relative_fd = bpf_program__fd(skel->progs.tc2), 957 ); 958 959 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 960 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 961 bpf_link__destroy(link); 962 goto cleanup; 963 } 964 965 assert_mprog_count(target, 0); 966 967 LIBBPF_OPTS_RESET(optl, 968 .flags = BPF_F_BEFORE | BPF_F_AFTER, 969 .relative_fd = bpf_program__fd(skel->progs.tc2), 970 ); 971 972 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 973 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 974 bpf_link__destroy(link); 975 goto cleanup; 976 } 977 978 assert_mprog_count(target, 0); 979 980 LIBBPF_OPTS_RESET(optl, 981 .flags = BPF_F_BEFORE, 982 .relative_fd = bpf_program__fd(skel->progs.tc1), 983 ); 984 985 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 986 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 987 bpf_link__destroy(link); 988 goto cleanup; 989 } 990 991 assert_mprog_count(target, 0); 992 993 LIBBPF_OPTS_RESET(optl, 994 .flags = BPF_F_ID, 995 .relative_id = pid2, 996 ); 997 998 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 999 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 1000 bpf_link__destroy(link); 1001 goto cleanup; 1002 } 1003 1004 assert_mprog_count(target, 0); 1005 1006 LIBBPF_OPTS_RESET(optl, 1007 .flags = BPF_F_ID, 1008 .relative_id = 42, 1009 ); 1010 1011 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 1012 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 1013 bpf_link__destroy(link); 1014 goto cleanup; 1015 } 1016 1017 assert_mprog_count(target, 0); 1018 1019 LIBBPF_OPTS_RESET(optl, 1020 .flags = BPF_F_BEFORE, 1021 .relative_fd = bpf_program__fd(skel->progs.tc1), 1022 ); 1023 1024 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 1025 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 1026 bpf_link__destroy(link); 1027 goto cleanup; 1028 } 1029 1030 assert_mprog_count(target, 0); 1031 1032 LIBBPF_OPTS_RESET(optl, 1033 .flags = BPF_F_BEFORE | BPF_F_LINK, 1034 .relative_fd = bpf_program__fd(skel->progs.tc1), 1035 ); 1036 1037 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 1038 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 1039 bpf_link__destroy(link); 1040 goto cleanup; 1041 } 1042 1043 assert_mprog_count(target, 0); 1044 1045 LIBBPF_OPTS_RESET(optl, 1046 .flags = BPF_F_AFTER, 1047 .relative_fd = bpf_program__fd(skel->progs.tc1), 1048 ); 1049 1050 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 1051 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 1052 bpf_link__destroy(link); 1053 goto cleanup; 1054 } 1055 1056 assert_mprog_count(target, 0); 1057 1058 LIBBPF_OPTS_RESET(optl); 1059 1060 link = bpf_program__attach_tcx(skel->progs.tc1, 0, &optl); 1061 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 1062 bpf_link__destroy(link); 1063 goto cleanup; 1064 } 1065 1066 assert_mprog_count(target, 0); 1067 1068 LIBBPF_OPTS_RESET(optl, 1069 .flags = BPF_F_AFTER | BPF_F_LINK, 1070 .relative_fd = bpf_program__fd(skel->progs.tc1), 1071 ); 1072 1073 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 1074 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 1075 bpf_link__destroy(link); 1076 goto cleanup; 1077 } 1078 1079 assert_mprog_count(target, 0); 1080 1081 LIBBPF_OPTS_RESET(optl); 1082 1083 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 1084 if (!ASSERT_OK_PTR(link, "link_attach")) 1085 goto cleanup; 1086 1087 skel->links.tc1 = link; 1088 1089 lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); 1090 1091 assert_mprog_count(target, 1); 1092 1093 LIBBPF_OPTS_RESET(optl, 1094 .flags = BPF_F_AFTER | BPF_F_LINK, 1095 .relative_fd = bpf_program__fd(skel->progs.tc1), 1096 ); 1097 1098 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 1099 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 1100 bpf_link__destroy(link); 1101 goto cleanup; 1102 } 1103 1104 assert_mprog_count(target, 1); 1105 1106 LIBBPF_OPTS_RESET(optl, 1107 .flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID, 1108 .relative_id = ~0, 1109 ); 1110 1111 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 1112 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 1113 bpf_link__destroy(link); 1114 goto cleanup; 1115 } 1116 1117 assert_mprog_count(target, 1); 1118 1119 LIBBPF_OPTS_RESET(optl, 1120 .flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID, 1121 .relative_id = lid1, 1122 ); 1123 1124 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 1125 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 1126 bpf_link__destroy(link); 1127 goto cleanup; 1128 } 1129 1130 assert_mprog_count(target, 1); 1131 1132 LIBBPF_OPTS_RESET(optl, 1133 .flags = BPF_F_BEFORE | BPF_F_ID, 1134 .relative_id = pid1, 1135 ); 1136 1137 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 1138 if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { 1139 bpf_link__destroy(link); 1140 goto cleanup; 1141 } 1142 assert_mprog_count(target, 1); 1143 1144 LIBBPF_OPTS_RESET(optl, 1145 .flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID, 1146 .relative_id = lid1, 1147 ); 1148 1149 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 1150 if (!ASSERT_OK_PTR(link, "link_attach")) 1151 goto cleanup; 1152 1153 skel->links.tc2 = link; 1154 1155 assert_mprog_count(target, 2); 1156 cleanup: 1157 test_tc_link__destroy(skel); 1158 assert_mprog_count(target, 0); 1159 } 1160 1161 void serial_test_tc_links_invalid(void) 1162 { 1163 test_tc_links_invalid_target(BPF_TCX_INGRESS); 1164 test_tc_links_invalid_target(BPF_TCX_EGRESS); 1165 } 1166 1167 static void test_tc_links_prepend_target(int target) 1168 { 1169 LIBBPF_OPTS(bpf_prog_query_opts, optq); 1170 LIBBPF_OPTS(bpf_tcx_opts, optl); 1171 __u32 prog_ids[5], link_ids[5]; 1172 __u32 pid1, pid2, pid3, pid4; 1173 __u32 lid1, lid2, lid3, lid4; 1174 struct test_tc_link *skel; 1175 struct bpf_link *link; 1176 int err; 1177 1178 skel = test_tc_link__open(); 1179 if (!ASSERT_OK_PTR(skel, "skel_open")) 1180 goto cleanup; 1181 1182 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 1183 0, "tc1_attach_type"); 1184 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 1185 0, "tc2_attach_type"); 1186 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), 1187 0, "tc3_attach_type"); 1188 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), 1189 0, "tc4_attach_type"); 1190 1191 err = test_tc_link__load(skel); 1192 if (!ASSERT_OK(err, "skel_load")) 1193 goto cleanup; 1194 1195 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 1196 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 1197 pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); 1198 pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); 1199 1200 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 1201 ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); 1202 ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); 1203 1204 assert_mprog_count(target, 0); 1205 1206 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 1207 if (!ASSERT_OK_PTR(link, "link_attach")) 1208 goto cleanup; 1209 1210 skel->links.tc1 = link; 1211 1212 lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); 1213 1214 assert_mprog_count(target, 1); 1215 1216 LIBBPF_OPTS_RESET(optl, 1217 .flags = BPF_F_BEFORE, 1218 ); 1219 1220 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 1221 if (!ASSERT_OK_PTR(link, "link_attach")) 1222 goto cleanup; 1223 1224 skel->links.tc2 = link; 1225 1226 lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); 1227 1228 assert_mprog_count(target, 2); 1229 1230 optq.prog_ids = prog_ids; 1231 optq.link_ids = link_ids; 1232 1233 memset(prog_ids, 0, sizeof(prog_ids)); 1234 memset(link_ids, 0, sizeof(link_ids)); 1235 optq.count = ARRAY_SIZE(prog_ids); 1236 1237 err = bpf_prog_query_opts(loopback, target, &optq); 1238 if (!ASSERT_OK(err, "prog_query")) 1239 goto cleanup; 1240 1241 ASSERT_EQ(optq.count, 2, "count"); 1242 ASSERT_EQ(optq.revision, 3, "revision"); 1243 ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); 1244 ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); 1245 ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); 1246 ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); 1247 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 1248 ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); 1249 1250 tc_skel_reset_all_seen(skel); 1251 ASSERT_OK(system(ping_cmd), ping_cmd); 1252 1253 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 1254 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 1255 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 1256 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 1257 1258 LIBBPF_OPTS_RESET(optl, 1259 .flags = BPF_F_BEFORE, 1260 ); 1261 1262 link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); 1263 if (!ASSERT_OK_PTR(link, "link_attach")) 1264 goto cleanup; 1265 1266 skel->links.tc3 = link; 1267 1268 lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); 1269 1270 LIBBPF_OPTS_RESET(optl, 1271 .flags = BPF_F_BEFORE, 1272 ); 1273 1274 link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); 1275 if (!ASSERT_OK_PTR(link, "link_attach")) 1276 goto cleanup; 1277 1278 skel->links.tc4 = link; 1279 1280 lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); 1281 1282 assert_mprog_count(target, 4); 1283 1284 memset(prog_ids, 0, sizeof(prog_ids)); 1285 memset(link_ids, 0, sizeof(link_ids)); 1286 optq.count = ARRAY_SIZE(prog_ids); 1287 1288 err = bpf_prog_query_opts(loopback, target, &optq); 1289 if (!ASSERT_OK(err, "prog_query")) 1290 goto cleanup; 1291 1292 ASSERT_EQ(optq.count, 4, "count"); 1293 ASSERT_EQ(optq.revision, 5, "revision"); 1294 ASSERT_EQ(optq.prog_ids[0], pid4, "prog_ids[0]"); 1295 ASSERT_EQ(optq.link_ids[0], lid4, "link_ids[0]"); 1296 ASSERT_EQ(optq.prog_ids[1], pid3, "prog_ids[1]"); 1297 ASSERT_EQ(optq.link_ids[1], lid3, "link_ids[1]"); 1298 ASSERT_EQ(optq.prog_ids[2], pid2, "prog_ids[2]"); 1299 ASSERT_EQ(optq.link_ids[2], lid2, "link_ids[2]"); 1300 ASSERT_EQ(optq.prog_ids[3], pid1, "prog_ids[3]"); 1301 ASSERT_EQ(optq.link_ids[3], lid1, "link_ids[3]"); 1302 ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); 1303 ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); 1304 1305 tc_skel_reset_all_seen(skel); 1306 ASSERT_OK(system(ping_cmd), ping_cmd); 1307 1308 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 1309 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 1310 ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); 1311 ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); 1312 cleanup: 1313 test_tc_link__destroy(skel); 1314 assert_mprog_count(target, 0); 1315 } 1316 1317 void serial_test_tc_links_prepend(void) 1318 { 1319 test_tc_links_prepend_target(BPF_TCX_INGRESS); 1320 test_tc_links_prepend_target(BPF_TCX_EGRESS); 1321 } 1322 1323 static void test_tc_links_append_target(int target) 1324 { 1325 LIBBPF_OPTS(bpf_prog_query_opts, optq); 1326 LIBBPF_OPTS(bpf_tcx_opts, optl); 1327 __u32 prog_ids[5], link_ids[5]; 1328 __u32 pid1, pid2, pid3, pid4; 1329 __u32 lid1, lid2, lid3, lid4; 1330 struct test_tc_link *skel; 1331 struct bpf_link *link; 1332 int err; 1333 1334 skel = test_tc_link__open(); 1335 if (!ASSERT_OK_PTR(skel, "skel_open")) 1336 goto cleanup; 1337 1338 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 1339 0, "tc1_attach_type"); 1340 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 1341 0, "tc2_attach_type"); 1342 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), 1343 0, "tc3_attach_type"); 1344 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), 1345 0, "tc4_attach_type"); 1346 1347 err = test_tc_link__load(skel); 1348 if (!ASSERT_OK(err, "skel_load")) 1349 goto cleanup; 1350 1351 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 1352 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 1353 pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); 1354 pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); 1355 1356 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 1357 ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); 1358 ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); 1359 1360 assert_mprog_count(target, 0); 1361 1362 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 1363 if (!ASSERT_OK_PTR(link, "link_attach")) 1364 goto cleanup; 1365 1366 skel->links.tc1 = link; 1367 1368 lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); 1369 1370 assert_mprog_count(target, 1); 1371 1372 LIBBPF_OPTS_RESET(optl, 1373 .flags = BPF_F_AFTER, 1374 ); 1375 1376 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 1377 if (!ASSERT_OK_PTR(link, "link_attach")) 1378 goto cleanup; 1379 1380 skel->links.tc2 = link; 1381 1382 lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); 1383 1384 assert_mprog_count(target, 2); 1385 1386 optq.prog_ids = prog_ids; 1387 optq.link_ids = link_ids; 1388 1389 memset(prog_ids, 0, sizeof(prog_ids)); 1390 memset(link_ids, 0, sizeof(link_ids)); 1391 optq.count = ARRAY_SIZE(prog_ids); 1392 1393 err = bpf_prog_query_opts(loopback, target, &optq); 1394 if (!ASSERT_OK(err, "prog_query")) 1395 goto cleanup; 1396 1397 ASSERT_EQ(optq.count, 2, "count"); 1398 ASSERT_EQ(optq.revision, 3, "revision"); 1399 ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); 1400 ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); 1401 ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); 1402 ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); 1403 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 1404 ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); 1405 1406 tc_skel_reset_all_seen(skel); 1407 ASSERT_OK(system(ping_cmd), ping_cmd); 1408 1409 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 1410 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 1411 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 1412 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 1413 1414 LIBBPF_OPTS_RESET(optl, 1415 .flags = BPF_F_AFTER, 1416 ); 1417 1418 link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); 1419 if (!ASSERT_OK_PTR(link, "link_attach")) 1420 goto cleanup; 1421 1422 skel->links.tc3 = link; 1423 1424 lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); 1425 1426 LIBBPF_OPTS_RESET(optl, 1427 .flags = BPF_F_AFTER, 1428 ); 1429 1430 link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); 1431 if (!ASSERT_OK_PTR(link, "link_attach")) 1432 goto cleanup; 1433 1434 skel->links.tc4 = link; 1435 1436 lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); 1437 1438 assert_mprog_count(target, 4); 1439 1440 memset(prog_ids, 0, sizeof(prog_ids)); 1441 memset(link_ids, 0, sizeof(link_ids)); 1442 optq.count = ARRAY_SIZE(prog_ids); 1443 1444 err = bpf_prog_query_opts(loopback, target, &optq); 1445 if (!ASSERT_OK(err, "prog_query")) 1446 goto cleanup; 1447 1448 ASSERT_EQ(optq.count, 4, "count"); 1449 ASSERT_EQ(optq.revision, 5, "revision"); 1450 ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); 1451 ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); 1452 ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); 1453 ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); 1454 ASSERT_EQ(optq.prog_ids[2], pid3, "prog_ids[2]"); 1455 ASSERT_EQ(optq.link_ids[2], lid3, "link_ids[2]"); 1456 ASSERT_EQ(optq.prog_ids[3], pid4, "prog_ids[3]"); 1457 ASSERT_EQ(optq.link_ids[3], lid4, "link_ids[3]"); 1458 ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); 1459 ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); 1460 1461 tc_skel_reset_all_seen(skel); 1462 ASSERT_OK(system(ping_cmd), ping_cmd); 1463 1464 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 1465 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 1466 ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); 1467 ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); 1468 cleanup: 1469 test_tc_link__destroy(skel); 1470 assert_mprog_count(target, 0); 1471 } 1472 1473 void serial_test_tc_links_append(void) 1474 { 1475 test_tc_links_append_target(BPF_TCX_INGRESS); 1476 test_tc_links_append_target(BPF_TCX_EGRESS); 1477 } 1478 1479 static void test_tc_links_dev_cleanup_target(int target) 1480 { 1481 LIBBPF_OPTS(bpf_tcx_opts, optl); 1482 LIBBPF_OPTS(bpf_prog_query_opts, optq); 1483 __u32 pid1, pid2, pid3, pid4; 1484 struct test_tc_link *skel; 1485 struct bpf_link *link; 1486 int err, ifindex; 1487 1488 ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth"); 1489 ifindex = if_nametoindex("tcx_opts1"); 1490 ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); 1491 1492 skel = test_tc_link__open(); 1493 if (!ASSERT_OK_PTR(skel, "skel_open")) 1494 goto cleanup; 1495 1496 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 1497 0, "tc1_attach_type"); 1498 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 1499 0, "tc2_attach_type"); 1500 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), 1501 0, "tc3_attach_type"); 1502 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), 1503 0, "tc4_attach_type"); 1504 1505 err = test_tc_link__load(skel); 1506 if (!ASSERT_OK(err, "skel_load")) 1507 goto cleanup; 1508 1509 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 1510 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 1511 pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); 1512 pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); 1513 1514 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 1515 ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); 1516 ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); 1517 1518 assert_mprog_count(target, 0); 1519 1520 link = bpf_program__attach_tcx(skel->progs.tc1, ifindex, &optl); 1521 if (!ASSERT_OK_PTR(link, "link_attach")) 1522 goto cleanup; 1523 1524 skel->links.tc1 = link; 1525 1526 assert_mprog_count_ifindex(ifindex, target, 1); 1527 1528 link = bpf_program__attach_tcx(skel->progs.tc2, ifindex, &optl); 1529 if (!ASSERT_OK_PTR(link, "link_attach")) 1530 goto cleanup; 1531 1532 skel->links.tc2 = link; 1533 1534 assert_mprog_count_ifindex(ifindex, target, 2); 1535 1536 link = bpf_program__attach_tcx(skel->progs.tc3, ifindex, &optl); 1537 if (!ASSERT_OK_PTR(link, "link_attach")) 1538 goto cleanup; 1539 1540 skel->links.tc3 = link; 1541 1542 assert_mprog_count_ifindex(ifindex, target, 3); 1543 1544 link = bpf_program__attach_tcx(skel->progs.tc4, ifindex, &optl); 1545 if (!ASSERT_OK_PTR(link, "link_attach")) 1546 goto cleanup; 1547 1548 skel->links.tc4 = link; 1549 1550 assert_mprog_count_ifindex(ifindex, target, 4); 1551 1552 ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); 1553 ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); 1554 ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); 1555 1556 ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc1)), 0, "tc1_ifindex"); 1557 ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc2)), 0, "tc2_ifindex"); 1558 ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc3)), 0, "tc3_ifindex"); 1559 ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc4)), 0, "tc4_ifindex"); 1560 1561 test_tc_link__destroy(skel); 1562 return; 1563 cleanup: 1564 test_tc_link__destroy(skel); 1565 1566 ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); 1567 ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); 1568 ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); 1569 } 1570 1571 void serial_test_tc_links_dev_cleanup(void) 1572 { 1573 test_tc_links_dev_cleanup_target(BPF_TCX_INGRESS); 1574 test_tc_links_dev_cleanup_target(BPF_TCX_EGRESS); 1575 } 1576 1577 static void test_tc_chain_mixed(int target) 1578 { 1579 LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); 1580 LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); 1581 LIBBPF_OPTS(bpf_tcx_opts, optl); 1582 struct test_tc_link *skel; 1583 struct bpf_link *link; 1584 __u32 pid1, pid2, pid3; 1585 int err; 1586 1587 skel = test_tc_link__open(); 1588 if (!ASSERT_OK_PTR(skel, "skel_open")) 1589 goto cleanup; 1590 1591 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), 1592 0, "tc4_attach_type"); 1593 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc5, target), 1594 0, "tc5_attach_type"); 1595 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc6, target), 1596 0, "tc6_attach_type"); 1597 1598 err = test_tc_link__load(skel); 1599 if (!ASSERT_OK(err, "skel_load")) 1600 goto cleanup; 1601 1602 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); 1603 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc5)); 1604 pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc6)); 1605 1606 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 1607 ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); 1608 1609 assert_mprog_count(target, 0); 1610 1611 tc_hook.attach_point = target == BPF_TCX_INGRESS ? 1612 BPF_TC_INGRESS : BPF_TC_EGRESS; 1613 err = bpf_tc_hook_create(&tc_hook); 1614 err = err == -EEXIST ? 0 : err; 1615 if (!ASSERT_OK(err, "bpf_tc_hook_create")) 1616 goto cleanup; 1617 1618 tc_opts.prog_fd = bpf_program__fd(skel->progs.tc5); 1619 err = bpf_tc_attach(&tc_hook, &tc_opts); 1620 if (!ASSERT_OK(err, "bpf_tc_attach")) 1621 goto cleanup; 1622 1623 link = bpf_program__attach_tcx(skel->progs.tc6, loopback, &optl); 1624 if (!ASSERT_OK_PTR(link, "link_attach")) 1625 goto cleanup; 1626 1627 skel->links.tc6 = link; 1628 1629 assert_mprog_count(target, 1); 1630 1631 tc_skel_reset_all_seen(skel); 1632 ASSERT_OK(system(ping_cmd), ping_cmd); 1633 1634 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 1635 ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5"); 1636 ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6"); 1637 1638 err = bpf_link__update_program(skel->links.tc6, skel->progs.tc4); 1639 if (!ASSERT_OK(err, "link_update")) 1640 goto cleanup; 1641 1642 assert_mprog_count(target, 1); 1643 1644 tc_skel_reset_all_seen(skel); 1645 ASSERT_OK(system(ping_cmd), ping_cmd); 1646 1647 ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); 1648 ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); 1649 ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); 1650 1651 err = bpf_link__detach(skel->links.tc6); 1652 if (!ASSERT_OK(err, "prog_detach")) 1653 goto cleanup; 1654 1655 assert_mprog_count(target, 0); 1656 1657 tc_skel_reset_all_seen(skel); 1658 ASSERT_OK(system(ping_cmd), ping_cmd); 1659 1660 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 1661 ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); 1662 ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); 1663 1664 cleanup: 1665 tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; 1666 err = bpf_tc_detach(&tc_hook, &tc_opts); 1667 ASSERT_OK(err, "bpf_tc_detach"); 1668 1669 tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; 1670 bpf_tc_hook_destroy(&tc_hook); 1671 1672 test_tc_link__destroy(skel); 1673 } 1674 1675 void serial_test_tc_links_chain_mixed(void) 1676 { 1677 test_tc_chain_mixed(BPF_TCX_INGRESS); 1678 test_tc_chain_mixed(BPF_TCX_EGRESS); 1679 } 1680 1681 static void test_tc_links_ingress(int target, bool chain_tc_old, 1682 bool tcx_teardown_first) 1683 { 1684 LIBBPF_OPTS(bpf_tc_opts, tc_opts, 1685 .handle = 1, 1686 .priority = 1, 1687 ); 1688 LIBBPF_OPTS(bpf_tc_hook, tc_hook, 1689 .ifindex = loopback, 1690 .attach_point = BPF_TC_CUSTOM, 1691 .parent = TC_H_INGRESS, 1692 ); 1693 bool hook_created = false, tc_attached = false; 1694 LIBBPF_OPTS(bpf_tcx_opts, optl); 1695 __u32 pid1, pid2, pid3; 1696 struct test_tc_link *skel; 1697 struct bpf_link *link; 1698 int err; 1699 1700 skel = test_tc_link__open(); 1701 if (!ASSERT_OK_PTR(skel, "skel_open")) 1702 goto cleanup; 1703 1704 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 1705 0, "tc1_attach_type"); 1706 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 1707 0, "tc2_attach_type"); 1708 1709 err = test_tc_link__load(skel); 1710 if (!ASSERT_OK(err, "skel_load")) 1711 goto cleanup; 1712 1713 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 1714 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 1715 pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); 1716 1717 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 1718 ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); 1719 1720 assert_mprog_count(target, 0); 1721 1722 if (chain_tc_old) { 1723 ASSERT_OK(system("tc qdisc add dev lo ingress"), "add_ingress"); 1724 hook_created = true; 1725 1726 tc_opts.prog_fd = bpf_program__fd(skel->progs.tc3); 1727 err = bpf_tc_attach(&tc_hook, &tc_opts); 1728 if (!ASSERT_OK(err, "bpf_tc_attach")) 1729 goto cleanup; 1730 tc_attached = true; 1731 } 1732 1733 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); 1734 if (!ASSERT_OK_PTR(link, "link_attach")) 1735 goto cleanup; 1736 1737 skel->links.tc1 = link; 1738 1739 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 1740 if (!ASSERT_OK_PTR(link, "link_attach")) 1741 goto cleanup; 1742 1743 skel->links.tc2 = link; 1744 1745 assert_mprog_count(target, 2); 1746 1747 tc_skel_reset_all_seen(skel); 1748 ASSERT_OK(system(ping_cmd), ping_cmd); 1749 1750 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 1751 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 1752 ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); 1753 1754 err = bpf_link__detach(skel->links.tc2); 1755 if (!ASSERT_OK(err, "prog_detach")) 1756 goto cleanup; 1757 1758 assert_mprog_count(target, 1); 1759 1760 tc_skel_reset_all_seen(skel); 1761 ASSERT_OK(system(ping_cmd), ping_cmd); 1762 1763 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 1764 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); 1765 ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); 1766 cleanup: 1767 if (tc_attached) { 1768 tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; 1769 err = bpf_tc_detach(&tc_hook, &tc_opts); 1770 ASSERT_OK(err, "bpf_tc_detach"); 1771 } 1772 ASSERT_OK(system(ping_cmd), ping_cmd); 1773 assert_mprog_count(target, 1); 1774 if (hook_created && tcx_teardown_first) 1775 ASSERT_OK(system("tc qdisc del dev lo ingress"), "del_ingress"); 1776 ASSERT_OK(system(ping_cmd), ping_cmd); 1777 test_tc_link__destroy(skel); 1778 ASSERT_OK(system(ping_cmd), ping_cmd); 1779 if (hook_created && !tcx_teardown_first) 1780 ASSERT_OK(system("tc qdisc del dev lo ingress"), "del_ingress"); 1781 ASSERT_OK(system(ping_cmd), ping_cmd); 1782 assert_mprog_count(target, 0); 1783 } 1784 1785 void serial_test_tc_links_ingress(void) 1786 { 1787 test_tc_links_ingress(BPF_TCX_INGRESS, true, true); 1788 test_tc_links_ingress(BPF_TCX_INGRESS, true, false); 1789 test_tc_links_ingress(BPF_TCX_INGRESS, false, false); 1790 } 1791 1792 struct qdisc_req { 1793 struct nlmsghdr n; 1794 struct tcmsg t; 1795 char buf[1024]; 1796 }; 1797 1798 static int qdisc_replace(int ifindex, const char *kind, bool block) 1799 { 1800 struct rtnl_handle rth = { .fd = -1 }; 1801 struct qdisc_req req; 1802 int err; 1803 1804 err = rtnl_open(&rth, 0); 1805 if (!ASSERT_OK(err, "open_rtnetlink")) 1806 return err; 1807 1808 memset(&req, 0, sizeof(req)); 1809 req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)); 1810 req.n.nlmsg_flags = NLM_F_CREATE | NLM_F_REPLACE | NLM_F_REQUEST; 1811 req.n.nlmsg_type = RTM_NEWQDISC; 1812 req.t.tcm_family = AF_UNSPEC; 1813 req.t.tcm_ifindex = ifindex; 1814 req.t.tcm_parent = 0xfffffff1; 1815 1816 addattr_l(&req.n, sizeof(req), TCA_KIND, kind, strlen(kind) + 1); 1817 if (block) 1818 addattr32(&req.n, sizeof(req), TCA_INGRESS_BLOCK, 1); 1819 1820 err = rtnl_talk(&rth, &req.n, NULL); 1821 ASSERT_OK(err, "talk_rtnetlink"); 1822 rtnl_close(&rth); 1823 return err; 1824 } 1825 1826 void serial_test_tc_links_dev_chain0(void) 1827 { 1828 int err, ifindex; 1829 1830 ASSERT_OK(system("ip link add dev foo type veth peer name bar"), "add veth"); 1831 ifindex = if_nametoindex("foo"); 1832 ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); 1833 err = qdisc_replace(ifindex, "ingress", true); 1834 if (!ASSERT_OK(err, "attaching ingress")) 1835 goto cleanup; 1836 ASSERT_OK(system("tc filter add block 1 matchall action skbmod swap mac"), "add block"); 1837 err = qdisc_replace(ifindex, "clsact", false); 1838 if (!ASSERT_OK(err, "attaching clsact")) 1839 goto cleanup; 1840 /* Heuristic: kern_sync_rcu() alone does not work; a wait-time of ~5s 1841 * triggered the issue without the fix reliably 100% of the time. 1842 */ 1843 sleep(5); 1844 ASSERT_OK(system("tc filter add dev foo ingress matchall action skbmod swap mac"), "add filter"); 1845 cleanup: 1846 ASSERT_OK(system("ip link del dev foo"), "del veth"); 1847 ASSERT_EQ(if_nametoindex("foo"), 0, "foo removed"); 1848 ASSERT_EQ(if_nametoindex("bar"), 0, "bar removed"); 1849 } 1850 1851 static void test_tc_links_dev_mixed(int target) 1852 { 1853 LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); 1854 LIBBPF_OPTS(bpf_tc_hook, tc_hook); 1855 LIBBPF_OPTS(bpf_tcx_opts, optl); 1856 __u32 pid1, pid2, pid3, pid4; 1857 struct test_tc_link *skel; 1858 struct bpf_link *link; 1859 int err, ifindex; 1860 1861 ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth"); 1862 ifindex = if_nametoindex("tcx_opts1"); 1863 ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); 1864 1865 skel = test_tc_link__open(); 1866 if (!ASSERT_OK_PTR(skel, "skel_open")) 1867 goto cleanup; 1868 1869 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 1870 0, "tc1_attach_type"); 1871 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 1872 0, "tc2_attach_type"); 1873 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), 1874 0, "tc3_attach_type"); 1875 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), 1876 0, "tc4_attach_type"); 1877 1878 err = test_tc_link__load(skel); 1879 if (!ASSERT_OK(err, "skel_load")) 1880 goto cleanup; 1881 1882 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 1883 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 1884 pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); 1885 pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); 1886 1887 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 1888 ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); 1889 ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); 1890 1891 assert_mprog_count(target, 0); 1892 1893 link = bpf_program__attach_tcx(skel->progs.tc1, ifindex, &optl); 1894 if (!ASSERT_OK_PTR(link, "link_attach")) 1895 goto cleanup; 1896 1897 skel->links.tc1 = link; 1898 1899 assert_mprog_count_ifindex(ifindex, target, 1); 1900 1901 link = bpf_program__attach_tcx(skel->progs.tc2, ifindex, &optl); 1902 if (!ASSERT_OK_PTR(link, "link_attach")) 1903 goto cleanup; 1904 1905 skel->links.tc2 = link; 1906 1907 assert_mprog_count_ifindex(ifindex, target, 2); 1908 1909 link = bpf_program__attach_tcx(skel->progs.tc3, ifindex, &optl); 1910 if (!ASSERT_OK_PTR(link, "link_attach")) 1911 goto cleanup; 1912 1913 skel->links.tc3 = link; 1914 1915 assert_mprog_count_ifindex(ifindex, target, 3); 1916 1917 link = bpf_program__attach_tcx(skel->progs.tc4, ifindex, &optl); 1918 if (!ASSERT_OK_PTR(link, "link_attach")) 1919 goto cleanup; 1920 1921 skel->links.tc4 = link; 1922 1923 assert_mprog_count_ifindex(ifindex, target, 4); 1924 1925 tc_hook.ifindex = ifindex; 1926 tc_hook.attach_point = target == BPF_TCX_INGRESS ? 1927 BPF_TC_INGRESS : BPF_TC_EGRESS; 1928 1929 err = bpf_tc_hook_create(&tc_hook); 1930 err = err == -EEXIST ? 0 : err; 1931 if (!ASSERT_OK(err, "bpf_tc_hook_create")) 1932 goto cleanup; 1933 1934 tc_opts.prog_fd = bpf_program__fd(skel->progs.tc5); 1935 err = bpf_tc_attach(&tc_hook, &tc_opts); 1936 if (!ASSERT_OK(err, "bpf_tc_attach")) 1937 goto cleanup; 1938 1939 ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); 1940 ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); 1941 ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); 1942 1943 ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc1)), 0, "tc1_ifindex"); 1944 ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc2)), 0, "tc2_ifindex"); 1945 ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc3)), 0, "tc3_ifindex"); 1946 ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc4)), 0, "tc4_ifindex"); 1947 1948 test_tc_link__destroy(skel); 1949 return; 1950 cleanup: 1951 test_tc_link__destroy(skel); 1952 1953 ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); 1954 ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); 1955 ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); 1956 } 1957 1958 void serial_test_tc_links_dev_mixed(void) 1959 { 1960 test_tc_links_dev_mixed(BPF_TCX_INGRESS); 1961 test_tc_links_dev_mixed(BPF_TCX_EGRESS); 1962 } 1963
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.