// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <test_progs.h>
#include <network_helpers.h>
#include <linux/if_tun.h>
#include <sys/uio.h>

#include "bpf_flow.skel.h"

#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */

#ifndef IP_MF
#define IP_MF 0x2000
#endif

/* Compare a dissected bpf_flow_keys against the expected template and
 * log each got/expected field pair on mismatch.
 */
#define CHECK_FLOW_KEYS(desc, got, expected)				\
	_CHECK(memcmp(&got, &expected, sizeof(got)) != 0,		\
	       desc,							\
	       topts.duration,						\
	       "nhoff=%u/%u "						\
	       "thoff=%u/%u "						\
	       "addr_proto=0x%x/0x%x "					\
	       "is_frag=%u/%u "						\
	       "is_first_frag=%u/%u "					\
	       "is_encap=%u/%u "					\
	       "ip_proto=0x%x/0x%x "					\
	       "n_proto=0x%x/0x%x "					\
	       "flow_label=0x%x/0x%x "					\
	       "sport=%u/%u "						\
	       "dport=%u/%u\n",						\
	       got.nhoff, expected.nhoff,				\
	       got.thoff, expected.thoff,				\
	       got.addr_proto, expected.addr_proto,			\
	       got.is_frag, expected.is_frag,				\
	       got.is_first_frag, expected.is_first_frag,		\
	       got.is_encap, expected.is_encap,				\
	       got.ip_proto, expected.ip_proto,				\
	       got.n_proto, expected.n_proto,				\
	       got.flow_label, expected.flow_label,			\
	       got.sport, expected.sport,				\
	       got.dport, expected.dport)

struct ipv4_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct ipip_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct iphdr iph_inner;
	struct tcphdr tcp;
} __packed;

struct svlan_ipv4_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct ipv6_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

struct ipv6_frag_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct frag_hdr {
		__u8 nexthdr;
		__u8 reserved;
		__be16 frag_off;
		__be32 identification;
	} ipf;
	struct tcphdr tcp;
} __packed;

struct dvlan_ipv6_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	__u16 vlan_tci2;
	__u16 vlan_proto2;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

struct test {
	const char *name;
	union {
		struct ipv4_pkt ipv4;
		struct svlan_ipv4_pkt svlan_ipv4;
		struct ipip_pkt ipip;
		struct ipv6_pkt ipv6;
		struct ipv6_frag_pkt ipv6_frag;
		struct dvlan_ipv6_pkt dvlan_ipv6;
	} pkt;
	struct bpf_flow_keys keys;
	__u32 flags;
	__u32 retval;
};

#define VLAN_HLEN	4

static __u32 duration;
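
/*
 * Each entry below pairs a synthetic packet with the bpf_flow_keys the
 * dissector is expected to report for it.  A non-zero .flags value is
 * passed to BPF_PROG_TEST_RUN as the run context (and is mirrored in
 * .keys.flags), roughly:
 *
 *	struct bpf_flow_keys ctx = { .flags = tests[i].flags };
 *
 *	topts.ctx_in = &ctx;
 *	topts.ctx_size_in = sizeof(ctx);
 *
 * .retval is the return code expected from the dissector program.
 */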
struct test tests[] = {
	{
		.name = "ipv4",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	{
		.name = "ipv6",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	{
		.name = "802.1q-ipv4",
		.pkt.svlan_ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN,
			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	{
		.name = "802.1ad-ipv6",
		.pkt.dvlan_ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
				 sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	{
		.name = "ipv4-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
		.retval = BPF_OK,
	},
	{
		.name = "ipv4-no-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
		},
		.retval = BPF_OK,
	},
	{
		.name = "ipv6-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				 sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
		.retval = BPF_OK,
	},
	{
		.name = "ipv6-no-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				 sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
		},
		.retval = BPF_OK,
	},
	{
		.name = "ipv6-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
		.retval = BPF_OK,
	},
	{
		.name = "ipv6-no-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
		.retval = BPF_OK,
	},
	{
		.name = "ipv6-empty-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0x00, 0x00, 0x00 },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
		.retval = BPF_OK,
	},
	{
		.name = "ipip-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr) +
				 sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	{
		.name = "ipip-no-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_IPIP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
		.retval = BPF_OK,
	},
	{
		.name = "ipip-encap-dissector-continue",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.saddr = __bpf_constant_htonl(FLOW_CONTINUE_SADDR),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 99,
			.tcp.dest = 9090,
		},
		.retval = BPF_FLOW_DISSECTOR_CONTINUE,
	},
};
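
/*
 * Helpers for the skb-less tests: create a tap device in
 * IFF_NAPI | IFF_NAPI_FRAGS mode, bring it up, and inject the raw test
 * packets with writev() so they are dissected on the eth_get_headlen()
 * path in the net/tun driver.
 */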
static int create_tap(const char *ifname)
{
	struct ifreq ifr = {
		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
	};
	int fd, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TUNSETIFF, &ifr);
	if (ret) {
		close(fd);
		return -1;
	}

	return fd;
}

static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov[] = {
		{
			.iov_len = len,
			.iov_base = pkt,
		},
	};
	return writev(fd, iov, ARRAY_SIZE(iov));
}

static int ifup(const char *ifname)
{
	struct ifreq ifr = {};
	int sk, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	sk = socket(PF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;

	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	ifr.ifr_flags |= IFF_UP;
	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	close(sk);
	return 0;
}

static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
{
	int i, err, map_fd, prog_fd;
	struct bpf_program *prog;
	char prog_name[32];

	map_fd = bpf_map__fd(prog_array);
	if (map_fd < 0)
		return -1;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (!prog)
			return -1;

		prog_fd = bpf_program__fd(prog);
		if (prog_fd < 0)
			return -1;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (err)
			return -1;
	}
	return 0;
}
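
/*
 * In the skb-less case the dissector program exports the resulting
 * bpf_flow_keys through the 'keys' map (last_dissection), keyed by the
 * packet's ports:
 *
 *	__u32 key = (__u32)(tests[i].keys.sport) << 16 | tests[i].keys.dport;
 *
 * so after transmitting a packet through the tap device we look the
 * entry up, compare it against the expected keys and delete it again.
 */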
static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
{
	int i, err, keys_fd;

	keys_fd = bpf_map__fd(keys);
	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
		return;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		/* Keep in sync with 'flags' from eth_get_headlen. */
		__u32 eth_get_headlen_flags =
			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
		LIBBPF_OPTS(bpf_test_run_opts, topts);
		struct bpf_flow_keys flow_keys = {};
		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
			    tests[i].keys.dport;

		/* For skb-less case we can't pass input flags; run
		 * only the tests that have a matching set of flags.
		 */
		if (tests[i].flags != eth_get_headlen_flags)
			continue;

		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);

		/* check the stored flow_keys only if BPF_OK expected */
		if (tests[i].retval != BPF_OK)
			continue;

		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
		ASSERT_OK(err, "bpf_map_lookup_elem");

		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);

		err = bpf_map_delete_elem(keys_fd, &key);
		ASSERT_OK(err, "bpf_map_delete_elem");
	}
}

static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
{
	int err, prog_fd;

	prog_fd = bpf_program__fd(skel->progs._dissect);
	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
		return;

	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
		return;

	run_tests_skb_less(tap_fd, skel->maps.last_dissection);

	err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
	CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
}

static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
{
	struct bpf_link *link;
	int err, net_fd;

	net_fd = open("/proc/self/ns/net", O_RDONLY);
	if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
		return;

	link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
	if (!ASSERT_OK_PTR(link, "attach_netns"))
		goto out_close;

	run_tests_skb_less(tap_fd, skel->maps.last_dissection);

	err = bpf_link__destroy(link);
	CHECK(err, "bpf_link__destroy", "err %d\n", err);
out_close:
	close(net_fd);
}
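
/*
 * Entry point: run every test case through BPF_PROG_TEST_RUN first,
 * then repeat the applicable cases over a tap device (skb-less path),
 * once with a raw bpf_prog_attach() and once with a BPF link attached
 * to the network namespace.
 */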
void test_flow_dissector(void)
{
	int i, err, prog_fd, keys_fd = -1, tap_fd;
	struct bpf_flow *skel;

	skel = bpf_flow__open_and_load();
	if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
		return;

	prog_fd = bpf_program__fd(skel->progs._dissect);
	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
		goto out_destroy_skel;
	keys_fd = bpf_map__fd(skel->maps.last_dissection);
	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
		goto out_destroy_skel;
	err = init_prog_array(skel->obj, skel->maps.jmp_table);
	if (CHECK(err, "init_prog_array", "err %d\n", err))
		goto out_destroy_skel;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys;
		LIBBPF_OPTS(bpf_test_run_opts, topts,
			    .data_in = &tests[i].pkt,
			    .data_size_in = sizeof(tests[i].pkt),
			    .data_out = &flow_keys,
		);
		static struct bpf_flow_keys ctx = {};

		if (tests[i].flags) {
			topts.ctx_in = &ctx;
			topts.ctx_size_in = sizeof(ctx);
			ctx.flags = tests[i].flags;
		}

		err = bpf_prog_test_run_opts(prog_fd, &topts);
		ASSERT_OK(err, "test_run");
		ASSERT_EQ(topts.retval, tests[i].retval, "test_run retval");

		/* check the resulting flow_keys only if BPF_OK returned */
		if (topts.retval != BPF_OK)
			continue;
		ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
			  "test_run data_size_out");
		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
	}

	/* Do the same tests but for skb-less flow dissector.
	 * We use a known path in the net/tun driver that calls
	 * eth_get_headlen and we manually export bpf_flow_keys
	 * via BPF map in this case.
	 */
	tap_fd = create_tap("tap0");
	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
	err = ifup("tap0");
	CHECK(err, "ifup", "err %d errno %d\n", err, errno);

	/* Test direct prog attachment */
	test_skb_less_prog_attach(skel, tap_fd);
	/* Test indirect prog attachment via link */
	test_skb_less_link_create(skel, tap_fd);

	close(tap_fd);
out_destroy_skel:
	bpf_flow__destroy(skel);
}
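
/*
 * Note: this test is normally driven by the BPF selftests harness
 * (test_progs); something like "./test_progs -t flow_dissector" should
 * select it, assuming the standard selftests build and layout.
 */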