// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2020 Intel Corporation. */

/*
 * Some functions in this program are taken from
 * Linux kernel samples/bpf/xdpsock* and modified
 * for use.
 *
 * See test_xsk.sh for detailed information on test topology
 * and prerequisite network setup.
 *
 * This test program contains two threads, each handling a single socket with
 * a unique UMEM. It validates in-order packet delivery and packet content
 * by having the sockets send packets to each other.
 *
 * Tests Information:
 * ------------------
 * These selftests test AF_XDP SKB and Native/DRV modes using veth
 * Virtual Ethernet interfaces.
 *
 * For each mode, the following tests are run:
 *    a. nopoll - soft-irq processing in run-to-completion mode
 *    b. poll - using poll() syscall
 *    c. Socket Teardown
 *       Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy
 *       both sockets, then repeat multiple times. Only nopoll mode is used
 *    d. Bi-directional sockets
 *       Configure sockets as bi-directional tx/rx sockets, set up fill and
 *       completion rings on each socket, and tx/rx in both directions. Only
 *       nopoll mode is used
 *    e. Statistics
 *       Trigger some error conditions and ensure that the appropriate statistics
 *       are incremented. Within this test, the following statistics are tested:
 *       i.   rx dropped
 *            Increase the UMEM frame headroom to a value which results in
 *            insufficient space in the rx buffer for both the packet and the headroom.
 *       ii.  tx invalid
 *            Set the 'len' field of tx descriptors to an invalid value (umem frame
 *            size + 1).
 *       iii. rx ring full
 *            Reduce the size of the RX ring to a fraction of the fill ring size.
 *       iv.  fill queue empty
 *            Do not populate the fill queue and then try to receive pkts.
 *    f. bpf_link resource persistence
 *       Configure sockets at indexes 0 and 1, run traffic on queue id 0,
 *       then remove the xsk sockets from queue 0 on both veth interfaces and
 *       finally run traffic on queue id 1
 *    g. unaligned mode
 *    h. tests for invalid and corner case Tx descriptors, checking that the
 *       invalid ones are discarded and the correct ones let through
 *    i. 2K frame size tests
 *    j. If multi-buffer is supported, send 9k packets divided into 3 frames
 *    k. If multi-buffer and huge pages are supported, send 9k packets in a single frame
 *       using unaligned mode
 *    l. If multi-buffer is supported, try various nasty combinations of descriptors to
 *       check whether they pass the validation or not
 *
 * Flow:
 * -----
 * - A single process spawns two threads: Tx and Rx
 * - Each of these two threads attaches to a veth interface
 * - Each thread creates one AF_XDP socket connected to a unique umem for each
 *   veth interface
 * - The Tx thread transmits a number of packets from veth<xxxx> to veth<yyyy>
 * - The Rx thread verifies that all packets were received and delivered in order,
 *   and have the right content
 *
 * Enable/disable packet dump mode:
 * --------------------------------
 * To enable a dump of the L2 - L4 headers and payload of each packet on STDOUT,
 * add the parameter -D to the params array in test_xsk.sh, e.g. params=("-S" "-D")
 */
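
/* Example invocation (a sketch; the interface names are placeholders and the
 * veth pair must already be set up as described in test_xsk.sh):
 *
 *   ./xskxceiver -i veth0 -i veth1         # run every test in every mode
 *   ./xskxceiver -i veth0 -i veth1 -m skb  # restrict to SKB mode
 *   ./xskxceiver -i veth0 -i veth1 -l      # list the available tests
 */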

#define _GNU_SOURCE
#include <assert.h>
#include <fcntl.h>
#include <errno.h>
#include <getopt.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <linux/mman.h>
#include <linux/netdev.h>
#include <linux/bitmap.h>
#include <linux/ethtool.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>

#include "xsk_xdp_progs.skel.h"
#include "xsk.h"
#include "xskxceiver.h"
#include <bpf/bpf.h>
#include <linux/filter.h>
#include "../kselftest.h"
#include "xsk_xdp_common.h"

#include <network_helpers.h>

static bool opt_verbose;
static bool opt_print_tests;
static enum test_mode opt_mode = TEST_MODE_ALL;
static u32 opt_run_test = RUN_ALL_TESTS;

void test__fail(void) { /* for network_helpers.c */ }

static void __exit_with_error(int error, const char *file, const char *func, int line)
{
        ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error,
                              strerror(error));
        ksft_exit_xfail();
}

#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
#define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : ""
static char *mode_string(struct test_spec *test)
{
        switch (test->mode) {
        case TEST_MODE_SKB:
                return "SKB";
        case TEST_MODE_DRV:
                return "DRV";
        case TEST_MODE_ZC:
                return "ZC";
        default:
                return "BOGUS";
        }
}

static void report_failure(struct test_spec *test)
{
        if (test->fail)
                return;

        ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test),
                              test->name);
        test->fail = true;
}

/* Each word of the payload consists of a packet sequence number in the upper
 * 16 bits and an intra-packet data sequence number in the lower 16 bits. Both
 * are numbered from 0, so the 3rd packet's 5th word of data contains the
 * number (2 << 16) | 4.
 */
static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
{
        u32 *ptr = (u32 *)dest, i;

        start /= sizeof(*ptr);
        size /= sizeof(*ptr);
        for (i = 0; i < size; i++)
                ptr[i] = htonl(pkt_nb << 16 | (i + start));
}
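
/* Worked example of the numbering scheme above (values chosen purely for
 * illustration): write_payload(data, 2, 16, 8) fills two words,
 * htonl(2 << 16 | 4) and htonl(2 << 16 | 5), i.e. the 5th and 6th payload
 * words of the 3rd packet.
 */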

static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr)
{
        memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN);
        memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN);
        eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
}

static bool is_umem_valid(struct ifobject *ifobj)
{
        return !!ifobj->umem->umem;
}

static u32 mode_to_xdp_flags(enum test_mode mode)
{
        return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
}

static u64 umem_size(struct xsk_umem_info *umem)
{
        return umem->num_frames * umem->frame_size;
}

static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer,
                              u64 size)
{
        struct xsk_umem_config cfg = {
                .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
                .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
                .frame_size = umem->frame_size,
                .frame_headroom = umem->frame_headroom,
                .flags = XSK_UMEM__DEFAULT_FLAGS
        };
        int ret;

        if (umem->fill_size)
                cfg.fill_size = umem->fill_size;

        if (umem->comp_size)
                cfg.comp_size = umem->comp_size;

        if (umem->unaligned_mode)
                cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;

        ret = xsk_umem__create(&umem->umem, buffer, size,
                               &umem->fq, &umem->cq, &cfg);
        if (ret)
                return ret;

        umem->buffer = buffer;
        if (ifobj->shared_umem && ifobj->rx_on) {
                umem->base_addr = umem_size(umem);
                umem->next_buffer = umem_size(umem);
        }

        return 0;
}

static u64 umem_alloc_buffer(struct xsk_umem_info *umem)
{
        u64 addr;

        addr = umem->next_buffer;
        umem->next_buffer += umem->frame_size;
        if (umem->next_buffer >= umem->base_addr + umem_size(umem))
                umem->next_buffer = umem->base_addr;

        return addr;
}
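
/* Illustrative allocation pattern (assuming base_addr == 0 and 4 frames of
 * 2048 bytes): successive calls return 0, 2048, 4096 and 6144, after which
 * the allocator wraps around and hands out 0 again.
 */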

static void umem_reset_alloc(struct xsk_umem_info *umem)
{
        umem->next_buffer = 0;
}

static void enable_busy_poll(struct xsk_socket_info *xsk)
{
        int sock_opt;

        sock_opt = 1;
        if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
                       (void *)&sock_opt, sizeof(sock_opt)) < 0)
                exit_with_error(errno);

        sock_opt = 20;
        if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
                       (void *)&sock_opt, sizeof(sock_opt)) < 0)
                exit_with_error(errno);

        sock_opt = xsk->batch_size;
        if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
                       (void *)&sock_opt, sizeof(sock_opt)) < 0)
                exit_with_error(errno);
}
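
/* The three setsockopt() calls above amount to: prefer busy polling over the
 * regular interrupt-driven napi path (SO_PREFER_BUSY_POLL), busy-poll for up
 * to 20 microseconds per syscall (SO_BUSY_POLL), and process at most
 * batch_size packets per busy-poll attempt (SO_BUSY_POLL_BUDGET). The 20 us
 * value is this test's choice, not a kernel requirement.
 */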

static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
                                  struct ifobject *ifobject, bool shared)
{
        struct xsk_socket_config cfg = {};
        struct xsk_ring_cons *rxr;
        struct xsk_ring_prod *txr;

        xsk->umem = umem;
        cfg.rx_size = xsk->rxqsize;
        cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
        cfg.bind_flags = ifobject->bind_flags;
        if (shared)
                cfg.bind_flags |= XDP_SHARED_UMEM;
        if (ifobject->mtu > MAX_ETH_PKT_SIZE)
                cfg.bind_flags |= XDP_USE_SG;
        if (umem->comp_size)
                cfg.tx_size = umem->comp_size;
        if (umem->fill_size)
                cfg.rx_size = umem->fill_size;

        txr = ifobject->tx_on ? &xsk->tx : NULL;
        rxr = ifobject->rx_on ? &xsk->rx : NULL;
        return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
}

static bool ifobj_zc_avail(struct ifobject *ifobject)
{
        size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
        int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
        struct xsk_socket_info *xsk;
        struct xsk_umem_info *umem;
        bool zc_avail = false;
        void *bufs;
        int ret;

        bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
        if (bufs == MAP_FAILED)
                exit_with_error(errno);

        umem = calloc(1, sizeof(struct xsk_umem_info));
        if (!umem) {
                munmap(bufs, umem_sz);
                exit_with_error(ENOMEM);
        }
        umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
        ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz);
        if (ret)
                exit_with_error(-ret);

        xsk = calloc(1, sizeof(struct xsk_socket_info));
        if (!xsk)
                goto out;
        ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY;
        ifobject->rx_on = true;
        xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
        ret = __xsk_configure_socket(xsk, umem, ifobject, false);
        if (!ret)
                zc_avail = true;

        xsk_socket__delete(xsk->xsk);
        free(xsk);
out:
        munmap(umem->buffer, umem_sz);
        xsk_umem__delete(umem->umem);
        free(umem);
        return zc_avail;
}
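
/* Note: ifobj_zc_avail() probes zero-copy support by simply attempting to
 * bind a scratch socket with XDP_ZEROCOPY set; if the bind succeeds, the
 * driver supports zero-copy and the ZC test mode is available.
 */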

static struct option long_options[] = {
        {"interface", required_argument, 0, 'i'},
        {"busy-poll", no_argument, 0, 'b'},
        {"verbose", no_argument, 0, 'v'},
        {"mode", required_argument, 0, 'm'},
        {"list", no_argument, 0, 'l'},
        {"test", required_argument, 0, 't'},
        {"help", no_argument, 0, 'h'},
        {0, 0, 0, 0}
};

static void print_usage(char **argv)
{
        const char *str =
                "  Usage: %s [OPTIONS]\n"
                "  Options:\n"
                "  -i, --interface      Use interface\n"
                "  -v, --verbose        Verbose output\n"
                "  -b, --busy-poll      Enable busy poll\n"
                "  -m, --mode           Run only mode skb, drv, or zc\n"
                "  -l, --list           List all available tests\n"
                "  -t, --test           Run a specific test. Enter number from -l option.\n"
                "  -h, --help           Display this help and exit\n";

        ksft_print_msg(str, basename(argv[0]));
        ksft_exit_xfail();
}

static bool validate_interface(struct ifobject *ifobj)
{
        if (!strcmp(ifobj->ifname, ""))
                return false;
        return true;
}

static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc,
                               char **argv)
{
        struct ifobject *ifobj;
        u32 interface_nb = 0;
        int option_index, c;

        opterr = 0;

        for (;;) {
                c = getopt_long(argc, argv, "i:vbm:lt:", long_options, &option_index);
                if (c == -1)
                        break;

                switch (c) {
                case 'i':
                        if (interface_nb == 0)
                                ifobj = ifobj_tx;
                        else if (interface_nb == 1)
                                ifobj = ifobj_rx;
                        else
                                break;

                        memcpy(ifobj->ifname, optarg,
                               min_t(size_t, MAX_INTERFACE_NAME_CHARS, strlen(optarg)));

                        ifobj->ifindex = if_nametoindex(ifobj->ifname);
                        if (!ifobj->ifindex)
                                exit_with_error(errno);

                        interface_nb++;
                        break;
                case 'v':
                        opt_verbose = true;
                        break;
                case 'b':
                        ifobj_tx->busy_poll = true;
                        ifobj_rx->busy_poll = true;
                        break;
                case 'm':
                        if (!strncmp("skb", optarg, strlen(optarg)))
                                opt_mode = TEST_MODE_SKB;
                        else if (!strncmp("drv", optarg, strlen(optarg)))
                                opt_mode = TEST_MODE_DRV;
                        else if (!strncmp("zc", optarg, strlen(optarg)))
                                opt_mode = TEST_MODE_ZC;
                        else
                                print_usage(argv);
                        break;
                case 'l':
                        opt_print_tests = true;
                        break;
                case 't':
                        errno = 0;
                        opt_run_test = strtol(optarg, NULL, 0);
                        if (errno)
                                print_usage(argv);
                        break;
                case 'h':
                default:
                        print_usage(argv);
                }
        }
}

static int set_ring_size(struct ifobject *ifobj)
{
        int ret;
        u32 ctr = 0;

        while (ctr++ < SOCK_RECONF_CTR) {
                ret = set_hw_ring_size(ifobj->ifname, &ifobj->ring);
                if (!ret)
                        break;

                /* Retry on EBUSY until the attempts are exhausted */
                if (ctr >= SOCK_RECONF_CTR || errno != EBUSY)
                        return -errno;

                usleep(USLEEP_MAX);
        }

        return ret;
}

static int hw_ring_size_reset(struct ifobject *ifobj)
{
        ifobj->ring.tx_pending = ifobj->set_ring.default_tx;
        ifobj->ring.rx_pending = ifobj->set_ring.default_rx;
        return set_ring_size(ifobj);
}

static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
                             struct ifobject *ifobj_rx)
{
        u32 i, j;

        for (i = 0; i < MAX_INTERFACES; i++) {
                struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

                ifobj->xsk = &ifobj->xsk_arr[0];
                ifobj->use_poll = false;
                ifobj->use_fill_ring = true;
                ifobj->release_rx = true;
                ifobj->validation_func = NULL;
                ifobj->use_metadata = false;

                if (i == 0) {
                        ifobj->rx_on = false;
                        ifobj->tx_on = true;
                } else {
                        ifobj->rx_on = true;
                        ifobj->tx_on = false;
                }

                memset(ifobj->umem, 0, sizeof(*ifobj->umem));
                ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
                ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;

                for (j = 0; j < MAX_SOCKETS; j++) {
                        memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
                        ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
                        ifobj->xsk_arr[j].batch_size = DEFAULT_BATCH_SIZE;
                        if (i == 0)
                                ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
                        else
                                ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default;

                        memcpy(ifobj->xsk_arr[j].src_mac, g_mac, ETH_ALEN);
                        memcpy(ifobj->xsk_arr[j].dst_mac, g_mac, ETH_ALEN);
                        ifobj->xsk_arr[j].src_mac[5] += ((j * 2) + 0);
                        ifobj->xsk_arr[j].dst_mac[5] += ((j * 2) + 1);
                }
        }

        if (ifobj_tx->hw_ring_size_supp)
                hw_ring_size_reset(ifobj_tx);

        test->ifobj_tx = ifobj_tx;
        test->ifobj_rx = ifobj_rx;
        test->current_step = 0;
        test->total_steps = 1;
        test->nb_sockets = 1;
        test->fail = false;
        test->set_ring = false;
        test->mtu = MAX_ETH_PKT_SIZE;
        test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog;
        test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
        test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog;
        test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk;
}

static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
                           struct ifobject *ifobj_rx, enum test_mode mode,
                           const struct test_spec *test_to_run)
{
        struct pkt_stream *tx_pkt_stream;
        struct pkt_stream *rx_pkt_stream;
        u32 i;

        tx_pkt_stream = test->tx_pkt_stream_default;
        rx_pkt_stream = test->rx_pkt_stream_default;
        memset(test, 0, sizeof(*test));
        test->tx_pkt_stream_default = tx_pkt_stream;
        test->rx_pkt_stream_default = rx_pkt_stream;

        for (i = 0; i < MAX_INTERFACES; i++) {
                struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

                ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
                if (mode == TEST_MODE_ZC)
                        ifobj->bind_flags |= XDP_ZEROCOPY;
                else
                        ifobj->bind_flags |= XDP_COPY;
        }

        strncpy(test->name, test_to_run->name, MAX_TEST_NAME_SIZE);
        test->test_func = test_to_run->test_func;
        test->mode = mode;
        __test_spec_init(test, ifobj_tx, ifobj_rx);
}

static void test_spec_reset(struct test_spec *test)
{
        __test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
}

static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
                                   struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
                                   struct bpf_map *xskmap_tx)
{
        test->xdp_prog_rx = xdp_prog_rx;
        test->xdp_prog_tx = xdp_prog_tx;
        test->xskmap_rx = xskmap_rx;
        test->xskmap_tx = xskmap_tx;
}

static int test_spec_set_mtu(struct test_spec *test, int mtu)
{
        int err;

        if (test->ifobj_rx->mtu != mtu) {
                err = xsk_set_mtu(test->ifobj_rx->ifindex, mtu);
                if (err)
                        return err;
                test->ifobj_rx->mtu = mtu;
        }
        if (test->ifobj_tx->mtu != mtu) {
                err = xsk_set_mtu(test->ifobj_tx->ifindex, mtu);
                if (err)
                        return err;
                test->ifobj_tx->mtu = mtu;
        }

        return 0;
}

static void pkt_stream_reset(struct pkt_stream *pkt_stream)
{
        if (pkt_stream) {
                pkt_stream->current_pkt_nb = 0;
                pkt_stream->nb_rx_pkts = 0;
        }
}

static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
{
        if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts)
                return NULL;

        return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
}

static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
{
        while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
                (*pkts_sent)++;
                if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid)
                        return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
                pkt_stream->current_pkt_nb++;
        }
        return NULL;
}

static void pkt_stream_delete(struct pkt_stream *pkt_stream)
{
        free(pkt_stream->pkts);
        free(pkt_stream);
}

static void pkt_stream_restore_default(struct test_spec *test)
{
        struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream;
        struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream;

        if (tx_pkt_stream != test->tx_pkt_stream_default) {
                pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream);
                test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default;
        }

        if (rx_pkt_stream != test->rx_pkt_stream_default) {
                pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
                test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default;
        }
}

static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
{
        struct pkt_stream *pkt_stream;

        pkt_stream = calloc(1, sizeof(*pkt_stream));
        if (!pkt_stream)
                return NULL;

        pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
        if (!pkt_stream->pkts) {
                free(pkt_stream);
                return NULL;
        }

        pkt_stream->nb_pkts = nb_pkts;
        return pkt_stream;
}

static bool pkt_continues(u32 options)
{
        return options & XDP_PKT_CONTD;
}

static u32 ceil_u32(u32 a, u32 b)
{
        return (a + b - 1) / b;
}

static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pkt *pkt)
{
        u32 nb_frags = 1, next_frag;

        if (!pkt)
                return 1;

        if (!pkt_stream->verbatim) {
                if (!pkt->valid || !pkt->len)
                        return 1;
                return ceil_u32(pkt->len, frame_size);
        }

        /* Search for the end of the packet in verbatim mode */
        if (!pkt_continues(pkt->options))
                return nb_frags;

        next_frag = pkt_stream->current_pkt_nb;
        pkt++;
        while (next_frag++ < pkt_stream->nb_pkts) {
                nb_frags++;
                if (!pkt_continues(pkt->options) || !pkt->valid)
                        break;
                pkt++;
        }
        return nb_frags;
}
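
/* Example (illustrative numbers): with the default 4096-byte frame size, a
 * valid 9000-byte packet in non-verbatim mode needs ceil_u32(9000, 4096) == 3
 * frags, matching the 3-frame multi-buffer case in the header comment.
 */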

static bool set_pkt_valid(int offset, u32 len)
{
        return len <= MAX_ETH_JUMBO_SIZE;
}

static void pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
{
        pkt->offset = offset;
        pkt->len = len;
        pkt->valid = set_pkt_valid(offset, len);
}

static void pkt_stream_pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
{
        bool prev_pkt_valid = pkt->valid;

        pkt_set(pkt_stream, pkt, offset, len);
        pkt_stream->nb_valid_entries += pkt->valid - prev_pkt_valid;
}

static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
{
        return ceil_u32(len, umem->frame_size) * umem->frame_size;
}

static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb_start, u32 nb_off)
{
        struct pkt_stream *pkt_stream;
        u32 i;

        pkt_stream = __pkt_stream_alloc(nb_pkts);
        if (!pkt_stream)
                exit_with_error(ENOMEM);

        pkt_stream->nb_pkts = nb_pkts;
        pkt_stream->max_pkt_len = pkt_len;
        for (i = 0; i < nb_pkts; i++) {
                struct pkt *pkt = &pkt_stream->pkts[i];

                pkt_stream_pkt_set(pkt_stream, pkt, 0, pkt_len);
                pkt->pkt_nb = nb_start + i * nb_off;
        }

        return pkt_stream;
}

static struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
{
        return __pkt_stream_generate(nb_pkts, pkt_len, 0, 1);
}

static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
{
        return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
}

static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
{
        struct pkt_stream *pkt_stream;

        pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
        test->ifobj_tx->xsk->pkt_stream = pkt_stream;
        pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
        test->ifobj_rx->xsk->pkt_stream = pkt_stream;
}

static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
                                      int offset)
{
        struct pkt_stream *pkt_stream;
        u32 i;

        pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
        for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
                pkt_stream_pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);

        ifobj->xsk->pkt_stream = pkt_stream;
}

static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
{
        __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
        __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
}

static void pkt_stream_receive_half(struct test_spec *test)
{
        struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
        u32 i;

        test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts,
                                                              pkt_stream->pkts[0].len);
        pkt_stream = test->ifobj_rx->xsk->pkt_stream;
        for (i = 1; i < pkt_stream->nb_pkts; i += 2)
                pkt_stream->pkts[i].valid = false;

        pkt_stream->nb_valid_entries /= 2;
}

static void pkt_stream_even_odd_sequence(struct test_spec *test)
{
        struct pkt_stream *pkt_stream;
        u32 i;

        for (i = 0; i < test->nb_sockets; i++) {
                pkt_stream = test->ifobj_tx->xsk_arr[i].pkt_stream;
                pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
                                                   pkt_stream->pkts[0].len, i, 2);
                test->ifobj_tx->xsk_arr[i].pkt_stream = pkt_stream;

                pkt_stream = test->ifobj_rx->xsk_arr[i].pkt_stream;
                pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
                                                   pkt_stream->pkts[0].len, i, 2);
                test->ifobj_rx->xsk_arr[i].pkt_stream = pkt_stream;
        }
}
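
/* With two sockets, the streams generated above interleave the sequence
 * space: socket 0 carries the even-numbered packets (0, 2, 4, ...) and
 * socket 1 the odd-numbered ones (1, 3, 5, ...), since __pkt_stream_generate()
 * is called with nb_start == i and nb_off == 2.
 */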

static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
{
        if (!pkt->valid)
                return pkt->offset;
        return pkt->offset + umem_alloc_buffer(umem);
}

static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
{
        pkt_stream->current_pkt_nb--;
}

static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len,
                         u32 pkt_nb, u32 bytes_written)
{
        void *data = xsk_umem__get_data(umem->buffer, addr);

        if (len < MIN_PKT_SIZE)
                return;

        if (!bytes_written) {
                gen_eth_hdr(xsk, data);

                len -= PKT_HDR_SIZE;
                data += PKT_HDR_SIZE;
        } else {
                bytes_written -= PKT_HDR_SIZE;
        }

        write_payload(data, pkt_nb, bytes_written, len);
}

static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, struct pkt *frames,
                                                       u32 nb_frames, bool verbatim)
{
        u32 i, len = 0, pkt_nb = 0, payload = 0;
        struct pkt_stream *pkt_stream;

        pkt_stream = __pkt_stream_alloc(nb_frames);
        if (!pkt_stream)
                exit_with_error(ENOMEM);

        for (i = 0; i < nb_frames; i++) {
                struct pkt *pkt = &pkt_stream->pkts[pkt_nb];
                struct pkt *frame = &frames[i];

                pkt->offset = frame->offset;
                if (verbatim) {
                        *pkt = *frame;
                        pkt->pkt_nb = payload;
                        if (!frame->valid || !pkt_continues(frame->options))
                                payload++;
                } else {
                        if (frame->valid)
                                len += frame->len;
                        if (frame->valid && pkt_continues(frame->options))
                                continue;

                        pkt->pkt_nb = pkt_nb;
                        pkt->len = len;
                        pkt->valid = frame->valid;
                        pkt->options = 0;

                        len = 0;
                }

                print_verbose("offset: %d len: %u valid: %u options: %u pkt_nb: %u\n",
                              pkt->offset, pkt->len, pkt->valid, pkt->options, pkt->pkt_nb);

                if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
                        pkt_stream->max_pkt_len = pkt->len;

                if (pkt->valid)
                        pkt_stream->nb_valid_entries++;

                pkt_nb++;
        }

        pkt_stream->nb_pkts = pkt_nb;
        pkt_stream->verbatim = verbatim;
        return pkt_stream;
}

static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
{
        struct pkt_stream *pkt_stream;

        pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true);
        test->ifobj_tx->xsk->pkt_stream = pkt_stream;

        pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false);
        test->ifobj_rx->xsk->pkt_stream = pkt_stream;
}

static void pkt_print_data(u32 *data, u32 cnt)
{
        u32 i;

        for (i = 0; i < cnt; i++) {
                u32 seqnum, pkt_nb;

                seqnum = ntohl(*data) & 0xffff;
                pkt_nb = ntohl(*data) >> 16;
                ksft_print_msg("%u:%u ", pkt_nb, seqnum);
                data++;
        }
}

static void pkt_dump(void *pkt, u32 len, bool eth_header)
{
        struct ethhdr *ethhdr = pkt;
        u32 i, *data;

        if (eth_header) {
                /* extract L2 frame */
                ksft_print_msg("DEBUG>> L2: dst mac: ");
                for (i = 0; i < ETH_ALEN; i++)
                        ksft_print_msg("%02X", ethhdr->h_dest[i]);

                ksft_print_msg("\nDEBUG>> L2: src mac: ");
                for (i = 0; i < ETH_ALEN; i++)
                        ksft_print_msg("%02X", ethhdr->h_source[i]);

                data = pkt + PKT_HDR_SIZE;
        } else {
                data = pkt;
        }

        /* extract L5 frame */
        ksft_print_msg("\nDEBUG>> L5: seqnum: ");
        pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
        ksft_print_msg("....");
        if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
                ksft_print_msg("\n.... ");
                pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
                               PKT_DUMP_NB_TO_PRINT);
        }
        ksft_print_msg("\n---------------------------------------\n");
}

static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
{
        u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
        u32 offset = addr % umem->frame_size, expected_offset;
        int pkt_offset = pkt->valid ? pkt->offset : 0;

        if (!umem->unaligned_mode)
                pkt_offset = 0;

        expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;

        if (offset == expected_offset)
                return true;

        ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
        return false;
}

static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
{
        void *data = xsk_umem__get_data(buffer, addr);
        struct xdp_info *meta = data - sizeof(struct xdp_info);

        if (meta->count != pkt->pkt_nb) {
                ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%llu]\n",
                               __func__, pkt->pkt_nb,
                               (unsigned long long)meta->count);
                return false;
        }

        return true;
}

static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb,
                          u32 bytes_processed)
{
        u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum;
        void *data = xsk_umem__get_data(umem->buffer, addr);

        addr -= umem->base_addr;

        if (addr >= umem->num_frames * umem->frame_size ||
            addr + len > umem->num_frames * umem->frame_size) {
                ksft_print_msg("Frag invalid addr: %llx len: %u\n",
                               (unsigned long long)addr, len);
                return false;
        }
        if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) {
                ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n",
                               (unsigned long long)addr, len);
                return false;
        }

        pkt_data = data;
        if (!bytes_processed) {
                pkt_data += PKT_HDR_SIZE / sizeof(*pkt_data);
                len -= PKT_HDR_SIZE;
        } else {
                bytes_processed -= PKT_HDR_SIZE;
        }

        expected_seqnum = bytes_processed / sizeof(*pkt_data);
        seqnum = ntohl(*pkt_data) & 0xffff;
        pkt_nb = ntohl(*pkt_data) >> 16;

        if (expected_pkt_nb != pkt_nb) {
                ksft_print_msg("[%s] expected pkt_nb [%u], got pkt_nb [%u]\n",
                               __func__, expected_pkt_nb, pkt_nb);
                goto error;
        }
        if (expected_seqnum != seqnum) {
                ksft_print_msg("[%s] expected seqnum at start [%u], got seqnum [%u]\n",
                               __func__, expected_seqnum, seqnum);
                goto error;
        }

        words_to_end = len / sizeof(*pkt_data) - 1;
        pkt_data += words_to_end;
        seqnum = ntohl(*pkt_data) & 0xffff;
        expected_seqnum += words_to_end;
        if (expected_seqnum != seqnum) {
                ksft_print_msg("[%s] expected seqnum at end [%u], got seqnum [%u]\n",
                               __func__, expected_seqnum, seqnum);
                goto error;
        }

        return true;

error:
        pkt_dump(data, len, !bytes_processed);
        return false;
}

static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
{
        if (pkt->len != len) {
                ksft_print_msg("[%s] expected packet length [%d], got length [%d]\n",
                               __func__, pkt->len, len);
                pkt_dump(xsk_umem__get_data(buffer, addr), len, true);
                return false;
        }

        return true;
}

static int kick_tx(struct xsk_socket_info *xsk)
{
        int ret;

        ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
        if (ret >= 0)
                return TEST_PASS;
        if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
                usleep(100);
                return TEST_PASS;
        }
        return TEST_FAILURE;
}

static int kick_rx(struct xsk_socket_info *xsk)
{
        int ret;

        ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
        if (ret < 0)
                return TEST_FAILURE;

        return TEST_PASS;
}

static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
{
        unsigned int rcvd;
        u32 idx;
        int ret;

        if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
                ret = kick_tx(xsk);
                if (ret)
                        return TEST_FAILURE;
        }

        rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
        if (rcvd) {
                if (rcvd > xsk->outstanding_tx) {
                        u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);

                        ksft_print_msg("[%s] Too many packets completed\n", __func__);
                        ksft_print_msg("Last completion address: %llx\n",
                                       (unsigned long long)addr);
                        return TEST_FAILURE;
                }

                xsk_ring_cons__release(&xsk->umem->cq, rcvd);
                xsk->outstanding_tx -= rcvd;
        }

        return TEST_PASS;
}

static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
{
        u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
        u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
        struct pkt_stream *pkt_stream = xsk->pkt_stream;
        struct ifobject *ifobj = test->ifobj_rx;
        struct xsk_umem_info *umem = xsk->umem;
        struct pollfd fds = { };
        struct pkt *pkt;
        u64 first_addr = 0;
        int ret;

        fds.fd = xsk_socket__fd(xsk->xsk);
        fds.events = POLLIN;

        ret = kick_rx(xsk);
        if (ret)
                return TEST_FAILURE;

        if (ifobj->use_poll) {
                ret = poll(&fds, 1, POLL_TMOUT);
                if (ret < 0)
                        return TEST_FAILURE;

                if (!ret) {
                        if (!is_umem_valid(test->ifobj_tx))
                                return TEST_PASS;

                        ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
                        return TEST_CONTINUE;
                }

                if (!(fds.revents & POLLIN))
                        return TEST_CONTINUE;
        }

        rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx);
        if (!rcvd)
                return TEST_CONTINUE;

        if (ifobj->use_fill_ring) {
                ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
                while (ret != rcvd) {
                        if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
                                ret = poll(&fds, 1, POLL_TMOUT);
                                if (ret < 0)
                                        return TEST_FAILURE;
                        }
                        ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
                }
        }

        while (frags_processed < rcvd) {
                const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
                u64 addr = desc->addr, orig;

                orig = xsk_umem__extract_addr(addr);
                addr = xsk_umem__add_offset_to_addr(addr);

                if (!nb_frags) {
                        pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
                        if (!pkt) {
                                ksft_print_msg("[%s] received too many packets addr: %lx len %u\n",
                                               __func__, addr, desc->len);
                                return TEST_FAILURE;
                        }
                }

                print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
                              addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);

                if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
                    !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata &&
                    !is_metadata_correct(pkt, umem->buffer, addr)))
                        return TEST_FAILURE;

                if (!nb_frags++)
                        first_addr = addr;
                frags_processed++;
                pkt_len += desc->len;
                if (ifobj->use_fill_ring)
                        *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;

                if (pkt_continues(desc->options))
                        continue;

                /* The complete packet has been received */
                if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
                    !is_offset_correct(umem, pkt, addr))
                        return TEST_FAILURE;

                pkt_stream->nb_rx_pkts++;
                nb_frags = 0;
                pkt_len = 0;
        }

        if (nb_frags) {
                /* In the middle of a packet. Start over from beginning of packet. */
                idx_rx -= nb_frags;
                xsk_ring_cons__cancel(&xsk->rx, nb_frags);
                if (ifobj->use_fill_ring) {
                        idx_fq -= nb_frags;
                        xsk_ring_prod__cancel(&umem->fq, nb_frags);
                }
                frags_processed -= nb_frags;
        }

        if (ifobj->use_fill_ring)
                xsk_ring_prod__submit(&umem->fq, frags_processed);
        if (ifobj->release_rx)
                xsk_ring_cons__release(&xsk->rx, frags_processed);

        pthread_mutex_lock(&pacing_mutex);
        pkts_in_flight -= pkts_sent;
        pthread_mutex_unlock(&pacing_mutex);
        pkts_sent = 0;

        return TEST_CONTINUE;
}

bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num,
                          unsigned long *bitmap)
{
        struct pkt_stream *pkt_stream = xsk->pkt_stream;

        if (!pkt_stream) {
                __set_bit(sock_num, bitmap);
                return false;
        }

        if (pkt_stream->nb_rx_pkts == pkt_stream->nb_valid_entries) {
                __set_bit(sock_num, bitmap);
                if (bitmap_full(bitmap, test->nb_sockets))
                        return true;
        }

        return false;
}

static int receive_pkts(struct test_spec *test)
{
        struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
        DECLARE_BITMAP(bitmap, test->nb_sockets);
        struct xsk_socket_info *xsk;
        u32 sock_num = 0;
        int res, ret;

        ret = gettimeofday(&tv_now, NULL);
        if (ret)
                exit_with_error(errno);

        timeradd(&tv_now, &tv_timeout, &tv_end);

        while (1) {
                xsk = &test->ifobj_rx->xsk_arr[sock_num];

                if ((all_packets_received(test, xsk, sock_num, bitmap)))
                        break;

                res = __receive_pkts(test, xsk);
                if (!(res == TEST_PASS || res == TEST_CONTINUE))
                        return res;

                ret = gettimeofday(&tv_now, NULL);
                if (ret)
                        exit_with_error(errno);

                if (timercmp(&tv_now, &tv_end, >)) {
                        ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
                        return TEST_FAILURE;
                }
                sock_num = (sock_num + 1) % test->nb_sockets;
        }

        return TEST_PASS;
}

static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
{
        u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
        struct pkt_stream *pkt_stream = xsk->pkt_stream;
        struct xsk_umem_info *umem = ifobject->umem;
        bool use_poll = ifobject->use_poll;
        struct pollfd fds = { };
        int ret;

        buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
        /* pkts_in_flight might be negative if many invalid packets are sent */
        if (pkts_in_flight >= (int)((umem_size(umem) - xsk->batch_size * buffer_len) /
            buffer_len)) {
                ret = kick_tx(xsk);
                if (ret)
                        return TEST_FAILURE;
                return TEST_CONTINUE;
        }

        fds.fd = xsk_socket__fd(xsk->xsk);
        fds.events = POLLOUT;

        while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) < xsk->batch_size) {
                if (use_poll) {
                        ret = poll(&fds, 1, POLL_TMOUT);
                        if (timeout) {
                                if (ret < 0) {
                                        ksft_print_msg("ERROR: [%s] Poll error %d\n",
                                                       __func__, errno);
                                        return TEST_FAILURE;
                                }
                                if (ret == 0)
                                        return TEST_PASS;
                                break;
                        }
                        if (ret <= 0) {
                                ksft_print_msg("ERROR: [%s] Poll error %d\n",
                                               __func__, errno);
                                return TEST_FAILURE;
                        }
                }

                complete_pkts(xsk, xsk->batch_size);
        }

        for (i = 0; i < xsk->batch_size; i++) {
                struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
                u32 nb_frags_left, nb_frags, bytes_written = 0;

                if (!pkt)
                        break;

                nb_frags = pkt_nb_frags(umem->frame_size, pkt_stream, pkt);
                if (nb_frags > xsk->batch_size - i) {
                        pkt_stream_cancel(pkt_stream);
                        xsk_ring_prod__cancel(&xsk->tx, xsk->batch_size - i);
                        break;
                }
                nb_frags_left = nb_frags;

                while (nb_frags_left--) {
                        struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);

                        tx_desc->addr = pkt_get_addr(pkt, ifobject->umem);
                        if (pkt_stream->verbatim) {
                                tx_desc->len = pkt->len;
                                tx_desc->options = pkt->options;
                        } else if (nb_frags_left) {
                                tx_desc->len = umem->frame_size;
                                tx_desc->options = XDP_PKT_CONTD;
                        } else {
                                tx_desc->len = pkt->len - bytes_written;
                                tx_desc->options = 0;
                        }
                        if (pkt->valid)
                                pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
                                             bytes_written);
                        bytes_written += tx_desc->len;

                        print_verbose("Tx addr: %llx len: %u options: %u pkt_nb: %u\n",
                                      tx_desc->addr, tx_desc->len, tx_desc->options, pkt->pkt_nb);

                        if (nb_frags_left) {
                                i++;
                                if (pkt_stream->verbatim)
                                        pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
                        }
                }

                if (pkt && pkt->valid) {
                        valid_pkts++;
                        valid_frags += nb_frags;
                }
        }

        pthread_mutex_lock(&pacing_mutex);
        pkts_in_flight += valid_pkts;
        pthread_mutex_unlock(&pacing_mutex);

        xsk_ring_prod__submit(&xsk->tx, i);
        xsk->outstanding_tx += valid_frags;

        if (use_poll) {
                ret = poll(&fds, 1, POLL_TMOUT);
                if (ret <= 0) {
                        if (ret == 0 && timeout)
                                return TEST_PASS;

                        ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
                        return TEST_FAILURE;
                }
        }

        if (!timeout) {
                if (complete_pkts(xsk, i))
                        return TEST_FAILURE;

                usleep(10);
                return TEST_PASS;
        }

        return TEST_CONTINUE;
}

static int wait_for_tx_completion(struct xsk_socket_info *xsk)
{
        struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
        int ret;

        ret = gettimeofday(&tv_now, NULL);
        if (ret)
                exit_with_error(errno);
        timeradd(&tv_now, &tv_timeout, &tv_end);

        while (xsk->outstanding_tx) {
                ret = gettimeofday(&tv_now, NULL);
                if (ret)
                        exit_with_error(errno);
                if (timercmp(&tv_now, &tv_end, >)) {
                        ksft_print_msg("ERROR: [%s] Transmission loop timed out\n", __func__);
                        return TEST_FAILURE;
                }

                complete_pkts(xsk, xsk->batch_size);
        }

        return TEST_PASS;
}

bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
{
        return bitmap_full(bitmap, test->nb_sockets);
}

static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
        bool timeout = !is_umem_valid(test->ifobj_rx);
        DECLARE_BITMAP(bitmap, test->nb_sockets);
        u32 i, ret;

        while (!(all_packets_sent(test, bitmap))) {
                for (i = 0; i < test->nb_sockets; i++) {
                        struct pkt_stream *pkt_stream;

                        pkt_stream = ifobject->xsk_arr[i].pkt_stream;
                        if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
                                __set_bit(i, bitmap);
                                continue;
                        }
                        ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
                        if (ret == TEST_CONTINUE && !test->fail)
                                continue;

                        if ((ret || test->fail) && !timeout)
                                return TEST_FAILURE;

                        if (ret == TEST_PASS && timeout)
                                return ret;

                        ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
                        if (ret)
                                return TEST_FAILURE;
                }
        }

        return TEST_PASS;
}
1465 
1466 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
1467 {
1468         int fd = xsk_socket__fd(xsk), err;
1469         socklen_t optlen, expected_len;
1470 
1471         optlen = sizeof(*stats);
1472         err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
1473         if (err) {
1474                 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
1475                                __func__, errno, strerror(errno));
1476                 return TEST_FAILURE;
1477         }
1478 
1479         expected_len = sizeof(struct xdp_statistics);
1480         if (optlen != expected_len) {
1481                 ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
1482                                __func__, expected_len, optlen);
1483                 return TEST_FAILURE;
1484         }
1485 
1486         return TEST_PASS;
1487 }
1488 
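/*
 * Usage sketch for get_xsk_stats(): XDP_STATISTICS is a plain getsockopt()
 * at level SOL_XDP on the AF_XDP socket fd, filling a struct
 * xdp_statistics from <linux/if_xdp.h>. The optlen check above guards
 * against the kernel returning a truncated struct.
 *
 *	struct xdp_statistics stats;
 *
 *	if (get_xsk_stats(xsk, &stats) == TEST_PASS)
 *		printf("rx_dropped: %llu\n",
 *		       (unsigned long long)stats.rx_dropped);
 */
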
1489 static int validate_rx_dropped(struct ifobject *ifobject)
1490 {
1491         struct xsk_socket *xsk = ifobject->xsk->xsk;
1492         struct xdp_statistics stats;
1493         int err;
1494 
1495         err = kick_rx(ifobject->xsk);
1496         if (err)
1497                 return TEST_FAILURE;
1498 
1499         err = get_xsk_stats(xsk, &stats);
1500         if (err)
1501                 return TEST_FAILURE;
1502 
1503         /* The receiver calls getsockopt after receiving the last (valid)
1504          * packet which is not the final packet sent in this test (valid and
1505          * invalid packets are sent in alternating fashion with the final
1506          * packet being invalid). Since the last packet may or may not have
1507          * been dropped already, both outcomes must be allowed.
1508          */
1509         if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 ||
1510             stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1)
1511                 return TEST_PASS;
1512 
1513         return TEST_FAILURE;
1514 }
1515 
1516 static int validate_rx_full(struct ifobject *ifobject)
1517 {
1518         struct xsk_socket *xsk = ifobject->xsk->xsk;
1519         struct xdp_statistics stats;
1520         int err;
1521 
1522         usleep(1000);
1523         err = kick_rx(ifobject->xsk);
1524         if (err)
1525                 return TEST_FAILURE;
1526 
1527         err = get_xsk_stats(xsk, &stats);
1528         if (err)
1529                 return TEST_FAILURE;
1530 
1531         if (stats.rx_ring_full)
1532                 return TEST_PASS;
1533 
1534         return TEST_FAILURE;
1535 }
1536 
1537 static int validate_fill_empty(struct ifobject *ifobject)
1538 {
1539         struct xsk_socket *xsk = ifobject->xsk->xsk;
1540         struct xdp_statistics stats;
1541         int err;
1542 
1543         usleep(1000);
1544         err = kick_rx(ifobject->xsk);
1545         if (err)
1546                 return TEST_FAILURE;
1547 
1548         err = get_xsk_stats(xsk, &stats);
1549         if (err)
1550                 return TEST_FAILURE;
1551 
1552         if (stats.rx_fill_ring_empty_descs)
1553                 return TEST_PASS;
1554 
1555         return TEST_FAILURE;
1556 }
1557 
1558 static int validate_tx_invalid_descs(struct ifobject *ifobject)
1559 {
1560         struct xsk_socket *xsk = ifobject->xsk->xsk;
1561         int fd = xsk_socket__fd(xsk);
1562         struct xdp_statistics stats;
1563         socklen_t optlen;
1564         int err;
1565 
1566         optlen = sizeof(stats);
1567         err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
1568         if (err) {
1569                 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
1570                                __func__, errno, strerror(errno));
1571                 return TEST_FAILURE;
1572         }
1573 
1574         if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) {
1575                 ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%llu] expected [%u]\n",
1576                                __func__,
1577                                (unsigned long long)stats.tx_invalid_descs,
1578                                ifobject->xsk->pkt_stream->nb_pkts / 2);
1579                 return TEST_FAILURE;
1580         }
1581 
1582         return TEST_PASS;
1583 }
1584 
1585 static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject,
1586                                  struct xsk_umem_info *umem, bool tx)
1587 {
1588         int i, ret;
1589 
1590         for (i = 0; i < test->nb_sockets; i++) {
1591                 bool shared = (ifobject->shared_umem && tx) ? true : !!i;
1592                 u32 ctr = 0;
1593 
1594                 while (ctr++ < SOCK_RECONF_CTR) {
1595                         ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem,
1596                                                      ifobject, shared);
1597                         if (!ret)
1598                                 break;
1599 
1600                         /* Retry if it fails as xsk_socket__create() is asynchronous */
1601                         if (ctr >= SOCK_RECONF_CTR)
1602                                 exit_with_error(-ret);
1603                         usleep(USLEEP_MAX);
1604                 }
1605                 if (ifobject->busy_poll)
1606                         enable_busy_poll(&ifobject->xsk_arr[i]);
1607         }
1608 }
1609 
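/*
 * Why the retry loop above exists: kernel-side socket teardown completes
 * asynchronously, so re-binding the same netdev/queue right away can fail
 * transiently. The shape of the retry (names from this file):
 *
 *	while (ctr++ < SOCK_RECONF_CTR) {
 *		ret = __xsk_configure_socket(...);
 *		if (!ret)
 *			break;			// bound successfully
 *		usleep(USLEEP_MAX);		// give teardown time to finish
 *	}
 */
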
1610 static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
1611 {
1612         xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true);
1613         ifobject->xsk = &ifobject->xsk_arr[0];
1614         ifobject->xskmap = test->ifobj_rx->xskmap;
1615         memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
1616         ifobject->umem->base_addr = 0;
1617 }
1618 
1619 static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
1620                                    bool fill_up)
1621 {
1622         u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
1623         u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
1624         int ret;
1625 
1626         if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
1627                 buffers_to_fill = umem->num_frames;
1628         else
1629                 buffers_to_fill = umem->fill_size;
1630 
1631         ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
1632         if (ret != buffers_to_fill)
1633                 exit_with_error(ENOSPC);
1634 
1635         while (filled < buffers_to_fill) {
1636                 struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
1637                 u64 addr;
1638                 u32 i;
1639 
1640                 for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt_stream, pkt); i++) {
1641                         if (!pkt) {
1642                                 if (!fill_up)
1643                                         break;
1644                                 addr = filled * umem->frame_size + umem->base_addr;
1645                         } else if (pkt->offset >= 0) {
1646                                 addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
1647                         } else {
1648                                 addr = pkt->offset + umem_alloc_buffer(umem);
1649                         }
1650 
1651                         *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
1652                         if (++filled >= buffers_to_fill)
1653                                 break;
1654                 }
1655         }
1656         xsk_ring_prod__submit(&umem->fq, filled);
1657         xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);
1658 
1659         pkt_stream_reset(pkt_stream);
1660         umem_reset_alloc(umem);
1661 }
1662 
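/*
 * The fill queue is a single-producer ring, so population follows the
 * libxsk reserve/write/submit pattern; descriptors reserved but never
 * written are returned with xsk_ring_prod__cancel(), as done above.
 * Minimal sketch (addrs[] is a stand-in buffer-address array):
 *
 *	u32 idx;
 *
 *	if (xsk_ring_prod__reserve(&umem->fq, n, &idx) != n)
 *		exit_with_error(ENOSPC);	// ring smaller than n
 *	for (i = 0; i < n; i++)
 *		*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addrs[i];
 *	xsk_ring_prod__submit(&umem->fq, n);	// hand buffers to the kernel
 */
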
1663 static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
1664 {
1665         u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
1666         int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
1667         LIBBPF_OPTS(bpf_xdp_query_opts, opts);
1668         void *bufs;
1669         int ret;
1670         u32 i;
1671 
1672         if (ifobject->umem->unaligned_mode)
1673                 mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
1674 
1675         if (ifobject->shared_umem)
1676                 umem_sz *= 2;
1677 
1678         bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
1679         if (bufs == MAP_FAILED)
1680                 exit_with_error(errno);
1681 
1682         ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz);
1683         if (ret)
1684                 exit_with_error(-ret);
1685 
1686         xsk_configure_socket(test, ifobject, ifobject->umem, false);
1687 
1688         ifobject->xsk = &ifobject->xsk_arr[0];
1689 
1690         if (!ifobject->rx_on)
1691                 return;
1692 
1693         xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream, ifobject->use_fill_ring);
1694 
1695         for (i = 0; i < test->nb_sockets; i++) {
1696                 ifobject->xsk = &ifobject->xsk_arr[i];
1697                 ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i);
1698                 if (ret)
1699                         exit_with_error(errno);
1700         }
1701 }
1702 
1703 static void *worker_testapp_validate_tx(void *arg)
1704 {
1705         struct test_spec *test = (struct test_spec *)arg;
1706         struct ifobject *ifobject = test->ifobj_tx;
1707         int err;
1708 
1709         if (test->current_step == 1) {
1710                 if (!ifobject->shared_umem)
1711                         thread_common_ops(test, ifobject);
1712                 else
1713                         thread_common_ops_tx(test, ifobject);
1714         }
1715 
1716         err = send_pkts(test, ifobject);
1717 
1718         if (!err && ifobject->validation_func)
1719                 err = ifobject->validation_func(ifobject);
1720         if (err)
1721                 report_failure(test);
1722 
1723         pthread_exit(NULL);
1724 }
1725 
1726 static void *worker_testapp_validate_rx(void *arg)
1727 {
1728         struct test_spec *test = (struct test_spec *)arg;
1729         struct ifobject *ifobject = test->ifobj_rx;
1730         int err;
1731 
1732         if (test->current_step == 1) {
1733                 thread_common_ops(test, ifobject);
1734         } else {
1735                 xsk_clear_xskmap(ifobject->xskmap);
1736                 err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
1737                 if (err) {
1738                         ksft_print_msg("Error: Failed to update xskmap, error %s\n",
1739                                        strerror(-err));
1740                         exit_with_error(-err);
1741                 }
1742         }
1743 
1744         pthread_barrier_wait(&barr);
1745 
1746         err = receive_pkts(test);
1747 
1748         if (!err && ifobject->validation_func)
1749                 err = ifobject->validation_func(ifobject);
1750         if (err)
1751                 report_failure(test);
1752 
1753         pthread_exit(NULL);
1754 }
1755 
1756 static u64 ceil_u64(u64 a, u64 b)
1757 {
1758         return (a + b - 1) / b;
1759 }
1760 
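/*
 * ceil_u64() is integer division rounded up, e.g. ceil_u64(5, 2) == 3
 * while 5 / 2 == 2. It is used below to round a UMEM size up to a whole
 * number of huge pages before munmap().
 */
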
1761 static void testapp_clean_xsk_umem(struct ifobject *ifobj)
1762 {
1763         u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
1764 
1765         if (ifobj->shared_umem)
1766                 umem_sz *= 2;
1767 
1768         umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
1769         xsk_umem__delete(ifobj->umem->umem);
1770         munmap(ifobj->umem->buffer, umem_sz);
1771 }
1772 
1773 static void handler(int signum)
1774 {
1775         pthread_exit(NULL);
1776 }
1777 
1778 static bool xdp_prog_changed_rx(struct test_spec *test)
1779 {
1780         struct ifobject *ifobj = test->ifobj_rx;
1781 
1782         return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
1783 }
1784 
1785 static bool xdp_prog_changed_tx(struct test_spec *test)
1786 {
1787         struct ifobject *ifobj = test->ifobj_tx;
1788 
1789         return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode;
1790 }
1791 
1792 static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
1793                              struct bpf_map *xskmap, enum test_mode mode)
1794 {
1795         int err;
1796 
1797         xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
1798         err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
1799         if (err) {
1800                 ksft_print_msg("Error attaching XDP program\n");
1801                 exit_with_error(-err);
1802         }
1803 
1804         if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC))
1805                 if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) {
1806                         ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
1807                         exit_with_error(EINVAL);
1808                 }
1809 
1810         ifobj->xdp_prog = xdp_prog;
1811         ifobj->xskmap = xskmap;
1812         ifobj->mode = mode;
1813 }
1814 
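/*
 * Program swap sketch (as performed above): detach whatever is loaded in
 * the old mode, attach the new program in the requested mode, then, for
 * DRV/ZC requests, verify the program really landed in native mode rather
 * than silently falling back to SKB mode:
 *
 *	xsk_detach_xdp_program(ifindex, mode_to_xdp_flags(old_mode));
 *	err = xsk_attach_xdp_program(prog, ifindex, mode_to_xdp_flags(new_mode));
 *	if (!err && !xsk_is_in_mode(ifindex, XDP_FLAGS_DRV_MODE))
 *		;	// requested native mode but did not get it
 */
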
1815 static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
1816                                  struct ifobject *ifobj_tx)
1817 {
1818         if (xdp_prog_changed_rx(test))
1819                 xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);
1820 
1821         if (!ifobj_tx || ifobj_tx->shared_umem)
1822                 return;
1823 
1824         if (xdp_prog_changed_tx(test))
1825                 xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);
1826 }
1827 
1828 static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1,
1829                                       struct ifobject *ifobj2)
1830 {
1831         pthread_t t0, t1;
1832         int err;
1833 
1834         if (test->mtu > MAX_ETH_PKT_SIZE) {
1835                 if (test->mode == TEST_MODE_ZC && (!ifobj1->multi_buff_zc_supp ||
1836                                                    (ifobj2 && !ifobj2->multi_buff_zc_supp))) {
1837                         ksft_test_result_skip("Multi buffer for zero-copy not supported.\n");
1838                         return TEST_SKIP;
1839                 }
1840                 if (test->mode != TEST_MODE_ZC && (!ifobj1->multi_buff_supp ||
1841                                                    (ifobj2 && !ifobj2->multi_buff_supp))) {
1842                         ksft_test_result_skip("Multi buffer not supported.\n");
1843                         return TEST_SKIP;
1844                 }
1845         }
1846         err = test_spec_set_mtu(test, test->mtu);
1847         if (err) {
1848                 ksft_print_msg("Error, could not set mtu.\n");
1849                 exit_with_error(err);
1850         }
1851 
1852         if (ifobj2) {
1853                 if (pthread_barrier_init(&barr, NULL, 2))
1854                         exit_with_error(errno);
1855                 pkt_stream_reset(ifobj2->xsk->pkt_stream);
1856         }
1857 
1858         test->current_step++;
1859         pkt_stream_reset(ifobj1->xsk->pkt_stream);
1860         pkts_in_flight = 0;
1861 
1862         signal(SIGUSR1, handler);
1863         /* Spawn RX thread */
1864         pthread_create(&t0, NULL, ifobj1->func_ptr, test);
1865 
1866         if (ifobj2) {
1867                 pthread_barrier_wait(&barr);
1868                 if (pthread_barrier_destroy(&barr))
1869                         exit_with_error(errno);
1870 
1871                 /* Spawn TX thread */
1872                 pthread_create(&t1, NULL, ifobj2->func_ptr, test);
1873 
1874                 pthread_join(t1, NULL);
1875         }
1876 
1877         if (!ifobj2)
1878                 pthread_kill(t0, SIGUSR1);
1879         else
1880                 pthread_join(t0, NULL);
1881 
1882         if (test->total_steps == test->current_step || test->fail) {
1883                 u32 i;
1884 
1885                 if (ifobj2)
1886                         for (i = 0; i < test->nb_sockets; i++)
1887                                 xsk_socket__delete(ifobj2->xsk_arr[i].xsk);
1888 
1889                 for (i = 0; i < test->nb_sockets; i++)
1890                         xsk_socket__delete(ifobj1->xsk_arr[i].xsk);
1891 
1892                 testapp_clean_xsk_umem(ifobj1);
1893                 if (ifobj2 && !ifobj2->shared_umem)
1894                         testapp_clean_xsk_umem(ifobj2);
1895         }
1896 
1897         return !!test->fail;
1898 }
1899 
1900 static int testapp_validate_traffic(struct test_spec *test)
1901 {
1902         struct ifobject *ifobj_rx = test->ifobj_rx;
1903         struct ifobject *ifobj_tx = test->ifobj_tx;
1904 
1905         if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) ||
1906             (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) {
1907                 ksft_test_result_skip("No huge pages present.\n");
1908                 return TEST_SKIP;
1909         }
1910 
1911         if (test->set_ring) {
1912                 if (ifobj_tx->hw_ring_size_supp) {
1913                         if (set_ring_size(ifobj_tx)) {
1914                                 ksft_test_result_skip("Failed to change HW ring size.\n");
1915                                 return TEST_SKIP;
1916                         }
1917                 } else {
1918                         ksft_test_result_skip("Changing HW ring size not supported.\n");
1919                         return TEST_SKIP;
1920                 }
1921         }
1922 
1923         xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx);
1924         return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx);
1925 }
1926 
1927 static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj)
1928 {
1929         return __testapp_validate_traffic(test, ifobj, NULL);
1930 }
1931 
1932 static int testapp_teardown(struct test_spec *test)
1933 {
1934         int i;
1935 
1936         for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
1937                 if (testapp_validate_traffic(test))
1938                         return TEST_FAILURE;
1939                 test_spec_reset(test);
1940         }
1941 
1942         return TEST_PASS;
1943 }
1944 
1945 static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
1946 {
1947         thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
1948         struct ifobject *tmp_ifobj = (*ifobj1);
1949 
1950         (*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
1951         (*ifobj2)->func_ptr = tmp_func_ptr;
1952 
1953         *ifobj1 = *ifobj2;
1954         *ifobj2 = tmp_ifobj;
1955 }
1956 
1957 static int testapp_bidirectional(struct test_spec *test)
1958 {
1959         int res;
1960 
1961         test->ifobj_tx->rx_on = true;
1962         test->ifobj_rx->tx_on = true;
1963         test->total_steps = 2;
1964         if (testapp_validate_traffic(test))
1965                 return TEST_FAILURE;
1966 
1967         print_verbose("Switching Tx/Rx direction\n");
1968         swap_directions(&test->ifobj_rx, &test->ifobj_tx);
1969         res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
1970 
1971         swap_directions(&test->ifobj_rx, &test->ifobj_tx);
1972         return res;
1973 }
1974 
1975 static int swap_xsk_resources(struct test_spec *test)
1976 {
1977         int ret;
1978 
1979         test->ifobj_tx->xsk_arr[0].pkt_stream = NULL;
1980         test->ifobj_rx->xsk_arr[0].pkt_stream = NULL;
1981         test->ifobj_tx->xsk_arr[1].pkt_stream = test->tx_pkt_stream_default;
1982         test->ifobj_rx->xsk_arr[1].pkt_stream = test->rx_pkt_stream_default;
1983         test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
1984         test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];
1985 
1986         ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0);
1987         if (ret)
1988                 return TEST_FAILURE;
1989 
1990         return TEST_PASS;
1991 }
1992 
1993 static int testapp_xdp_prog_cleanup(struct test_spec *test)
1994 {
1995         test->total_steps = 2;
1996         test->nb_sockets = 2;
1997         if (testapp_validate_traffic(test))
1998                 return TEST_FAILURE;
1999 
2000         if (swap_xsk_resources(test))
2001                 return TEST_FAILURE;
2002         return testapp_validate_traffic(test);
2003 }
2004 
2005 static int testapp_headroom(struct test_spec *test)
2006 {
2007         test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
2008         return testapp_validate_traffic(test);
2009 }
2010 
2011 static int testapp_stats_rx_dropped(struct test_spec *test)
2012 {
2013         if (test->mode == TEST_MODE_ZC) {
2014                 ksft_test_result_skip("Cannot run RX_DROPPED test for ZC mode\n");
2015                 return TEST_SKIP;
2016         }
2017 
2018         pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
2019         test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
2020                 XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
2021         pkt_stream_receive_half(test);
2022         test->ifobj_rx->validation_func = validate_rx_dropped;
2023         return testapp_validate_traffic(test);
2024 }
2025 
2026 static int testapp_stats_tx_invalid_descs(struct test_spec *test)
2027 {
2028         pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
2029         test->ifobj_tx->validation_func = validate_tx_invalid_descs;
2030         return testapp_validate_traffic(test);
2031 }
2032 
2033 static int testapp_stats_rx_full(struct test_spec *test)
2034 {
2035         pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
2036         test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
2037 
2038         test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
2039         test->ifobj_rx->release_rx = false;
2040         test->ifobj_rx->validation_func = validate_rx_full;
2041         return testapp_validate_traffic(test);
2042 }
2043 
2044 static int testapp_stats_fill_empty(struct test_spec *test)
2045 {
2046         pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
2047         test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
2048 
2049         test->ifobj_rx->use_fill_ring = false;
2050         test->ifobj_rx->validation_func = validate_fill_empty;
2051         return testapp_validate_traffic(test);
2052 }
2053 
2054 static int testapp_send_receive_unaligned(struct test_spec *test)
2055 {
2056         test->ifobj_tx->umem->unaligned_mode = true;
2057         test->ifobj_rx->umem->unaligned_mode = true;
2058         /* Let half of the packets straddle a 4K buffer boundary */
2059         pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2);
2060 
2061         return testapp_validate_traffic(test);
2062 }
2063 
2064 static int testapp_send_receive_unaligned_mb(struct test_spec *test)
2065 {
2066         test->mtu = MAX_ETH_JUMBO_SIZE;
2067         test->ifobj_tx->umem->unaligned_mode = true;
2068         test->ifobj_rx->umem->unaligned_mode = true;
2069         pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);
2070         return testapp_validate_traffic(test);
2071 }
2072 
2073 static int testapp_single_pkt(struct test_spec *test)
2074 {
2075         struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};
2076 
2077         pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
2078         return testapp_validate_traffic(test);
2079 }
2080 
2081 static int testapp_send_receive_mb(struct test_spec *test)
2082 {
2083         test->mtu = MAX_ETH_JUMBO_SIZE;
2084         pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);
2085 
2086         return testapp_validate_traffic(test);
2087 }
2088 
2089 static int testapp_invalid_desc_mb(struct test_spec *test)
2090 {
2091         struct xsk_umem_info *umem = test->ifobj_tx->umem;
2092         u64 umem_size = umem->num_frames * umem->frame_size;
2093         struct pkt pkts[] = {
2094                 /* Valid packet for synch to start with */
2095                 {0, MIN_PKT_SIZE, 0, true, 0},
2096                 /* Zero frame len is not legal */
2097                 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2098                 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2099                 {0, 0, 0, false, 0},
2100                 /* Invalid address in the second frame */
2101                 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2102                 {umem_size, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2103                 /* Invalid len in the middle */
2104                 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2105                 {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2106                 /* Invalid options in the middle */
2107                 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2108                 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XSK_DESC__INVALID_OPTION},
2109                 /* Transmit 2 frags, receive 3 */
2110                 {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, XDP_PKT_CONTD},
2111                 {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, 0},
2112                 /* Middle frame crosses chunk boundary with small length */
2113                 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2114                 {-MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false, 0},
2115                 /* Valid packet for synch so that something is received */
2116                 {0, MIN_PKT_SIZE, 0, true, 0}};
2117 
2118         if (umem->unaligned_mode) {
2119                 /* Crossing a chunk boundary allowed */
2120                 pkts[12].valid = true;
2121                 pkts[13].valid = true;
2122         }
2123 
2124         test->mtu = MAX_ETH_JUMBO_SIZE;
2125         pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
2126         return testapp_validate_traffic(test);
2127 }
2128 
2129 static int testapp_invalid_desc(struct test_spec *test)
2130 {
2131         struct xsk_umem_info *umem = test->ifobj_tx->umem;
2132         u64 umem_size = umem->num_frames * umem->frame_size;
2133         struct pkt pkts[] = {
2134                 /* Zero packet address allowed */
2135                 {0, MIN_PKT_SIZE, 0, true},
2136                 /* Allowed packet */
2137                 {0, MIN_PKT_SIZE, 0, true},
2138                 /* Straddling the start of umem */
2139                 {-2, MIN_PKT_SIZE, 0, false},
2140                 /* Packet too large */
2141                 {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
2142                 /* Up to end of umem allowed */
2143                 {umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
2144                 /* After umem ends */
2145                 {umem_size, MIN_PKT_SIZE, 0, false},
2146                 /* Straddle the end of umem */
2147                 {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
2148                 /* Straddle a 4K boundary */
2149                 {0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
2150                 /* Straddle a 2K boundary */
2151                 {0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true},
2152                 /* Valid packet for synch so that something is received */
2153                 {0, MIN_PKT_SIZE, 0, true}};
2154 
2155         if (umem->unaligned_mode) {
2156                 /* Crossing a page boundary allowed */
2157                 pkts[7].valid = true;
2158         }
2159         if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
2160                 /* Crossing a 2K frame size boundary not allowed */
2161                 pkts[8].valid = false;
2162         }
2163 
2164         if (test->ifobj_tx->shared_umem) {
2165                 pkts[4].offset += umem_size;
2166                 pkts[5].offset += umem_size;
2167                 pkts[6].offset += umem_size;
2168         }
2169 
2170         pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
2171         return testapp_validate_traffic(test);
2172 }
2173 
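/*
 * Worked example for the descriptor table above, with the default 4K frame
 * size: umem_size = num_frames * 4096, so the "straddle the end" entry at
 * umem_size - MIN_PKT_SIZE / 2 leaves the second half of the packet outside
 * the UMEM. The 0x800 entry only turns invalid once the frame size is
 * halved to 2K, because the packet then crosses a frame boundary, and the
 * 0x1000 entry becomes legal in unaligned mode, where buffers may cross
 * page boundaries.
 */
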
2174 static int testapp_xdp_drop(struct test_spec *test)
2175 {
2176         struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
2177         struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
2178 
2179         test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
2180                                skel_rx->maps.xsk, skel_tx->maps.xsk);
2181 
2182         pkt_stream_receive_half(test);
2183         return testapp_validate_traffic(test);
2184 }
2185 
2186 static int testapp_xdp_metadata_copy(struct test_spec *test)
2187 {
2188         struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
2189         struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
2190         struct bpf_map *data_map;
2191         int count = 0;
2192         int key = 0;
2193 
2194         test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
2195                                skel_tx->progs.xsk_xdp_populate_metadata,
2196                                skel_rx->maps.xsk, skel_tx->maps.xsk);
2197         test->ifobj_rx->use_metadata = true;
2198 
2199         data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
2200         if (!data_map || !bpf_map__is_internal(data_map)) {
2201                 ksft_print_msg("Error: could not find bss section of XDP program\n");
2202                 return TEST_FAILURE;
2203         }
2204 
2205         if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) {
2206                 ksft_print_msg("Error: could not update count element\n");
2207                 return TEST_FAILURE;
2208         }
2209 
2210         return testapp_validate_traffic(test);
2211 }
2212 
2213 static int testapp_xdp_shared_umem(struct test_spec *test)
2214 {
2215         struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
2216         struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
2217 
2218         test->total_steps = 1;
2219         test->nb_sockets = 2;
2220 
2221         test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_shared_umem,
2222                                skel_tx->progs.xsk_xdp_shared_umem,
2223                                skel_rx->maps.xsk, skel_tx->maps.xsk);
2224 
2225         pkt_stream_even_odd_sequence(test);
2226 
2227         return testapp_validate_traffic(test);
2228 }
2229 
2230 static int testapp_poll_txq_tmout(struct test_spec *test)
2231 {
2232         test->ifobj_tx->use_poll = true;
2233         /* Create an invalid frame by setting both the UMEM frame_size and the pkt length to 2048 */
2234         test->ifobj_tx->umem->frame_size = 2048;
2235         pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
2236         return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
2237 }
2238 
2239 static int testapp_poll_rxq_tmout(struct test_spec *test)
2240 {
2241         test->ifobj_rx->use_poll = true;
2242         return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
2243 }
2244 
2245 static int testapp_too_many_frags(struct test_spec *test)
2246 {
2247         struct pkt pkts[2 * XSK_DESC__MAX_SKB_FRAGS + 2] = {};
2248         u32 max_frags, i;
2249 
2250         if (test->mode == TEST_MODE_ZC)
2251                 max_frags = test->ifobj_tx->xdp_zc_max_segs;
2252         else
2253                 max_frags = XSK_DESC__MAX_SKB_FRAGS;
2254 
2255         test->mtu = MAX_ETH_JUMBO_SIZE;
2256 
2257         /* Valid packet for synch */
2258         pkts[0].len = MIN_PKT_SIZE;
2259         pkts[0].valid = true;
2260 
2261         /* One valid packet with the max amount of frags */
2262         for (i = 1; i < max_frags + 1; i++) {
2263                 pkts[i].len = MIN_PKT_SIZE;
2264                 pkts[i].options = XDP_PKT_CONTD;
2265                 pkts[i].valid = true;
2266         }
2267         pkts[max_frags].options = 0;
2268 
2269         /* An invalid packet with the max amount of frags but signals packet
2270          * continues on the last frag
2271          */
2272         for (i = max_frags + 1; i < 2 * max_frags + 1; i++) {
2273                 pkts[i].len = MIN_PKT_SIZE;
2274                 pkts[i].options = XDP_PKT_CONTD;
2275                 pkts[i].valid = false;
2276         }
2277 
2278         /* Valid packet for synch */
2279         pkts[2 * max_frags + 1].len = MIN_PKT_SIZE;
2280         pkts[2 * max_frags + 1].valid = true;
2281 
2282         pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2);
2283         return testapp_validate_traffic(test);
2284 }
2285 
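/*
 * Descriptor layout produced above, shown for max_frags == 3:
 *
 *	idx 0:		valid, no XDP_PKT_CONTD	-> sync packet
 *	idx 1-2:	valid, XDP_PKT_CONTD	-> first frags of one packet
 *	idx 3:		valid, options == 0	-> last frag, terminates it
 *	idx 4-6:	invalid, XDP_PKT_CONTD	-> CONTD set even on the last
 *						   frag, so the packet never
 *						   terminates and is dropped
 *	idx 7:		valid, no XDP_PKT_CONTD	-> final sync packet
 */
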
2286 static int xsk_load_xdp_programs(struct ifobject *ifobj)
2287 {
2288         ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
2289         if (libbpf_get_error(ifobj->xdp_progs))
2290                 return libbpf_get_error(ifobj->xdp_progs);
2291 
2292         return 0;
2293 }
2294 
2295 static void xsk_unload_xdp_programs(struct ifobject *ifobj)
2296 {
2297         xsk_xdp_progs__destroy(ifobj->xdp_progs);
2298 }
2299 
2300 /* Probe: check whether 2MB huge pages can be mmapped */
2301 static bool hugepages_present(void)
2302 {
2303         size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
2304         void *bufs;
2305 
2306         bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
2307                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
2308         if (bufs == MAP_FAILED)
2309                 return false;
2310 
2311         mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
2312         munmap(bufs, mmap_sz);
2313         return true;
2314 }
2315 
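/*
 * The probe above detects 2MB huge page support by attempting a throwaway
 * MAP_HUGETLB mapping and unmapping it again; with no huge pages reserved
 * the mmap() fails (typically ENOMEM) and unaligned-mode tests get
 * skipped. Reserving pages for a manual run looks like:
 *
 *	echo 512 > /proc/sys/vm/nr_hugepages
 */
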
2316 static void init_iface(struct ifobject *ifobj, thread_func_t func_ptr)
2317 {
2318         LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
2319         int err;
2320 
2321         ifobj->func_ptr = func_ptr;
2322 
2323         err = xsk_load_xdp_programs(ifobj);
2324         if (err) {
2325                 ksft_print_msg("Error loading XDP program\n");
2326                 exit_with_error(err);
2327         }
2328 
2329         if (hugepages_present())
2330                 ifobj->unaligned_supp = true;
2331 
2332         err = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &query_opts);
2333         if (err) {
2334                 ksft_print_msg("Error querying XDP capabilities\n");
2335                 exit_with_error(-err);
2336         }
2337         if (query_opts.feature_flags & NETDEV_XDP_ACT_RX_SG)
2338                 ifobj->multi_buff_supp = true;
2339         if (query_opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
2340                 if (query_opts.xdp_zc_max_segs > 1) {
2341                         ifobj->multi_buff_zc_supp = true;
2342                         ifobj->xdp_zc_max_segs = query_opts.xdp_zc_max_segs;
2343                 } else {
2344                         ifobj->xdp_zc_max_segs = 0;
2345                 }
2346         }
2347 }
2348 
2349 static int testapp_send_receive(struct test_spec *test)
2350 {
2351         return testapp_validate_traffic(test);
2352 }
2353 
2354 static int testapp_send_receive_2k_frame(struct test_spec *test)
2355 {
2356         test->ifobj_tx->umem->frame_size = 2048;
2357         test->ifobj_rx->umem->frame_size = 2048;
2358         pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
2359         return testapp_validate_traffic(test);
2360 }
2361 
2362 static int testapp_poll_rx(struct test_spec *test)
2363 {
2364         test->ifobj_rx->use_poll = true;
2365         return testapp_validate_traffic(test);
2366 }
2367 
2368 static int testapp_poll_tx(struct test_spec *test)
2369 {
2370         test->ifobj_tx->use_poll = true;
2371         return testapp_validate_traffic(test);
2372 }
2373 
2374 static int testapp_aligned_inv_desc(struct test_spec *test)
2375 {
2376         return testapp_invalid_desc(test);
2377 }
2378 
2379 static int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
2380 {
2381         test->ifobj_tx->umem->frame_size = 2048;
2382         test->ifobj_rx->umem->frame_size = 2048;
2383         return testapp_invalid_desc(test);
2384 }
2385 
2386 static int testapp_unaligned_inv_desc(struct test_spec *test)
2387 {
2388         test->ifobj_tx->umem->unaligned_mode = true;
2389         test->ifobj_rx->umem->unaligned_mode = true;
2390         return testapp_invalid_desc(test);
2391 }
2392 
2393 static int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
2394 {
2395         u64 page_size, umem_size;
2396 
2397         /* Odd frame size so the UMEM doesn't end near a page boundary. */
2398         test->ifobj_tx->umem->frame_size = 4001;
2399         test->ifobj_rx->umem->frame_size = 4001;
2400         test->ifobj_tx->umem->unaligned_mode = true;
2401         test->ifobj_rx->umem->unaligned_mode = true;
2402         /* This test exercises descriptors that straddle the end of
2403          * the UMEM but not a page.
2404          */
2405         page_size = sysconf(_SC_PAGESIZE);
2406         umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
2407         assert(umem_size % page_size > MIN_PKT_SIZE);
2408         assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
2409 
2410         return testapp_invalid_desc(test);
2411 }
2412 
2413 static int testapp_aligned_inv_desc_mb(struct test_spec *test)
2414 {
2415         return testapp_invalid_desc_mb(test);
2416 }
2417 
2418 static int testapp_unaligned_inv_desc_mb(struct test_spec *test)
2419 {
2420         test->ifobj_tx->umem->unaligned_mode = true;
2421         test->ifobj_rx->umem->unaligned_mode = true;
2422         return testapp_invalid_desc_mb(test);
2423 }
2424 
2425 static int testapp_xdp_metadata(struct test_spec *test)
2426 {
2427         return testapp_xdp_metadata_copy(test);
2428 }
2429 
2430 static int testapp_xdp_metadata_mb(struct test_spec *test)
2431 {
2432         test->mtu = MAX_ETH_JUMBO_SIZE;
2433         return testapp_xdp_metadata_copy(test);
2434 }
2435 
2436 static int testapp_hw_sw_min_ring_size(struct test_spec *test)
2437 {
2438         int ret;
2439 
2440         test->set_ring = true;
2441         test->total_steps = 2;
2442         test->ifobj_tx->ring.tx_pending = DEFAULT_BATCH_SIZE;
2443         test->ifobj_tx->ring.rx_pending = DEFAULT_BATCH_SIZE * 2;
2444         test->ifobj_tx->xsk->batch_size = 1;
2445         test->ifobj_rx->xsk->batch_size = 1;
2446         ret = testapp_validate_traffic(test);
2447         if (ret)
2448                 return ret;
2449 
2450         /* Set batch size to hw_ring_size - 1 */
2451         test->ifobj_tx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
2452         test->ifobj_rx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
2453         return testapp_validate_traffic(test);
2454 }
2455 
2456 static int testapp_hw_sw_max_ring_size(struct test_spec *test)
2457 {
2458         u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4;
2459         int ret;
2460 
2461         test->set_ring = true;
2462         test->total_steps = 2;
2463         test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending;
2464         test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending;
2465         test->ifobj_rx->umem->num_frames = max_descs;
2466         test->ifobj_rx->umem->fill_size = max_descs;
2467         test->ifobj_rx->umem->comp_size = max_descs;
2468         test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
2469         test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
2470 
2471         ret = testapp_validate_traffic(test);
2472         if (ret)
2473                 return ret;
2474 
2475         /* Set batch_size to 8152 for testing, as the ice HW ignores the 3 lowest bits when
2476          * updating the Rx HW tail register.
2477          */
2478         test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
2479         test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
2480         pkt_stream_replace(test, max_descs, MIN_PKT_SIZE);
2481         return testapp_validate_traffic(test);
2482 }
2483 
2484 static void run_pkt_test(struct test_spec *test)
2485 {
2486         int ret;
2487 
2488         ret = test->test_func(test);
2489 
2490         if (ret == TEST_PASS)
2491                 ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
2492                                       test->name);
2493         pkt_stream_restore_default(test);
2494 }
2495 
2496 static struct ifobject *ifobject_create(void)
2497 {
2498         struct ifobject *ifobj;
2499 
2500         ifobj = calloc(1, sizeof(struct ifobject));
2501         if (!ifobj)
2502                 return NULL;
2503 
2504         ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
2505         if (!ifobj->xsk_arr)
2506                 goto out_xsk_arr;
2507 
2508         ifobj->umem = calloc(1, sizeof(*ifobj->umem));
2509         if (!ifobj->umem)
2510                 goto out_umem;
2511 
2512         return ifobj;
2513 
2514 out_umem:
2515         free(ifobj->xsk_arr);
2516 out_xsk_arr:
2517         free(ifobj);
2518         return NULL;
2519 }
2520 
2521 static void ifobject_delete(struct ifobject *ifobj)
2522 {
2523         free(ifobj->umem);
2524         free(ifobj->xsk_arr);
2525         free(ifobj);
2526 }
2527 
2528 static bool is_xdp_supported(int ifindex)
2529 {
2530         int flags = XDP_FLAGS_DRV_MODE;
2531 
2532         LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags);
2533         struct bpf_insn insns[2] = {
2534                 BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
2535                 BPF_EXIT_INSN()
2536         };
2537         int prog_fd, insn_cnt = ARRAY_SIZE(insns);
2538         int err;
2539 
2540         prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
2541         if (prog_fd < 0)
2542                 return false;
2543 
2544         err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL);
2545         if (err) {
2546                 close(prog_fd);
2547                 return false;
2548         }
2549 
2550         bpf_xdp_detach(ifindex, flags, NULL);
2551         close(prog_fd);
2552 
2553         return true;
2554 }
2555 
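/*
 * Note: is_xdp_supported() probes driver-mode support by loading the
 * smallest possible XDP program (the two instructions above encode
 * "return XDP_PASS;") and test-attaching it with XDP_FLAGS_DRV_MODE.
 * Both the attachment and the program fd are cleaned up immediately, so
 * the probe leaves no state behind.
 */
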
2556 static const struct test_spec tests[] = {
2557         {.name = "SEND_RECEIVE", .test_func = testapp_send_receive},
2558         {.name = "SEND_RECEIVE_2K_FRAME", .test_func = testapp_send_receive_2k_frame},
2559         {.name = "SEND_RECEIVE_SINGLE_PKT", .test_func = testapp_single_pkt},
2560         {.name = "POLL_RX", .test_func = testapp_poll_rx},
2561         {.name = "POLL_TX", .test_func = testapp_poll_tx},
2562         {.name = "POLL_RXQ_FULL", .test_func = testapp_poll_rxq_tmout},
2563         {.name = "POLL_TXQ_FULL", .test_func = testapp_poll_txq_tmout},
2564         {.name = "SEND_RECEIVE_UNALIGNED", .test_func = testapp_send_receive_unaligned},
2565         {.name = "ALIGNED_INV_DESC", .test_func = testapp_aligned_inv_desc},
2566         {.name = "ALIGNED_INV_DESC_2K_FRAME_SIZE", .test_func = testapp_aligned_inv_desc_2k_frame},
2567         {.name = "UNALIGNED_INV_DESC", .test_func = testapp_unaligned_inv_desc},
2568         {.name = "UNALIGNED_INV_DESC_4001_FRAME_SIZE",
2569          .test_func = testapp_unaligned_inv_desc_4001_frame},
2570         {.name = "UMEM_HEADROOM", .test_func = testapp_headroom},
2571         {.name = "TEARDOWN", .test_func = testapp_teardown},
2572         {.name = "BIDIRECTIONAL", .test_func = testapp_bidirectional},
2573         {.name = "STAT_RX_DROPPED", .test_func = testapp_stats_rx_dropped},
2574         {.name = "STAT_TX_INVALID", .test_func = testapp_stats_tx_invalid_descs},
2575         {.name = "STAT_RX_FULL", .test_func = testapp_stats_rx_full},
2576         {.name = "STAT_FILL_EMPTY", .test_func = testapp_stats_fill_empty},
2577         {.name = "XDP_PROG_CLEANUP", .test_func = testapp_xdp_prog_cleanup},
2578         {.name = "XDP_DROP_HALF", .test_func = testapp_xdp_drop},
2579         {.name = "XDP_SHARED_UMEM", .test_func = testapp_xdp_shared_umem},
2580         {.name = "XDP_METADATA_COPY", .test_func = testapp_xdp_metadata},
2581         {.name = "XDP_METADATA_COPY_MULTI_BUFF", .test_func = testapp_xdp_metadata_mb},
2582         {.name = "SEND_RECEIVE_9K_PACKETS", .test_func = testapp_send_receive_mb},
2583         {.name = "SEND_RECEIVE_UNALIGNED_9K_PACKETS",
2584          .test_func = testapp_send_receive_unaligned_mb},
2585         {.name = "ALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_aligned_inv_desc_mb},
2586         {.name = "UNALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_unaligned_inv_desc_mb},
2587         {.name = "TOO_MANY_FRAGS", .test_func = testapp_too_many_frags},
2588         {.name = "HW_SW_MIN_RING_SIZE", .test_func = testapp_hw_sw_min_ring_size},
2589         {.name = "HW_SW_MAX_RING_SIZE", .test_func = testapp_hw_sw_max_ring_size},
2590 };
2591 
2592 static void print_tests(void)
2593 {
2594         u32 i;
2595 
2596         printf("Tests:\n");
2597         for (i = 0; i < ARRAY_SIZE(tests); i++)
2598                 printf("%u: %s\n", i, tests[i].name);
2599 }
2600 
2601 int main(int argc, char **argv)
2602 {
2603         struct pkt_stream *rx_pkt_stream_default;
2604         struct pkt_stream *tx_pkt_stream_default;
2605         struct ifobject *ifobj_tx, *ifobj_rx;
2606         u32 i, j, failed_tests = 0, nb_tests;
2607         int modes = TEST_MODE_SKB + 1;
2608         struct test_spec test;
2609         bool shared_netdev;
2610         int ret;
2611 
2612         /* Use libbpf 1.0 API mode */
2613         libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
2614 
2615         ifobj_tx = ifobject_create();
2616         if (!ifobj_tx)
2617                 exit_with_error(ENOMEM);
2618         ifobj_rx = ifobject_create();
2619         if (!ifobj_rx)
2620                 exit_with_error(ENOMEM);
2621 
2622         setlocale(LC_ALL, "");
2623 
2624         parse_command_line(ifobj_tx, ifobj_rx, argc, argv);
2625 
2626         if (opt_print_tests) {
2627                 print_tests();
2628                 ksft_exit_xpass();
2629         }
2630         if (opt_run_test != RUN_ALL_TESTS && opt_run_test >= ARRAY_SIZE(tests)) {
2631                 ksft_print_msg("Error: test %u does not exist.\n", opt_run_test);
2632                 ksft_exit_xfail();
2633         }
2634 
2635         shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex);
2636         ifobj_tx->shared_umem = shared_netdev;
2637         ifobj_rx->shared_umem = shared_netdev;
2638 
2639         if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx))
2640                 print_usage(argv);
2641 
2642         if (is_xdp_supported(ifobj_tx->ifindex)) {
2643                 modes++;
2644                 if (ifobj_zc_avail(ifobj_tx))
2645                         modes++;
2646         }
2647 
2648         ret = get_hw_ring_size(ifobj_tx->ifname, &ifobj_tx->ring);
2649         if (!ret) {
2650                 ifobj_tx->hw_ring_size_supp = true;
2651                 ifobj_tx->set_ring.default_tx = ifobj_tx->ring.tx_pending;
2652                 ifobj_tx->set_ring.default_rx = ifobj_tx->ring.rx_pending;
2653         }
2654 
2655         init_iface(ifobj_rx, worker_testapp_validate_rx);
2656         init_iface(ifobj_tx, worker_testapp_validate_tx);
2657 
2658         test_spec_init(&test, ifobj_tx, ifobj_rx, 0, &tests[0]);
2659         tx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
2660         rx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
2661         if (!tx_pkt_stream_default || !rx_pkt_stream_default)
2662                 exit_with_error(ENOMEM);
2663         test.tx_pkt_stream_default = tx_pkt_stream_default;
2664         test.rx_pkt_stream_default = rx_pkt_stream_default;
2665 
2666         if (opt_run_test == RUN_ALL_TESTS)
2667                 nb_tests = ARRAY_SIZE(tests);
2668         else
2669                 nb_tests = 1;
2670         if (opt_mode == TEST_MODE_ALL) {
2671                 ksft_set_plan(modes * nb_tests);
2672         } else {
2673                 if (opt_mode == TEST_MODE_DRV && modes <= TEST_MODE_DRV) {
2674                         ksft_print_msg("Error: XDP_DRV mode not supported.\n");
2675                         ksft_exit_xfail();
2676                 }
2677                 if (opt_mode == TEST_MODE_ZC && modes <= TEST_MODE_ZC) {
2678                         ksft_print_msg("Error: zero-copy mode not supported.\n");
2679                         ksft_exit_xfail();
2680                 }
2681 
2682                 ksft_set_plan(nb_tests);
2683         }
2684 
2685         for (i = 0; i < modes; i++) {
2686                 if (opt_mode != TEST_MODE_ALL && i != opt_mode)
2687                         continue;
2688 
2689                 for (j = 0; j < ARRAY_SIZE(tests); j++) {
2690                         if (opt_run_test != RUN_ALL_TESTS && j != opt_run_test)
2691                                 continue;
2692 
2693                         test_spec_init(&test, ifobj_tx, ifobj_rx, i, &tests[j]);
2694                         run_pkt_test(&test);
2695                         usleep(USLEEP_MAX);
2696 
2697                         if (test.fail)
2698                                 failed_tests++;
2699                 }
2700         }
2701 
2702         if (ifobj_tx->hw_ring_size_supp)
2703                 hw_ring_size_reset(ifobj_tx);
2704 
2705         pkt_stream_delete(tx_pkt_stream_default);
2706         pkt_stream_delete(rx_pkt_stream_default);
2707         xsk_unload_xdp_programs(ifobj_tx);
2708         xsk_unload_xdp_programs(ifobj_rx);
2709         ifobject_delete(ifobj_tx);
2710         ifobject_delete(ifobj_rx);
2711 
2712         if (failed_tests)
2713                 ksft_exit_fail();
2714         else
2715                 ksft_exit_pass();
2716 }
2717 
