// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/ethernet.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
#include <locale.h>
#include <sys/types.h>
#include <poll.h>

#include "bpf_load.h"
#include "bpf_util.h"
#include <bpf/bpf.h>

#include "xdpsock.h"

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define NUM_FRAMES 131072
#define FRAME_HEADROOM 0
#define FRAME_SIZE 2048
#define NUM_DESCS 1024
#define BATCH_SIZE 16

#define FQ_NUM_DESCS 1024
#define CQ_NUM_DESCS 1024

#define DEBUG_HEXDUMP 0

typedef __u32 u32;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
static int opt_shared_packet_buffer;
static int opt_interval = 1;

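/*
 * User-space view of a single-producer/single-consumer ring shared with the
 * kernel. cached_prod/cached_cons are local copies of the ring indices;
 * producer/consumer point into the mmap()ed ring area and hold the values the
 * kernel sees. Ring sizes are powers of two, so "mask" turns a free-running
 * index into a slot number. The UMEM fill and completion rings carry bare
 * frame indices (u32), while the RX/TX rings carry struct xdp_desc entries.
 */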
struct xdp_umem_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	u32 *ring;
	void *map;
};

struct xdp_umem {
	char (*frames)[FRAME_SIZE];
	struct xdp_umem_uqueue fq;
	struct xdp_umem_uqueue cq;
	int fd;
};

struct xdp_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	struct xdp_desc *ring;
	void *map;
};

struct xdpsock {
	struct xdp_uqueue rx;
	struct xdp_uqueue tx;
	int sfd;
	struct xdp_umem *umem;
	u32 outstanding_tx;
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
};

#define MAX_SOCKS 4
static int num_socks;
struct xdpsock *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void dump_stats(void);

#define lassert(expr)							\
	do {								\
		if (!(expr)) {						\
			fprintf(stderr, "%s:%s:%i: Assertion failed: "	\
				#expr ": errno: %d/\"%s\"\n",		\
				__FILE__, __func__, __LINE__,		\
				errno, strerror(errno));		\
			dump_stats();					\
			exit(EXIT_FAILURE);				\
		}							\
	} while (0)

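/*
 * Ring entries must be written before the producer index is published, and
 * must only be read after the consumer index is seen to have advanced. On
 * x86 these store-store and load-load orderings only need a compiler
 * barrier, which is what u_smp_rmb() and u_smp_wmb() expand to here.
 */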
#define barrier() __asm__ __volatile__("": : :"memory")
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static const char pkt_data[] =
	"\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
	"\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
	"\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
	"\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";

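/*
 * The *_nb_free()/*_nb_avail() helpers below work on the cached ring indices
 * and only re-read the index shared with the kernel when the cached view is
 * exhausted, so the common case touches no shared cache lines. xq_nb_free()
 * caches the consumer index plus the ring size, so that the free space is
 * simply cached_cons - cached_prod.
 */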
static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 free_entries = q->size - (q->cached_prod - q->cached_cons);

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer;

	return q->size - (q->cached_prod - q->cached_cons);
}

static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= ndescs)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;
	return q->cached_cons - q->cached_prod;
}

static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > ndescs) ? ndescs : entries;
}

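/*
 * umem_fill_to_kernel{,_ex}() hand frame indices back to the kernel on the
 * fill ring so it has buffers to place received packets in. The entries are
 * written first; u_smp_wmb() then orders them before the producer index
 * update that makes them visible to the kernel.
 */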
static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
					 struct xdp_desc *d,
					 size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i].idx;
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u32 *d,
				      size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i];
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

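/*
 * umem_complete_from_kernel() drains the completion ring: each entry is the
 * index of a frame whose transmission has finished and which user space may
 * now reuse. The consumer index is only published after the entries have
 * been copied out.
 */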
static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
					       u32 *d, size_t nb)
{
	u32 idx, i, entries = umem_nb_avail(cq, nb);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = cq->cached_cons++ & cq->mask;
		d[i] = cq->ring[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*cq->consumer = cq->cached_cons;
	}

	return entries;
}

static inline void *xq_get_data(struct xdpsock *xsk, __u32 idx, __u32 off)
{
	lassert(idx < NUM_FRAMES);
	return &xsk->umem->frames[idx][off];
}

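/*
 * RX/TX descriptor ring helpers: xq_enq() posts full descriptors on the TX
 * ring, xq_enq_tx_only() posts a batch of descriptors for the canned
 * pkt_data frame used by the txonly benchmark, and xq_deq() pulls received
 * descriptors off the RX ring. All follow the same pattern as the UMEM
 * helpers: update the cached index, barrier, then publish the shared index.
 */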
static inline int xq_enq(struct xdp_uqueue *uq,
			 const struct xdp_desc *descs,
			 unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].idx = descs[i].idx;
		r[idx].len = descs[i].len;
		r[idx].offset = descs[i].offset;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}

static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
				 __u32 idx, unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].idx = idx + i;
		r[idx].len = sizeof(pkt_data) - 1;
		r[idx].offset = 0;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}

static inline int xq_deq(struct xdp_uqueue *uq,
			 struct xdp_desc *descs,
			 int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int idx;
	int i, entries;

	entries = xq_nb_avail(uq, ndescs);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = uq->cached_cons++ & uq->mask;
		descs[i] = r[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*uq->consumer = uq->cached_cons;
	}

	return entries;
}

static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

#if DEBUG_HEXDUMP
static void hex_dump(void *pkt, size_t length, const char *prefix)
{
	int i = 0;
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;

	printf("length = %zu\n", length);
	printf("%s | ", prefix);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", prefix);
		}
	}
	printf("\n");
}
#endif

static size_t gen_eth_frame(char *frame)
{
	memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
	return sizeof(pkt_data) - 1;
}

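/*
 * Register a packet buffer area (UMEM) with the socket and map its fill and
 * completion rings. XDP_UMEM_REG describes the buffer to the kernel,
 * XDP_UMEM_FILL_RING/XDP_UMEM_COMPLETION_RING size the two rings, and
 * XDP_MMAP_OFFSETS reports where the producer/consumer indices and the
 * descriptor array live inside each mmap()ed region. For the txonly
 * benchmark every frame is pre-filled with the template packet.
 */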
static struct xdp_umem *xdp_umem_configure(int sfd)
{
	int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xdp_umem *umem;
	socklen_t optlen;
	void *bufs;

	umem = calloc(1, sizeof(*umem));
	lassert(umem);

	lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
			       NUM_FRAMES * FRAME_SIZE) == 0);

	mr.addr = (__u64)bufs;
	mr.len = NUM_FRAMES * FRAME_SIZE;
	mr.frame_size = FRAME_SIZE;
	mr.frame_headroom = FRAME_HEADROOM;

	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
			   sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
			   sizeof(int)) == 0);

	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	umem->fq.map = mmap(0, off.fr.desc +
			    FQ_NUM_DESCS * sizeof(u32),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_FILL_RING);
	lassert(umem->fq.map != MAP_FAILED);

	umem->fq.mask = FQ_NUM_DESCS - 1;
	umem->fq.size = FQ_NUM_DESCS;
	umem->fq.producer = umem->fq.map + off.fr.producer;
	umem->fq.consumer = umem->fq.map + off.fr.consumer;
	umem->fq.ring = umem->fq.map + off.fr.desc;

	umem->cq.map = mmap(0, off.cr.desc +
			    CQ_NUM_DESCS * sizeof(u32),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_COMPLETION_RING);
	lassert(umem->cq.map != MAP_FAILED);

	umem->cq.mask = CQ_NUM_DESCS - 1;
	umem->cq.size = CQ_NUM_DESCS;
	umem->cq.producer = umem->cq.map + off.cr.producer;
	umem->cq.consumer = umem->cq.map + off.cr.consumer;
	umem->cq.ring = umem->cq.map + off.cr.desc;

	umem->frames = (char (*)[FRAME_SIZE])bufs;
	umem->fd = sfd;

	if (opt_bench == BENCH_TXONLY) {
		int i;

		for (i = 0; i < NUM_FRAMES; i++)
			(void)gen_eth_frame(&umem->frames[i][0]);
	}

	return umem;
}

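/*
 * Create one AF_XDP socket: size and map the RX and TX descriptor rings,
 * give the kernel an initial set of frames on the fill ring (only when this
 * socket owns the UMEM), and bind the socket to the chosen interface and
 * queue. When called with an existing UMEM the socket is bound with
 * XDP_SHARED_UMEM so several sockets can share one buffer area.
 */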
static struct xdpsock *xsk_configure(struct xdp_umem *umem)
{
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	int sfd, ndescs = NUM_DESCS;
	struct xdpsock *xsk;
	bool shared = true;
	socklen_t optlen;
	u32 i;

	sfd = socket(PF_XDP, SOCK_RAW, 0);
	lassert(sfd >= 0);

	xsk = calloc(1, sizeof(*xsk));
	lassert(xsk);

	xsk->sfd = sfd;
	xsk->outstanding_tx = 0;

	if (!umem) {
		shared = false;
		xsk->umem = xdp_umem_configure(sfd);
	} else {
		xsk->umem = umem;
	}

	lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
			   &ndescs, sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
			   &ndescs, sizeof(int)) == 0);
	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	/* Rx */
	xsk->rx.map = mmap(NULL,
			   off.rx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_RX_RING);
	lassert(xsk->rx.map != MAP_FAILED);

	if (!shared) {
		for (i = 0; i < NUM_DESCS / 2; i++)
			lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
				== 0);
	}

	/* Tx */
	xsk->tx.map = mmap(NULL,
			   off.tx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_TX_RING);
	lassert(xsk->tx.map != MAP_FAILED);

	xsk->rx.mask = NUM_DESCS - 1;
	xsk->rx.size = NUM_DESCS;
	xsk->rx.producer = xsk->rx.map + off.rx.producer;
	xsk->rx.consumer = xsk->rx.map + off.rx.consumer;
	xsk->rx.ring = xsk->rx.map + off.rx.desc;

	xsk->tx.mask = NUM_DESCS - 1;
	xsk->tx.size = NUM_DESCS;
	xsk->tx.producer = xsk->tx.map + off.tx.producer;
	xsk->tx.consumer = xsk->tx.map + off.tx.consumer;
	xsk->tx.ring = xsk->tx.map + off.tx.desc;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = opt_ifindex;
	sxdp.sxdp_queue_id = opt_queue;
	if (shared) {
		sxdp.sxdp_flags = XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	}

	lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);

	return xsk;
}

static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf("	");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks; i++) {
		char *fmt = "%-15s %'-11.0f %'-11lu\n";
		double rx_pps, tx_pps;

		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);

		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
	}
}

static void *poller(void *arg)
{
	(void)arg;
	for (;;) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

static void int_exit(int sig)
{
	(void)sig;
	dump_stats();
	bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	exit(EXIT_SUCCESS);
}

static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"shared-buffer", no_argument, 0, 's'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{0, 0, 0, 0}
};

static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -s, --shared-buffer	Use shared packet buffer\n"
		"  -S, --xdp-skb		Use XDP skb-mode\n"
		"  -N, --xdp-native	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"\n";
	fprintf(stderr, str, prog);
	exit(EXIT_FAILURE);
}

static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "rtli:q:psSNn:", long_options,
				&option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 's':
			opt_shared_packet_buffer = 1;
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			break;
		case 'N':
			opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}
}

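/*
 * TX completion path: kick_tx() issues an empty sendto() to tell the kernel
 * to process the TX ring. complete_tx_l2fwd() then reclaims finished frames
 * from the completion ring and recycles them onto the fill ring so they can
 * be received again, while complete_tx_only() only drops them from the
 * outstanding count.
 */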
static void kick_tx(int fd)
{
	int ret;

	ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN)
		return;
	lassert(0);
}

static inline void complete_tx_l2fwd(struct xdpsock *xsk)
{
	u32 descs[BATCH_SIZE];
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);
	ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
		  xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
	if (rcvd > 0) {
		umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

static inline void complete_tx_only(struct xdpsock *xsk)
{
	u32 descs[BATCH_SIZE];
	unsigned int rcvd;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);

	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
	if (rcvd > 0) {
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

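/*
 * rxdrop benchmark: dequeue up to BATCH_SIZE received descriptors, count
 * them, and immediately return the frames to the fill ring without touching
 * the payload (unless DEBUG_HEXDUMP is set).
 */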
static void rx_drop(struct xdpsock *xsk)
{
	struct xdp_desc descs[BATCH_SIZE];
	unsigned int rcvd, i;

	rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
	if (!rcvd)
		return;

	for (i = 0; i < rcvd; i++) {
		u32 idx = descs[i].idx;

		lassert(idx < NUM_FRAMES);
#if DEBUG_HEXDUMP
		char *pkt;
		char buf[32];

		pkt = xq_get_data(xsk, idx, descs[i].offset);
		sprintf(buf, "idx=%d", idx);
		hex_dump(pkt, descs[i].len, buf);
#endif
	}

	xsk->rx_npkts += rcvd;

	umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
}

static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS + 1];
	int i, ret, timeout, nfds = 1;

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsks[i]->sfd;
		fds[i].events = POLLIN;
		timeout = 1000; /* 1 second */
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i]);
	}
}

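/*
 * txonly benchmark: whenever the TX ring has room for a full batch, enqueue
 * BATCH_SIZE descriptors pointing at the pre-filled frames, then kick the
 * kernel and reap completions. With --poll the loop waits for POLLOUT first.
 */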
static void tx_only(struct xdpsock *xsk)
{
	int timeout, ret, nfds = 1;
	struct pollfd fds[nfds + 1];
	unsigned int idx = 0;

	memset(fds, 0, sizeof(fds));
	fds[0].fd = xsk->sfd;
	fds[0].events = POLLOUT;
	timeout = 1000; /* 1 second */

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;

			if (fds[0].fd != xsk->sfd ||
			    !(fds[0].revents & POLLOUT))
				continue;
		}

		if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
			lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);

			xsk->outstanding_tx += BATCH_SIZE;
			idx += BATCH_SIZE;
			idx %= NUM_FRAMES;
		}

		complete_tx_only(xsk);
	}
}

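/*
 * l2fwd benchmark: receive a batch, swap the Ethernet source and destination
 * addresses in place, and transmit the same frames back out. Completed TX
 * frames are recycled to the fill ring by complete_tx_l2fwd().
 */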
static void l2fwd(struct xdpsock *xsk)
{
	for (;;) {
		struct xdp_desc descs[BATCH_SIZE];
		unsigned int rcvd, i;
		int ret;

		for (;;) {
			complete_tx_l2fwd(xsk);

			rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
			if (rcvd > 0)
				break;
		}

		for (i = 0; i < rcvd; i++) {
			char *pkt = xq_get_data(xsk, descs[i].idx,
						descs[i].offset);

			swap_mac_addresses(pkt);
#if DEBUG_HEXDUMP
			char buf[32];
			u32 idx = descs[i].idx;

			sprintf(buf, "idx=%d", idx);
			hex_dump(pkt, descs[i].len, buf);
#endif
		}

		xsk->rx_npkts += rcvd;

		ret = xq_enq(&xsk->tx, descs, rcvd);
		lassert(ret == 0);
		xsk->outstanding_tx += rcvd;
	}
}

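/*
 * main(): raise RLIMIT_MEMLOCK, load the companion xdpsock_kern.o XDP
 * program and attach it to the interface, store the configured queue id in
 * the qidconf map and each socket fd in the BPF map the kernel program uses
 * to redirect packets, then start the stats thread and run the selected
 * benchmark.
 */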
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	char xdp_filename[256];
	int i, ret, key = 0;
	pthread_t pt;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);

	if (load_bpf_file(xdp_filename)) {
		fprintf(stderr, "ERROR: load_bpf_file %s\n", bpf_log_buf);
		exit(EXIT_FAILURE);
	}

	if (!prog_fd[0]) {
		fprintf(stderr, "ERROR: load_bpf_file: \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd[0], opt_xdp_flags) < 0) {
		fprintf(stderr, "ERROR: link set xdp fd failed\n");
		exit(EXIT_FAILURE);
	}

	ret = bpf_map_update_elem(map_fd[0], &key, &opt_queue, 0);
	if (ret) {
		fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
		exit(EXIT_FAILURE);
	}

	/* Create sockets... */
	xsks[num_socks++] = xsk_configure(NULL);

#if RR_LB
	for (i = 0; i < MAX_SOCKS - 1; i++)
		xsks[num_socks++] = xsk_configure(xsks[0]->umem);
#endif

	/* ...and insert them into the map. */
	for (i = 0; i < num_socks; i++) {
		key = i;
		ret = bpf_map_update_elem(map_fd[1], &key, &xsks[i]->sfd, 0);
		if (ret) {
			fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
			exit(EXIT_FAILURE);
		}
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	ret = pthread_create(&pt, NULL, poller, NULL);
	lassert(ret == 0);

	prev_time = get_nsecs();

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only(xsks[0]);
	else
		l2fwd(xsks[0]);

	return 0;
}