// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <asm/barrier.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <locale.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "bpf/libbpf.h"
#include "bpf/xsk.h"
#include <bpf/bpf.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define NUM_FRAMES (4 * 1024)
#define BATCH_SIZE 64

#define DEBUG_HEXDUMP 0
#define MAX_SOCKS 8

typedef __u64 u64;
typedef __u32 u32;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags;
static __u32 prog_id;

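/* A UMEM is the shared packet buffer area plus the two rings the kernel uses
 * to pass buffers back and forth: the fill ring (fq) supplies empty buffers
 * for RX, and the completion ring (cq) hands back buffers whose TX is done.
 */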
struct xsk_umem_info {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem;
	void *buffer;
};

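/* Per-socket state: the RX/TX descriptor rings, the UMEM the socket is bound
 * to, and the counters used by dump_stats() and the TX completion paths.
 */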
struct xsk_socket_info {
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
	u32 outstanding_tx;
};

static int num_socks;
struct xsk_socket_info *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf(" ");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

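/* Print per-socket throughput: packets per second are derived from the delta
 * in the packet counters since the previous call divided by the elapsed time.
 */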
static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-15s %'-11.0f %'-11lu\n";
		double rx_pps, tx_pps;

		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);

		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
	}
}

static void *poller(void *arg)
{
	(void)arg;
	for (;;) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

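/* Detach the XDP program on exit, but only if the program currently attached
 * to the interface is still the one this process loaded (matching prog id);
 * anything that replaced it in the meantime is left alone.
 */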
static void remove_xdp_program(void)
{
	__u32 curr_prog_id = 0;

	if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
		printf("bpf_get_link_xdp_id failed\n");
		exit(EXIT_FAILURE);
	}
	if (prog_id == curr_prog_id)
		bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	else if (!curr_prog_id)
		printf("couldn't find a prog id on the given interface\n");
	else
		printf("program on interface changed, not removing\n");
}

static void int_exit(int sig)
{
	struct xsk_umem *umem = xsks[0]->umem->umem;

	(void)sig;

	dump_stats();
	xsk_socket__delete(xsks[0]->xsk);
	(void)xsk_umem__delete(umem);
	remove_xdp_program();

	exit(EXIT_SUCCESS);
}

static void __exit_with_error(int error, const char *file, const char *func,
			      int line)
{
	fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
		line, error, strerror(error));
	dump_stats();
	remove_xdp_program();
	exit(EXIT_FAILURE);
}

#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, \
						 __LINE__)

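/* Canned 60-byte Ethernet/IPv4/UDP frame used by the txonly benchmark;
 * gen_eth_frame() copies it into the UMEM frames at startup.
 */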
static const char pkt_data[] =
	"\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
	"\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
	"\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
	"\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";

static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}

static size_t gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
{
	memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,
	       sizeof(pkt_data) - 1);
	return sizeof(pkt_data) - 1;
}

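/* Register the user-space buffer as an AF_XDP UMEM. With a NULL config,
 * xsk_umem__create() also creates the fill and completion rings using
 * libbpf's default sizes.
 */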
static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
{
	struct xsk_umem_info *umem;
	int ret;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		exit_with_error(errno);

	ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
			       NULL);
	if (ret)
		exit_with_error(-ret);

	umem->buffer = buffer;
	return umem;
}

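/* Create an AF_XDP socket bound to opt_if/opt_queue, remember the id of the
 * XDP program libbpf attached (so it can be removed on exit), and pre-fill
 * the fill ring with one buffer per descriptor so RX can start immediately.
 */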
static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem)
{
	struct xsk_socket_config cfg;
	struct xsk_socket_info *xsk;
	int ret;
	u32 idx;
	int i;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		exit_with_error(errno);

	xsk->umem = umem;
	cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg.libbpf_flags = 0;
	cfg.xdp_flags = opt_xdp_flags;
	cfg.bind_flags = opt_xdp_bind_flags;
	ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
				 &xsk->rx, &xsk->tx, &cfg);
	if (ret)
		exit_with_error(-ret);

	ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags);
	if (ret)
		exit_with_error(-ret);

	ret = xsk_ring_prod__reserve(&xsk->umem->fq,
				     XSK_RING_PROD__DEFAULT_NUM_DESCS,
				     &idx);
	if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
		exit_with_error(-ret);
	for (i = 0;
	     i < XSK_RING_PROD__DEFAULT_NUM_DESCS *
		     XSK_UMEM__DEFAULT_FRAME_SIZE;
	     i += XSK_UMEM__DEFAULT_FRAME_SIZE)
		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx++) = i;
	xsk_ring_prod__submit(&xsk->umem->fq,
			      XSK_RING_PROD__DEFAULT_NUM_DESCS);

	return xsk;
}

static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{"zero-copy", no_argument, 0, 'z'},
	{"copy", no_argument, 0, 'c'},
	{"force", no_argument, 0, 'F'},
	{0, 0, 0, 0}
};

static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -S, --xdp-skb	Use XDP skb-mode\n"
		"  -N, --xdp-native	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"  -z, --zero-copy	Force zero-copy mode.\n"
		"  -c, --copy		Force copy mode.\n"
		"  -F, --force		Force loading the XDP program, replacing one already attached.\n"
		"\n";
	fprintf(stderr, str, prog);
	exit(EXIT_FAILURE);
}

static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "Frtli:q:psSNn:cz", long_options,
				&option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		case 'z':
			opt_xdp_bind_flags |= XDP_ZEROCOPY;
			break;
		case 'c':
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'F':
			opt_xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}
}

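/* A zero-length sendto() kicks the kernel into processing descriptors that
 * have been placed on the TX ring; ENOBUFS/EAGAIN/EBUSY just mean it is
 * temporarily busy and the caller will retry on the next completion pass.
 */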
static void kick_tx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
		return;
	exit_with_error(errno);
}

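/* Reap TX completions for the l2fwd benchmark: kick the kernel, then move
 * every address found on the completion ring back onto the fill ring so the
 * buffer can be used for RX again.
 */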
static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk)
{
	u32 idx_cq = 0, idx_fq = 0;
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk);
	ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
		xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, ndescs, &idx_cq);
	if (rcvd > 0) {
		unsigned int i;
		int ret;

		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
		while (ret != rcvd) {
			if (ret < 0)
				exit_with_error(-ret);
			ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd,
						     &idx_fq);
		}
		for (i = 0; i < rcvd; i++)
			*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) =
				*xsk_ring_cons__comp_addr(&xsk->umem->cq,
							  idx_cq++);

		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

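/* Reap TX completions for the txonly benchmark. Frames are reused in a fixed
 * round-robin order, so completed addresses only need to be released from
 * the completion ring, not recycled onto another ring.
 */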
static inline void complete_tx_only(struct xsk_socket_info *xsk)
{
	unsigned int rcvd;
	u32 idx;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk);

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, BATCH_SIZE, &idx);
	if (rcvd > 0) {
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

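/* RX-and-drop: take a batch of descriptors from the RX ring and hand the
 * buffers straight back to the fill ring without touching the payload
 * (apart from the optional hex dump).
 */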
static void rx_drop(struct xsk_socket_info *xsk)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_fq = 0;
	int ret;

	rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
	if (!rcvd)
		return;

	ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		hex_dump(pkt, len, addr);
		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = addr;
	}

	xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);
	xsk->rx_npkts += rcvd;
}

static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS + 1];
	int i, ret, timeout, nfds = 1;

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLIN;
		timeout = 1000; /* 1 second */
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i]);
	}
}

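/* TX-only benchmark: keep posting BATCH_SIZE descriptors that point at the
 * pre-initialized frames, cycling through the NUM_FRAMES buffers in the
 * UMEM, and reap completions after every batch.
 */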
static void tx_only(struct xsk_socket_info *xsk)
{
	int timeout, ret, nfds = 1;
	struct pollfd fds[nfds + 1];
	u32 idx, frame_nb = 0;

	memset(fds, 0, sizeof(fds));
	fds[0].fd = xsk_socket__fd(xsk->xsk);
	fds[0].events = POLLOUT;
	timeout = 1000; /* 1 second */

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;

			if (!(fds[0].revents & POLLOUT))
				continue;
		}

		if (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) ==
		    BATCH_SIZE) {
			unsigned int i;

			for (i = 0; i < BATCH_SIZE; i++) {
				xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->addr
					= (frame_nb + i) <<
					  XSK_UMEM__DEFAULT_FRAME_SHIFT;
				xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->len =
					sizeof(pkt_data) - 1;
			}

			xsk_ring_prod__submit(&xsk->tx, BATCH_SIZE);
			xsk->outstanding_tx += BATCH_SIZE;
			frame_nb += BATCH_SIZE;
			frame_nb %= NUM_FRAMES;
		}

		complete_tx_only(xsk);
	}
}

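/* L2 forwarding: receive a batch, swap the Ethernet source and destination
 * MAC addresses in place, and retransmit the same buffers out of the same
 * queue. The buffers return to the fill ring via complete_tx_l2fwd().
 */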
static void l2fwd(struct xsk_socket_info *xsk)
{
	for (;;) {
		unsigned int rcvd, i;
		u32 idx_rx = 0, idx_tx = 0;
		int ret;

		for (;;) {
			complete_tx_l2fwd(xsk);

			rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE,
						   &idx_rx);
			if (rcvd > 0)
				break;
		}

		ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
		while (ret != rcvd) {
			if (ret < 0)
				exit_with_error(-ret);
			ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
		}

		for (i = 0; i < rcvd; i++) {
			u64 addr = xsk_ring_cons__rx_desc(&xsk->rx,
							  idx_rx)->addr;
			u32 len = xsk_ring_cons__rx_desc(&xsk->rx,
							 idx_rx++)->len;
			char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

			swap_mac_addresses(pkt);

			hex_dump(pkt, len, addr);
			xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = addr;
			xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len;
		}

		xsk_ring_prod__submit(&xsk->tx, rcvd);
		xsk_ring_cons__release(&xsk->rx, rcvd);

		xsk->rx_npkts += rcvd;
		xsk->outstanding_tx += rcvd;
	}
}

int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	struct xsk_umem_info *umem;
	pthread_t pt;
	void *bufs;
	int ret;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	ret = posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
			     NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE);
	if (ret)
		exit_with_error(ret);

	/* Create sockets... */
	umem = xsk_configure_umem(bufs,
				  NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE);
	xsks[num_socks++] = xsk_configure_socket(umem);

	if (opt_bench == BENCH_TXONLY) {
		int i;

		for (i = 0; i < NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
		     i += XSK_UMEM__DEFAULT_FRAME_SIZE)
			(void)gen_eth_frame(umem, i);
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	ret = pthread_create(&pt, NULL, poller, NULL);
	if (ret)
		exit_with_error(ret);

	prev_time = get_nsecs();

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only(xsks[0]);
	else
		l2fwd(xsks[0]);

	return 0;
}