// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <asm/barrier.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/limits.h>
#include <linux/udp.h>
#include <arpa/inet.h>
#include <locale.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include <bpf/libbpf.h>
#include <bpf/xsk.h>
#include <bpf/bpf.h>
#include "xdpsock.h"

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define NUM_FRAMES (4 * 1024)
#define MIN_PKT_SIZE 64

#define DEBUG_HEXDUMP 0

typedef __u64 u64;
typedef __u32 u32;
typedef __u16 u16;
typedef __u8 u8;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static unsigned long opt_duration;
static unsigned long start_time;
static bool benchmark_done;
static u32 opt_batch_size = 64;
static int opt_pkt_count;
static u16 opt_pkt_size = MIN_PKT_SIZE;
static u32 opt_pkt_fill_pattern = 0x12345678;
static bool opt_extra_stats;
static bool opt_quiet;
static bool opt_app_stats;
static const char *opt_irq_str = "";
static u32 irq_no;
static int irqs_at_init = -1;
static int opt_poll;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
static u32 opt_umem_flags;
static int opt_unaligned_chunks;
static int opt_mmap_flags;
static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
static u32 opt_num_xsks = 1;
static u32 prog_id;
static bool opt_busy_poll;

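/*
 * Per-socket statistics come in three flavors: ring stats are fetched from
 * the kernel via the XDP_STATISTICS sockopt, app stats are syscall counters
 * maintained in userspace, and driver stats are interrupt counts derived
 * from /proc/interrupts and /sys/kernel/irq (only when --irq-string is set).
 */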
struct xsk_ring_stats {
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long rx_dropped_npkts;
	unsigned long rx_invalid_npkts;
	unsigned long tx_invalid_npkts;
	unsigned long rx_full_npkts;
	unsigned long rx_fill_empty_npkts;
	unsigned long tx_empty_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
	unsigned long prev_rx_dropped_npkts;
	unsigned long prev_rx_invalid_npkts;
	unsigned long prev_tx_invalid_npkts;
	unsigned long prev_rx_full_npkts;
	unsigned long prev_rx_fill_empty_npkts;
	unsigned long prev_tx_empty_npkts;
};

struct xsk_driver_stats {
	unsigned long intrs;
	unsigned long prev_intrs;
};

struct xsk_app_stats {
	unsigned long rx_empty_polls;
	unsigned long fill_fail_polls;
	unsigned long copy_tx_sendtos;
	unsigned long tx_wakeup_sendtos;
	unsigned long opt_polls;
	unsigned long prev_rx_empty_polls;
	unsigned long prev_fill_fail_polls;
	unsigned long prev_copy_tx_sendtos;
	unsigned long prev_tx_wakeup_sendtos;
	unsigned long prev_opt_polls;
};

struct xsk_umem_info {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem;
	void *buffer;
};

struct xsk_socket_info {
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	struct xsk_ring_stats ring_stats;
	struct xsk_app_stats app_stats;
	struct xsk_driver_stats drv_stats;
	u32 outstanding_tx;
};

static int num_socks;
struct xsk_socket_info *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf("	");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk)
{
	struct xdp_statistics stats;
	socklen_t optlen;
	int err;

	optlen = sizeof(stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
	if (err)
		return err;

	if (optlen == sizeof(struct xdp_statistics)) {
		xsk->ring_stats.rx_dropped_npkts = stats.rx_dropped;
		xsk->ring_stats.rx_invalid_npkts = stats.rx_invalid_descs;
		xsk->ring_stats.tx_invalid_npkts = stats.tx_invalid_descs;
		xsk->ring_stats.rx_full_npkts = stats.rx_ring_full;
		xsk->ring_stats.rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs;
		xsk->ring_stats.tx_empty_npkts = stats.tx_ring_empty_descs;
		return 0;
	}

	return -EINVAL;
}

static void dump_app_stats(long dt)
{
	int i;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-18s %'-14.0f %'-14lu\n";
		double rx_empty_polls_ps, fill_fail_polls_ps, copy_tx_sendtos_ps,
			tx_wakeup_sendtos_ps, opt_polls_ps;

		rx_empty_polls_ps = (xsks[i]->app_stats.rx_empty_polls -
					xsks[i]->app_stats.prev_rx_empty_polls) * 1000000000. / dt;
		fill_fail_polls_ps = (xsks[i]->app_stats.fill_fail_polls -
					xsks[i]->app_stats.prev_fill_fail_polls) * 1000000000. / dt;
		copy_tx_sendtos_ps = (xsks[i]->app_stats.copy_tx_sendtos -
					xsks[i]->app_stats.prev_copy_tx_sendtos) * 1000000000. / dt;
		tx_wakeup_sendtos_ps = (xsks[i]->app_stats.tx_wakeup_sendtos -
					xsks[i]->app_stats.prev_tx_wakeup_sendtos)
					* 1000000000. / dt;
		opt_polls_ps = (xsks[i]->app_stats.opt_polls -
					xsks[i]->app_stats.prev_opt_polls) * 1000000000. / dt;

		printf("\n%-18s %-14s %-14s\n", "", "calls/s", "count");
		printf(fmt, "rx empty polls", rx_empty_polls_ps, xsks[i]->app_stats.rx_empty_polls);
		printf(fmt, "fill fail polls", fill_fail_polls_ps,
		       xsks[i]->app_stats.fill_fail_polls);
		printf(fmt, "copy tx sendtos", copy_tx_sendtos_ps,
		       xsks[i]->app_stats.copy_tx_sendtos);
		printf(fmt, "tx wakeup sendtos", tx_wakeup_sendtos_ps,
		       xsks[i]->app_stats.tx_wakeup_sendtos);
		printf(fmt, "opt polls", opt_polls_ps, xsks[i]->app_stats.opt_polls);

		xsks[i]->app_stats.prev_rx_empty_polls = xsks[i]->app_stats.rx_empty_polls;
		xsks[i]->app_stats.prev_fill_fail_polls = xsks[i]->app_stats.fill_fail_polls;
		xsks[i]->app_stats.prev_copy_tx_sendtos = xsks[i]->app_stats.copy_tx_sendtos;
		xsks[i]->app_stats.prev_tx_wakeup_sendtos = xsks[i]->app_stats.tx_wakeup_sendtos;
		xsks[i]->app_stats.prev_opt_polls = xsks[i]->app_stats.opt_polls;
	}
}

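/*
 * Scan /proc/interrupts for the first line containing opt_irq_str and
 * extract the interrupt number from the start of that line.
 */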
static bool get_interrupt_number(void)
{
	FILE *f_int_proc;
	char line[4096];
	bool found = false;

	f_int_proc = fopen("/proc/interrupts", "r");
	if (f_int_proc == NULL) {
		printf("Failed to open /proc/interrupts.\n");
		return found;
	}

	while (!feof(f_int_proc) && !found) {
		/* Make sure to read a full line at a time */
		if (fgets(line, sizeof(line), f_int_proc) == NULL ||
		    line[strlen(line) - 1] != '\n') {
			printf("Error reading from interrupts file\n");
			break;
		}

		/* Extract interrupt number from line */
		if (strstr(line, opt_irq_str) != NULL) {
			irq_no = atoi(line);
			found = true;
			break;
		}
	}

	fclose(f_int_proc);

	return found;
}

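/*
 * Sum the per-CPU counters for irq_no. /sys/kernel/irq/<n>/per_cpu_count
 * holds one comma-separated counter per CPU, so a (hypothetical) line of
 * "0,153,0,27" yields 180 interrupts in total.
 */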
static int get_irqs(void)
{
	char count_path[PATH_MAX];
	int total_intrs = -1;
	FILE *f_count_proc;
	char line[4096];

	snprintf(count_path, sizeof(count_path),
		 "/sys/kernel/irq/%i/per_cpu_count", irq_no);
	f_count_proc = fopen(count_path, "r");
	if (f_count_proc == NULL) {
		printf("Failed to open %s\n", count_path);
		return total_intrs;
	}

	if (fgets(line, sizeof(line), f_count_proc) == NULL ||
	    line[strlen(line) - 1] != '\n') {
		printf("Error reading from %s\n", count_path);
	} else {
		static const char com[2] = ",";
		char *token;

		total_intrs = 0;
		token = strtok(line, com);
		while (token != NULL) {
			/* sum up interrupts across all cores */
			total_intrs += atoi(token);
			token = strtok(NULL, com);
		}
	}

	fclose(f_count_proc);

	return total_intrs;
}

static void dump_driver_stats(long dt)
{
	int i;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-18s %'-14.0f %'-14lu\n";
		double intrs_ps;
		int n_ints = get_irqs();

		if (n_ints < 0) {
			printf("error getting intr info for intr %i\n", irq_no);
			return;
		}
		xsks[i]->drv_stats.intrs = n_ints - irqs_at_init;

		intrs_ps = (xsks[i]->drv_stats.intrs - xsks[i]->drv_stats.prev_intrs) *
			 1000000000. / dt;

		printf("\n%-18s %-14s %-14s\n", "", "intrs/s", "count");
		printf(fmt, "irqs", intrs_ps, xsks[i]->drv_stats.intrs);

		xsks[i]->drv_stats.prev_intrs = xsks[i]->drv_stats.intrs;
	}
}

static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-18s %'-14.0f %'-14lu\n";
		double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps, fill_empty_pps,
			tx_invalid_pps, tx_empty_pps;

		rx_pps = (xsks[i]->ring_stats.rx_npkts - xsks[i]->ring_stats.prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->ring_stats.tx_npkts - xsks[i]->ring_stats.prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-18s %-14s %-14s %-14.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->ring_stats.rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->ring_stats.tx_npkts);

		xsks[i]->ring_stats.prev_rx_npkts = xsks[i]->ring_stats.rx_npkts;
		xsks[i]->ring_stats.prev_tx_npkts = xsks[i]->ring_stats.tx_npkts;

		if (opt_extra_stats) {
			if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) {
				dropped_pps = (xsks[i]->ring_stats.rx_dropped_npkts -
						xsks[i]->ring_stats.prev_rx_dropped_npkts) *
							1000000000. / dt;
				rx_invalid_pps = (xsks[i]->ring_stats.rx_invalid_npkts -
						xsks[i]->ring_stats.prev_rx_invalid_npkts) *
							1000000000. / dt;
				tx_invalid_pps = (xsks[i]->ring_stats.tx_invalid_npkts -
						xsks[i]->ring_stats.prev_tx_invalid_npkts) *
							1000000000. / dt;
				full_pps = (xsks[i]->ring_stats.rx_full_npkts -
						xsks[i]->ring_stats.prev_rx_full_npkts) *
							1000000000. / dt;
				fill_empty_pps = (xsks[i]->ring_stats.rx_fill_empty_npkts -
						xsks[i]->ring_stats.prev_rx_fill_empty_npkts) *
							1000000000. / dt;
				tx_empty_pps = (xsks[i]->ring_stats.tx_empty_npkts -
						xsks[i]->ring_stats.prev_tx_empty_npkts) *
							1000000000. / dt;

				printf(fmt, "rx dropped", dropped_pps,
				       xsks[i]->ring_stats.rx_dropped_npkts);
				printf(fmt, "rx invalid", rx_invalid_pps,
				       xsks[i]->ring_stats.rx_invalid_npkts);
				printf(fmt, "tx invalid", tx_invalid_pps,
				       xsks[i]->ring_stats.tx_invalid_npkts);
				printf(fmt, "rx queue full", full_pps,
				       xsks[i]->ring_stats.rx_full_npkts);
				printf(fmt, "fill ring empty", fill_empty_pps,
				       xsks[i]->ring_stats.rx_fill_empty_npkts);
				printf(fmt, "tx ring empty", tx_empty_pps,
				       xsks[i]->ring_stats.tx_empty_npkts);

				xsks[i]->ring_stats.prev_rx_dropped_npkts =
					xsks[i]->ring_stats.rx_dropped_npkts;
				xsks[i]->ring_stats.prev_rx_invalid_npkts =
					xsks[i]->ring_stats.rx_invalid_npkts;
				xsks[i]->ring_stats.prev_tx_invalid_npkts =
					xsks[i]->ring_stats.tx_invalid_npkts;
				xsks[i]->ring_stats.prev_rx_full_npkts =
					xsks[i]->ring_stats.rx_full_npkts;
				xsks[i]->ring_stats.prev_rx_fill_empty_npkts =
					xsks[i]->ring_stats.rx_fill_empty_npkts;
				xsks[i]->ring_stats.prev_tx_empty_npkts =
					xsks[i]->ring_stats.tx_empty_npkts;
			} else {
				printf("%-15s\n", "Error retrieving extra stats");
			}
		}
	}

	if (opt_app_stats)
		dump_app_stats(dt);
	if (irq_no)
		dump_driver_stats(dt);
}

static bool is_benchmark_done(void)
{
	if (opt_duration > 0) {
		unsigned long dt = (get_nsecs() - start_time);

		if (dt >= opt_duration)
			benchmark_done = true;
	}
	return benchmark_done;
}

static void *poller(void *arg)
{
	(void)arg;
	while (!is_benchmark_done()) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

static void remove_xdp_program(void)
{
	u32 curr_prog_id = 0;

	if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
		printf("bpf_get_link_xdp_id failed\n");
		exit(EXIT_FAILURE);
	}
	if (prog_id == curr_prog_id)
		bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	else if (!curr_prog_id)
		printf("couldn't find a prog id on a given interface\n");
	else
		printf("program on interface changed, not removing\n");
}

static void int_exit(int sig)
{
	benchmark_done = true;
}

static void xdpsock_cleanup(void)
{
	struct xsk_umem *umem = xsks[0]->umem->umem;
	int i;

	dump_stats();
	for (i = 0; i < num_socks; i++)
		xsk_socket__delete(xsks[i]->xsk);
	(void)xsk_umem__delete(umem);
	remove_xdp_program();
}

static void __exit_with_error(int error, const char *file, const char *func,
			      int line)
{
	fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
		line, error, strerror(error));
	dump_stats();
	remove_xdp_program();
	exit(EXIT_FAILURE);
}

#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, \
						 __LINE__)
static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}

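/*
 * Fill a buffer with a repeating 32-bit pattern in network byte order, e.g.
 * memset32_htonl(buf, 0x12345678, 8) stores the bytes 12 34 56 78 12 34 56 78
 * regardless of host endianness.
 */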
static void *memset32_htonl(void *dest, u32 val, u32 size)
{
	u32 *ptr = (u32 *)dest;
	int i;

	val = htonl(val);

	for (i = 0; i < (size & (~0x3)); i += 4)
		ptr[i >> 2] = val;

	for (; i < size; i++)
		((char *)dest)[i] = ((char *)&val)[i & 3];

	return dest;
}

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline unsigned short from32to16(unsigned int x)
{
	/* add up 16-bit and 16-bit for 16+c bit */
	x = (x & 0xffff) + (x >> 16);
	/* add up carry.. */
	x = (x & 0xffff) + (x >> 16);
	return x;
}

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static unsigned int do_csum(const unsigned char *buff, int len)
{
	unsigned int result = 0;
	int odd;

	if (len <= 0)
		goto out;
	odd = 1 & (unsigned long)buff;
	if (odd) {
#ifdef __LITTLE_ENDIAN
		result += (*buff << 8);
#else
		result = *buff;
#endif
		len--;
		buff++;
	}
	if (len >= 2) {
		if (2 & (unsigned long)buff) {
			result += *(unsigned short *)buff;
			len -= 2;
			buff += 2;
		}
		if (len >= 4) {
			const unsigned char *end = buff +
						   ((unsigned int)len & ~3);
			unsigned int carry = 0;

			do {
				unsigned int w = *(unsigned int *)buff;

				buff += 4;
				result += carry;
				result += w;
				carry = (w > result);
			} while (buff < end);
			result += carry;
			result = (result & 0xffff) + (result >> 16);
		}
		if (len & 2) {
			result += *(unsigned short *)buff;
			buff += 2;
		}
	}
	if (len & 1)
#ifdef __LITTLE_ENDIAN
		result += *buff;
#else
		result += (*buff << 8);
#endif
	result = from32to16(result);
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}

__sum16 ip_fast_csum(const void *iph, unsigned int ihl);

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return (__force __sum16)~do_csum(iph, ihl * 4);
}

/*
 * Fold a partial checksum
 * This function code has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;

	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (__force __sum16)~sum;
}
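
/*
 * Worked example: csum_fold(0x00013FFE) first folds to 0x3FFF (the carry
 * from the high half is added back in), and the final one's complement
 * yields 0xC000.
 */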

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline u32 from64to32(u64 x)
{
	/* add up 32-bit and 32-bit for 32+c bit */
	x = (x & 0xffffffff) + (x >> 32);
	/* add up carry.. */
	x = (x & 0xffffffff) + (x >> 32);
	return (u32)x;
}

__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum);

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum)
{
	unsigned long long s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum)from64to32(s);
}

/*
 * This function has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
		  __u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

static inline u16 udp_csum(u32 saddr, u32 daddr, u32 len,
			   u8 proto, u16 *udp_pkt)
{
	u32 csum = 0;
	u32 cnt = 0;

	/* udp hdr and data */
	for (; cnt < len; cnt += 2)
		csum += udp_pkt[cnt >> 1];

	return csum_tcpudp_magic(saddr, daddr, len, proto, csum);
}

#define ETH_FCS_SIZE 4

#define PKT_HDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
		      sizeof(struct udphdr))

#define PKT_SIZE		(opt_pkt_size - ETH_FCS_SIZE)
#define IP_PKT_SIZE		(PKT_SIZE - sizeof(struct ethhdr))
#define UDP_PKT_SIZE		(IP_PKT_SIZE - sizeof(struct iphdr))
#define UDP_PKT_DATA_SIZE	(UDP_PKT_SIZE - sizeof(struct udphdr))

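/*
 * With the default opt_pkt_size of 64 bytes (MIN_PKT_SIZE), these work out
 * to PKT_SIZE = 60, IP_PKT_SIZE = 46, UDP_PKT_SIZE = 26 and
 * UDP_PKT_DATA_SIZE = 18.
 */
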
static u8 pkt_data[XSK_UMEM__DEFAULT_FRAME_SIZE];

static void gen_eth_hdr_data(void)
{
	struct udphdr *udp_hdr = (struct udphdr *)(pkt_data +
						   sizeof(struct ethhdr) +
						   sizeof(struct iphdr));
	struct iphdr *ip_hdr = (struct iphdr *)(pkt_data +
						sizeof(struct ethhdr));
	struct ethhdr *eth_hdr = (struct ethhdr *)pkt_data;

	/* ethernet header */
	memcpy(eth_hdr->h_dest, "\x3c\xfd\xfe\x9e\x7f\x71", ETH_ALEN);
	memcpy(eth_hdr->h_source, "\xec\xb1\xd7\x98\x3a\xc0", ETH_ALEN);
	eth_hdr->h_proto = htons(ETH_P_IP);

	/* IP header */
	ip_hdr->version = IPVERSION;
	ip_hdr->ihl = 0x5; /* 20 byte header */
	ip_hdr->tos = 0x0;
	ip_hdr->tot_len = htons(IP_PKT_SIZE);
	ip_hdr->id = 0;
	ip_hdr->frag_off = 0;
	ip_hdr->ttl = IPDEFTTL;
	ip_hdr->protocol = IPPROTO_UDP;
	ip_hdr->saddr = htonl(0x0a0a0a10);
	ip_hdr->daddr = htonl(0x0a0a0a20);

	/* IP header checksum */
	ip_hdr->check = 0;
	ip_hdr->check = ip_fast_csum((const void *)ip_hdr, ip_hdr->ihl);

	/* UDP header */
	udp_hdr->source = htons(0x1000);
	udp_hdr->dest = htons(0x1000);
	udp_hdr->len = htons(UDP_PKT_SIZE);

	/* UDP data */
	memset32_htonl(pkt_data + PKT_HDR_SIZE, opt_pkt_fill_pattern,
		       UDP_PKT_DATA_SIZE);

	/* UDP header checksum */
	udp_hdr->check = 0;
	udp_hdr->check = udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE,
				  IPPROTO_UDP, (u16 *)udp_hdr);
}
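
/*
 * The template above is a fixed UDP packet from 10.10.10.16:4096 to
 * 10.10.10.32:4096 with hardcoded MAC addresses. In txonly mode every
 * transmitted frame is a copy of it, filled with opt_pkt_fill_pattern.
 */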

static void gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
{
	memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,
	       PKT_SIZE);
}

static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
{
	struct xsk_umem_info *umem;
	struct xsk_umem_config cfg = {
		/* We recommend that you set the fill ring size >= HW RX ring size +
		 * AF_XDP RX ring size. Make sure you fill up the fill ring
		 * with buffers at regular intervals, and you will with this setting
		 * avoid allocation failures in the driver. These are usually quite
		 * expensive since drivers have not been written to assume that
		 * allocation failures are common. For regular sockets, kernel
		 * allocated memory is used that only runs out in OOM situations
		 * that should be rare.
		 */
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = opt_xsk_frame_size,
		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
		.flags = opt_umem_flags
	};
	int ret;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		exit_with_error(errno);

	ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
			       &cfg);
	if (ret)
		exit_with_error(-ret);

	umem->buffer = buffer;
	return umem;
}

static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
{
	int ret, i;
	u32 idx;

	ret = xsk_ring_prod__reserve(&umem->fq,
				     XSK_RING_PROD__DEFAULT_NUM_DESCS * 2, &idx);
	if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS * 2)
		exit_with_error(-ret);
	for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS * 2; i++)
		*xsk_ring_prod__fill_addr(&umem->fq, idx++) =
			i * opt_xsk_frame_size;
	xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS * 2);
}

static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
						    bool rx, bool tx)
{
	struct xsk_socket_config cfg;
	struct xsk_socket_info *xsk;
	struct xsk_ring_cons *rxr;
	struct xsk_ring_prod *txr;
	int ret;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		exit_with_error(errno);

	xsk->umem = umem;
	cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	if (opt_num_xsks > 1)
		cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
	else
		cfg.libbpf_flags = 0;
	cfg.xdp_flags = opt_xdp_flags;
	cfg.bind_flags = opt_xdp_bind_flags;

	rxr = rx ? &xsk->rx : NULL;
	txr = tx ? &xsk->tx : NULL;
	ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
				 rxr, txr, &cfg);
	if (ret)
		exit_with_error(-ret);

	ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags);
	if (ret)
		exit_with_error(-ret);

	xsk->app_stats.rx_empty_polls = 0;
	xsk->app_stats.fill_fail_polls = 0;
	xsk->app_stats.copy_tx_sendtos = 0;
	xsk->app_stats.tx_wakeup_sendtos = 0;
	xsk->app_stats.opt_polls = 0;
	xsk->app_stats.prev_rx_empty_polls = 0;
	xsk->app_stats.prev_fill_fail_polls = 0;
	xsk->app_stats.prev_copy_tx_sendtos = 0;
	xsk->app_stats.prev_tx_wakeup_sendtos = 0;
	xsk->app_stats.prev_opt_polls = 0;

	return xsk;
}

static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{"zero-copy", no_argument, 0, 'z'},
	{"copy", no_argument, 0, 'c'},
	{"frame-size", required_argument, 0, 'f'},
	{"no-need-wakeup", no_argument, 0, 'm'},
	{"unaligned", no_argument, 0, 'u'},
	{"shared-umem", no_argument, 0, 'M'},
	{"force", no_argument, 0, 'F'},
	{"duration", required_argument, 0, 'd'},
	{"batch-size", required_argument, 0, 'b'},
	{"tx-pkt-count", required_argument, 0, 'C'},
	{"tx-pkt-size", required_argument, 0, 's'},
	{"tx-pkt-pattern", required_argument, 0, 'P'},
	{"extra-stats", no_argument, 0, 'x'},
	{"quiet", no_argument, 0, 'Q'},
	{"app-stats", no_argument, 0, 'a'},
	{"irq-string", required_argument, 0, 'I'},
	{"busy-poll", no_argument, 0, 'B'},
	{0, 0, 0, 0}
};

static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -S, --xdp-skb=n	Use XDP skb-mod\n"
		"  -N, --xdp-native=n	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"  -z, --zero-copy	Force zero-copy mode.\n"
		"  -c, --copy		Force copy mode.\n"
		"  -m, --no-need-wakeup	Turn off use of driver need wakeup flag.\n"
		"  -f, --frame-size=n	Set the frame size (must be a power of two in aligned mode, default is %d).\n"
		"  -u, --unaligned	Enable unaligned chunk placement\n"
		"  -M, --shared-umem	Enable XDP_SHARED_UMEM\n"
		"  -F, --force		Force loading the XDP prog\n"
		"  -d, --duration=n	Duration in secs to run command.\n"
		"			Default: forever.\n"
		"  -b, --batch-size=n	Batch size for sending or receiving\n"
		"			packets. Default: %d\n"
		"  -C, --tx-pkt-count=n	Number of packets to send.\n"
		"			Default: Continuous packets.\n"
		"  -s, --tx-pkt-size=n	Transmit packet size.\n"
		"			(Default: %d bytes)\n"
		"			Min size: %d, Max size %d.\n"
		"  -P, --tx-pkt-pattern=n	Packet fill pattern. Default: 0x%x\n"
		"  -x, --extra-stats	Display extra statistics.\n"
		"  -Q, --quiet		Do not display any stats.\n"
		"  -a, --app-stats	Display application (syscall) statistics.\n"
		"  -I, --irq-string	Display driver interrupt statistics for interface associated with irq-string.\n"
		"  -B, --busy-poll	Busy poll.\n"
		"\n";
	fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE,
		opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE,
		XSK_UMEM__DEFAULT_FRAME_SIZE, opt_pkt_fill_pattern);

	exit(EXIT_FAILURE);
}
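
/*
 * Illustrative invocations (interface and queue values are examples only):
 *
 *   xdpsock -i eth0 -r -N        rxdrop on queue 0 in native XDP mode
 *   xdpsock -i eth0 -q 1 -t -z   txonly on queue 1, forcing zero-copy
 *   xdpsock -i eth0 -l -S -p     l2fwd in skb mode, driven by poll()
 */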

static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:xQaI:B",
				long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			/* default, set below */
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		case 'z':
			opt_xdp_bind_flags |= XDP_ZEROCOPY;
			break;
		case 'c':
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'u':
			opt_umem_flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
			opt_unaligned_chunks = 1;
			opt_mmap_flags = MAP_HUGETLB;
			break;
		case 'F':
			opt_xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		case 'f':
			opt_xsk_frame_size = atoi(optarg);
			break;
		case 'm':
			opt_need_wakeup = false;
			opt_xdp_bind_flags &= ~XDP_USE_NEED_WAKEUP;
			break;
		case 'M':
			opt_num_xsks = MAX_SOCKS;
			break;
		case 'd':
			opt_duration = atoi(optarg);
			opt_duration *= 1000000000;
			break;
		case 'b':
			opt_batch_size = atoi(optarg);
			break;
		case 'C':
			opt_pkt_count = atoi(optarg);
			break;
		case 's':
			opt_pkt_size = atoi(optarg);
			if (opt_pkt_size > (XSK_UMEM__DEFAULT_FRAME_SIZE) ||
			    opt_pkt_size < MIN_PKT_SIZE) {
				fprintf(stderr,
					"ERROR: Invalid frame size %d\n",
					opt_pkt_size);
				usage(basename(argv[0]));
			}
			break;
		case 'P':
			opt_pkt_fill_pattern = strtol(optarg, NULL, 16);
			break;
		case 'x':
			opt_extra_stats = 1;
			break;
		case 'Q':
			opt_quiet = 1;
			break;
		case 'a':
			opt_app_stats = 1;
			break;
		case 'I':
			opt_irq_str = optarg;
			if (get_interrupt_number())
				irqs_at_init = get_irqs();
			if (irqs_at_init < 0) {
				fprintf(stderr, "ERROR: Failed to get irqs for %s\n", opt_irq_str);
				usage(basename(argv[0]));
			}
			break;
		case 'B':
			opt_busy_poll = 1;
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	if (!(opt_xdp_flags & XDP_FLAGS_SKB_MODE))
		opt_xdp_flags |= XDP_FLAGS_DRV_MODE;

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}

	if ((opt_xsk_frame_size & (opt_xsk_frame_size - 1)) &&
	    !opt_unaligned_chunks) {
		fprintf(stderr, "--frame-size=%d is not a power of two\n",
			opt_xsk_frame_size);
		usage(basename(argv[0]));
	}
}

static void kick_tx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN ||
	    errno == EBUSY || errno == ENETDOWN)
		return;
	exit_with_error(errno);
}

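/*
 * Reclaim descriptors from the completion ring and recycle them directly
 * onto the fill ring, so that buffers freed by completed transmissions are
 * immediately available for new receptions.
 */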
static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk)
{
	struct xsk_umem_info *umem = xsk->umem;
	u32 idx_cq = 0, idx_fq = 0;
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	/* In copy mode, Tx is driven by a syscall so we need to use e.g. sendto() to
	 * really send the packets. In zero-copy mode we do not have to do this, since Tx
	 * is driven by the NAPI loop. So as an optimization, we do not have to call
	 * sendto() all the time in zero-copy mode for l2fwd.
	 */
	if (opt_xdp_bind_flags & XDP_COPY) {
		xsk->app_stats.copy_tx_sendtos++;
		kick_tx(xsk);
	}

	ndescs = (xsk->outstanding_tx > opt_batch_size) ? opt_batch_size :
		xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
	if (rcvd > 0) {
		unsigned int i;
		int ret;

		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		while (ret != rcvd) {
			if (ret < 0)
				exit_with_error(-ret);
			if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&umem->fq)) {
				xsk->app_stats.fill_fail_polls++;
				recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL,
					 NULL);
			}
			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		}

		for (i = 0; i < rcvd; i++)
			*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
				*xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);

		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}
}

static inline void complete_tx_only(struct xsk_socket_info *xsk,
				    int batch_size)
{
	unsigned int rcvd;
	u32 idx;

	if (!xsk->outstanding_tx)
		return;

	if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx)) {
		xsk->app_stats.tx_wakeup_sendtos++;
		kick_tx(xsk);
	}

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
	if (rcvd > 0) {
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}
}

static void rx_drop(struct xsk_socket_info *xsk)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_fq = 0;
	int ret;

	rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
	if (!rcvd) {
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
			xsk->app_stats.rx_empty_polls++;
			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
		}
		return;
	}

	ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
			xsk->app_stats.fill_fail_polls++;
			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
		}
		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		u64 orig = xsk_umem__extract_addr(addr);

		addr = xsk_umem__add_offset_to_addr(addr);
		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		hex_dump(pkt, len, addr);
		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
	}

	xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);
	xsk->ring_stats.rx_npkts += rcvd;
}

static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	int i, ret;

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLIN;
	}

	for (;;) {
		if (opt_poll) {
			for (i = 0; i < num_socks; i++)
				xsks[i]->app_stats.opt_polls++;
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i]);

		if (benchmark_done)
			break;
	}
}

static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
{
	u32 idx;
	unsigned int i;

	while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) <
				      batch_size) {
		complete_tx_only(xsk, batch_size);
	}

	for (i = 0; i < batch_size; i++) {
		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
								  idx + i);
		tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
		tx_desc->len = PKT_SIZE;
	}

	xsk_ring_prod__submit(&xsk->tx, batch_size);
	xsk->ring_stats.tx_npkts += batch_size;
	xsk->outstanding_tx += batch_size;
	*frame_nb += batch_size;
	*frame_nb %= NUM_FRAMES;
	complete_tx_only(xsk, batch_size);
}

static inline int get_batch_size(int pkt_cnt)
{
	if (!opt_pkt_count)
		return opt_batch_size;

	if (pkt_cnt + opt_batch_size <= opt_pkt_count)
		return opt_batch_size;

	return opt_pkt_count - pkt_cnt;
}
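
/*
 * Example: with --tx-pkt-count=100 and the default batch size of 64,
 * tx_only() is handed one batch of 64 packets followed by a final batch
 * of 36.
 */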
1283
1284static void complete_tx_only_all(void)
1285{
1286 bool pending;
1287 int i;
1288
1289 do {
1290 pending = false;
1291 for (i = 0; i < num_socks; i++) {
1292 if (xsks[i]->outstanding_tx) {
1293 complete_tx_only(xsks[i], opt_batch_size);
1294 pending = !!xsks[i]->outstanding_tx;
1295 }
1296 }
1297 } while (pending);
Magnus Karlsson46738f72019-08-14 09:27:21 +02001298}
1299
1300static void tx_only_all(void)
1301{
Magnus Karlsson2e5d72c2019-11-07 18:47:37 +01001302 struct pollfd fds[MAX_SOCKS] = {};
Magnus Karlsson46738f72019-08-14 09:27:21 +02001303 u32 frame_nb[MAX_SOCKS] = {};
Jay Jayatheerthanece6e962019-12-20 14:25:28 +05301304 int pkt_cnt = 0;
Magnus Karlsson46738f72019-08-14 09:27:21 +02001305 int i, ret;
Magnus Karlssonb4b8faa2018-05-02 13:01:36 +02001306
Magnus Karlsson46738f72019-08-14 09:27:21 +02001307 for (i = 0; i < num_socks; i++) {
1308 fds[0].fd = xsk_socket__fd(xsks[i]->xsk);
1309 fds[0].events = POLLOUT;
1310 }
Magnus Karlssonb4b8faa2018-05-02 13:01:36 +02001311
Jay Jayatheerthanece6e962019-12-20 14:25:28 +05301312 while ((opt_pkt_count && pkt_cnt < opt_pkt_count) || !opt_pkt_count) {
1313 int batch_size = get_batch_size(pkt_cnt);
1314
Magnus Karlssonb4b8faa2018-05-02 13:01:36 +02001315 if (opt_poll) {
Ciara Loftus60dc6092020-10-02 13:36:11 +00001316 for (i = 0; i < num_socks; i++)
1317 xsks[i]->app_stats.opt_polls++;
Magnus Karlsson46738f72019-08-14 09:27:21 +02001318 ret = poll(fds, num_socks, opt_timeout);
Magnus Karlssonb4b8faa2018-05-02 13:01:36 +02001319 if (ret <= 0)
1320 continue;
1321
Magnus Karlsson248c7f92019-02-21 10:21:27 +01001322 if (!(fds[0].revents & POLLOUT))
Magnus Karlssonb4b8faa2018-05-02 13:01:36 +02001323 continue;
1324 }
1325
Magnus Karlsson46738f72019-08-14 09:27:21 +02001326 for (i = 0; i < num_socks; i++)
Weqaar Janjuab69e56c2020-08-29 00:17:17 +08001327 tx_only(xsks[i], &frame_nb[i], batch_size);
Jay Jayatheerthanece6e962019-12-20 14:25:28 +05301328
1329 pkt_cnt += batch_size;
Jay Jayatheerthand3f11b02019-12-20 14:25:25 +05301330
1331 if (benchmark_done)
1332 break;
Magnus Karlssonb4b8faa2018-05-02 13:01:36 +02001333 }
Jay Jayatheerthanece6e962019-12-20 14:25:28 +05301334
1335 if (opt_pkt_count)
1336 complete_tx_only_all();
Magnus Karlssonb4b8faa2018-05-02 13:01:36 +02001337}
1338
static void l2fwd(struct xsk_socket_info *xsk)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_tx = 0;
	int ret;

	complete_tx_l2fwd(xsk);

	rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
	if (!rcvd) {
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
			xsk->app_stats.rx_empty_polls++;
			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
		}
		return;
	}
	xsk->ring_stats.rx_npkts += rcvd;

	ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		complete_tx_l2fwd(xsk);
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->tx)) {
			xsk->app_stats.tx_wakeup_sendtos++;
			kick_tx(xsk);
		}
		ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		u64 orig = addr;
		char *pkt;

		addr = xsk_umem__add_offset_to_addr(addr);
		pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		swap_mac_addresses(pkt);

		hex_dump(pkt, len, addr);
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = orig;
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len;
	}

	xsk_ring_prod__submit(&xsk->tx, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);

	xsk->ring_stats.tx_npkts += rcvd;
	xsk->outstanding_tx += rcvd;
}

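/*
 * Addressing in l2fwd() above: in unaligned chunk mode the RX
 * descriptor's addr field encodes an offset to the payload in its
 * upper bits. The raw value ("orig") must therefore be written back
 * to the TX descriptor untouched, while xsk_umem__add_offset_to_addr()
 * folds the offset in before the payload is dereferenced. A minimal
 * sketch of what these libbpf helpers compute:
 *
 *	u64 base = xsk_umem__extract_addr(addr);	// chunk start
 *	u64 off  = xsk_umem__extract_offset(addr);	// offset within chunk
 *	u64 full = base + off;	// == xsk_umem__add_offset_to_addr(addr)
 */
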
static void l2fwd_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	int i, ret;

	for (;;) {
		if (opt_poll) {
			for (i = 0; i < num_socks; i++) {
				fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
				fds[i].events = POLLOUT | POLLIN;
				xsks[i]->app_stats.opt_polls++;
			}
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			l2fwd(xsks[i]);

		if (benchmark_done)
			break;
	}
}

static void load_xdp_program(char **argv, struct bpf_object **obj)
{
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_XDP,
	};
	char xdp_filename[256];
	int prog_fd;

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = xdp_filename;

	if (bpf_prog_load_xattr(&prog_load_attr, obj, &prog_fd))
		exit(EXIT_FAILURE);
	if (prog_fd < 0) {
		fprintf(stderr, "ERROR: no program found: %s\n",
			strerror(-prog_fd));
		exit(EXIT_FAILURE);
	}

	if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
		fprintf(stderr, "ERROR: link set xdp fd failed\n");
		exit(EXIT_FAILURE);
	}
}

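/*
 * Hedged aside: bpf_prog_load_xattr() and bpf_set_link_xdp_fd() are
 * the libbpf entry points of this sample's vintage; later libbpf
 * releases deprecate both. A rough sketch of the same load-and-attach
 * flow against a newer libbpf (>= 0.8; names taken from that API, not
 * from this tree):
 *
 *	struct bpf_object *o = bpf_object__open_file(xdp_filename, NULL);
 *
 *	if (!o || bpf_object__load(o))
 *		exit(EXIT_FAILURE);
 *	prog_fd = bpf_program__fd(bpf_object__next_program(o, NULL));
 *	if (bpf_xdp_attach(opt_ifindex, prog_fd, opt_xdp_flags, NULL) < 0)
 *		exit(EXIT_FAILURE);
 */
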
static void enter_xsks_into_map(struct bpf_object *obj)
{
	struct bpf_map *map;
	int i, xsks_map;

	map = bpf_object__find_map_by_name(obj, "xsks_map");
	xsks_map = bpf_map__fd(map);
	if (xsks_map < 0) {
		fprintf(stderr, "ERROR: no xsks map found: %s\n",
			strerror(-xsks_map));
		exit(EXIT_FAILURE);
	}

	for (i = 0; i < num_socks; i++) {
		int fd = xsk_socket__fd(xsks[i]->xsk);
		int key, ret;

		key = i;
		ret = bpf_map_update_elem(xsks_map, &key, &fd, 0);
		if (ret) {
			fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
			exit(EXIT_FAILURE);
		}
	}
}

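/*
 * The "xsks_map" looked up above must be defined by the companion XDP
 * program (the xdpsock_kern.o loaded in load_xdp_program()). A sketch
 * of such a BTF-style XSKMAP definition on the BPF side (layout
 * assumed, not copied from this tree):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_XSKMAP);
 *		__uint(max_entries, MAX_SOCKS);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} xsks_map SEC(".maps");
 *
 * The XDP program then calls bpf_redirect_map(&xsks_map, key, 0) with
 * some key-selection policy (per-queue or round-robin), which is why
 * each socket is installed here under its own index.
 */
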
static void apply_setsockopt(struct xsk_socket_info *xsk)
{
	int sock_opt;

	if (!opt_busy_poll)
		return;

	sock_opt = 1;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);

	sock_opt = 20;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);
}

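/*
 * Busy-poll tuning above: SO_PREFER_BUSY_POLL asks the kernel to keep
 * driving the NAPI context from syscall context rather than falling
 * back to interrupts, and SO_BUSY_POLL sets the busy-poll timeout in
 * microseconds (20 here). Preferred busy poll generally also wants
 * IRQ deferral enabled on the netdev; a typical setup (the device
 * name is only a placeholder):
 *
 *	echo 2      > /sys/class/net/eth0/napi_defer_hard_irqs
 *	echo 200000 > /sys/class/net/eth0/gro_flush_timeout
 */
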
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	bool rx = false, tx = false;
	struct xsk_umem_info *umem;
	struct bpf_object *obj;
	pthread_t pt;
	int i, ret;
	void *bufs;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	if (opt_num_xsks > 1)
		load_xdp_program(argv, &obj);

	/* Reserve memory for the umem. Use hugepages if unaligned chunk mode */
	bufs = mmap(NULL, NUM_FRAMES * opt_xsk_frame_size,
		    PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | opt_mmap_flags, -1, 0);
	if (bufs == MAP_FAILED) {
		printf("ERROR: mmap failed\n");
		exit(EXIT_FAILURE);
	}

	/* Create sockets... */
	umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size);
	if (opt_bench == BENCH_RXDROP || opt_bench == BENCH_L2FWD) {
		rx = true;
		xsk_populate_fill_ring(umem);
	}
	if (opt_bench == BENCH_L2FWD || opt_bench == BENCH_TXONLY)
		tx = true;
	for (i = 0; i < opt_num_xsks; i++)
		xsks[num_socks++] = xsk_configure_socket(umem, rx, tx);

	for (i = 0; i < opt_num_xsks; i++)
		apply_setsockopt(xsks[i]);

	if (opt_bench == BENCH_TXONLY) {
		gen_eth_hdr_data();

		for (i = 0; i < NUM_FRAMES; i++)
			gen_eth_frame(umem, i * opt_xsk_frame_size);
	}

	if (opt_num_xsks > 1 && opt_bench != BENCH_TXONLY)
		enter_xsks_into_map(obj);

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	if (!opt_quiet) {
		ret = pthread_create(&pt, NULL, poller, NULL);
		if (ret)
			exit_with_error(ret);
	}

	prev_time = get_nsecs();
	start_time = prev_time;

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only_all();
	else
		l2fwd_all();

	benchmark_done = true;

	if (!opt_quiet)
		pthread_join(pt, NULL);

	xdpsock_cleanup();

	return 0;
}
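
/*
 * Example invocations (a sketch only; the option letters follow
 * parse_command_line() earlier in this file, so verify against the -h
 * output of the binary you actually build):
 *
 *	./xdpsock -i eth0 -q 0 -r	rxdrop on queue 0
 *	./xdpsock -i eth0 -t -s 128	txonly with 128-byte packets
 *	./xdpsock -i eth0 -l -N		l2fwd in native driver mode
 */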