// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"

Magnus Karlsson1d9cb1f2019-12-19 13:39:31 +010013static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
Magnus Karlsson423f3832018-05-02 13:01:24 +020014{
Magnus Karlsson1d9cb1f2019-12-19 13:39:31 +010015 struct xdp_umem_ring *umem_ring;
16 struct xdp_rxtx_ring *rxtx_ring;
Magnus Karlsson423f3832018-05-02 13:01:24 +020017
Magnus Karlsson1d9cb1f2019-12-19 13:39:31 +010018 if (umem_queue)
19 return struct_size(umem_ring, desc, q->nentries);
20 return struct_size(rxtx_ring, desc, q->nentries);
Björn Töpelb9b6b682018-05-02 13:01:25 +020021}
22
23struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
Magnus Karlsson423f3832018-05-02 13:01:24 +020024{
25 struct xsk_queue *q;
26 gfp_t gfp_flags;
27 size_t size;
28
29 q = kzalloc(sizeof(*q), GFP_KERNEL);
30 if (!q)
31 return NULL;
32
33 q->nentries = nentries;
34 q->ring_mask = nentries - 1;
35
36 gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
37 __GFP_COMP | __GFP_NORETRY;
Magnus Karlsson1d9cb1f2019-12-19 13:39:31 +010038 size = xskq_get_ring_size(q, umem_queue);
Magnus Karlsson423f3832018-05-02 13:01:24 +020039
40 q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
41 get_order(size));
42 if (!q->ring) {
43 kfree(q);
44 return NULL;
45 }
46
47 return q;
48}
49
50void xskq_destroy(struct xsk_queue *q)
51{
52 if (!q)
53 return;
54
55 page_frag_free(q->ring);
56 kfree(q);
57}