// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>

#include "xsk_queue.h"

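/* Record the umem's size and chunk mask on a queue; a NULL queue is a
 * no-op.
 */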
void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
{
	if (!q)
		return;

	q->size = size;
	q->chunk_mask = chunk_mask;
}

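/* Ring sizes in bytes: fill/completion (umem) rings hold u64 addresses,
 * rx/tx rings hold full struct xdp_desc entries.
 */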
static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u64);
}

static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_ring) + q->nentries * sizeof(struct xdp_desc);
}

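/* Allocate a queue and the page-backed ring behind it. ring_mask assumes the
 * caller passes a power-of-two nentries.
 */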
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
	struct xsk_queue *q;
	gfp_t gfp_flags;
	size_t size;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->nentries = nentries;
	q->ring_mask = nentries - 1;

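	/* Back the ring with zeroed compound pages; skip the warning and the
	 * retry logic if this (possibly large) contiguous allocation fails.
	 */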
	gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
		    __GFP_COMP | __GFP_NORETRY;
	size = umem_queue ? xskq_umem_get_ring_size(q) :
	       xskq_rxtx_get_ring_size(q);

	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
						      get_order(size));
	if (!q->ring) {
		kfree(q);
		return NULL;
	}

	return q;
}

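/* Release the ring pages and the queue itself; a NULL queue is a no-op. */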
void xskq_destroy(struct xsk_queue *q)
{
	if (!q)
		return;

	page_frag_free(q->ring);
	kfree(q);
}

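/* Allocate a frame-reuse queue with room for at least nentries handles,
 * rounded up to a power of two. Returns NULL on overflow or allocation
 * failure.
 */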
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	struct xdp_umem_fq_reuse *newq;

	/* Check for overflow */
	if (nentries > (u32)roundup_pow_of_two(nentries))
		return NULL;
	nentries = roundup_pow_of_two(nentries);

	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
	if (!newq)
		return NULL;
	memset(newq, 0, offsetof(typeof(*newq), handles));

	newq->nentries = nentries;
	return newq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);

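/* Install newq as the umem's frame-reuse queue, copying over any handles
 * stashed in the old one. Returns the queue the caller should free: NULL if
 * there was no old queue, newq if it is too small to hold the old entries,
 * otherwise the old queue.
 */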
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq)
{
	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;

	if (!oldq) {
		umem->fq_reuse = newq;
		return NULL;
	}

	if (newq->nentries < oldq->length)
		return newq;

	memcpy(newq->handles, oldq->handles,
	       array_size(oldq->length, sizeof(u64)));
	newq->length = oldq->length;

	umem->fq_reuse = newq;
	return oldq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_swap);

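/* Free a reuse queue obtained from xsk_reuseq_prepare() or returned by
 * xsk_reuseq_swap().
 */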
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
	kvfree(rq);
}
EXPORT_SYMBOL_GPL(xsk_reuseq_free);

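/* Drop and free the umem's reuse queue, if any. */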
void xsk_reuseq_destroy(struct xdp_umem *umem)
{
	xsk_reuseq_free(umem->fq_reuse);
	umem->fq_reuse = NULL;
}