/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>

#include "xdp_umem_props.h"

#define RX_BATCH_SIZE 16

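/* struct xdp_ring holds the producer and consumer indices that are shared
 * with user space. Each index sits on its own cache line
 * (____cacheline_aligned_in_smp) so the producing and consuming sides do
 * not false-share. The two ring variants below append the descriptor array.
 */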
struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u32 desc[0] ____cacheline_aligned_in_smp;
};

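/* Kernel-side state for one ring. The prod_* and cons_* fields are local
 * cached copies of the shared indices; the shared ring behind *ring is only
 * read (READ_ONCE) or written (WRITE_ONCE) when that cached view has to be
 * refreshed or published.
 */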
struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating for both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

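/* Return min(dcnt, number of entries ready to be consumed). The shared
 * producer index is only re-read when the cached copy says the ring is
 * empty.
 */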
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

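/* Return the number of free slots relative to the given producer index.
 * The shared consumer index is only re-read when the cached copy shows
 * fewer than dcnt free entries.
 */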
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
{
	if (unlikely(idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

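/* Walk the cached batch from cons_tail towards cons_head and copy the
 * first valid frame id into *id. Invalid ids are counted and skipped.
 * Returns NULL if no valid id is left in the batch.
 */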
static inline u32 *xskq_validate_id(struct xsk_queue *q, u32 *id)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*id = READ_ONCE(ring->desc[idx]);
		if (xskq_is_valid_id(q, *id))
			return id;

		q->cons_tail++;
	}

	return NULL;
}

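/* Peek at the next id without consuming it. When the cached batch is
 * exhausted, the consumer index is published and up to RX_BATCH_SIZE new
 * entries are pulled in; xskq_discard_id() below consumes the peeked entry.
 */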
static inline u32 *xskq_peek_id(struct xsk_queue *q, u32 *id)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_id(q, id);
}

static inline void xskq_discard_id(struct xsk_queue *q)
{
	q->cons_tail++;
}

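/* Usage sketch (illustrative only, not part of this header): a consumer of
 * a umem queue peeks at, uses and then discards one id at a time. "fq" and
 * use_frame() are hypothetical:
 *
 *	u32 id;
 *
 *	while (xskq_peek_id(fq, &id)) {
 *		use_frame(id);
 *		xskq_discard_id(fq);
 *	}
 */

/* Publish one id: write it at the cached prod_tail, order the write against
 * the index update with smp_wmb(), then bump the shared producer index.
 */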
static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[q->prod_tail++ & q->ring_mask] = id;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

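/* Reserve one slot by advancing the cached prod_head without writing
 * anything yet. Returns -ENOSPC when the ring is full.
 */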
static inline int xskq_reserve_id(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

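/* A descriptor is valid if its frame index exists in the umem and it has a
 * nonzero length with offset and offset + len inside a single frame.
 * Invalid descriptors bump the invalid_descs counter.
 */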
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	u32 buff_len;

	if (unlikely(d->idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}

	buff_len = q->umem_props.frame_size;
	if (unlikely(d->len > buff_len || d->len == 0 ||
		     d->offset > buff_len || d->offset + d->len > buff_len)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

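/* Rx/Tx counterpart of xskq_validate_id(): copy the first valid descriptor
 * from the cached batch into *desc, skipping and counting invalid ones.
 */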
static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*desc = READ_ONCE(ring->desc[idx]);
		if (xskq_is_valid_desc(q, desc))
			return desc;

		q->cons_tail++;
	}

	return NULL;
}

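/* Peek at the next descriptor without consuming it, pulling in a fresh
 * batch of up to RX_BATCH_SIZE entries when the cached one runs dry;
 * xskq_discard_desc() below consumes the peeked entry.
 */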
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_desc(q, desc);
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
}

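/* Producing Rx/Tx descriptors is a two-step operation:
 * xskq_produce_batch_desc() writes entries at the cached prod_head and
 * xskq_produce_flush_desc() later publishes everything written so far to
 * the shared producer index.
 */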
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u32 id, u32 len, u16 offset)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].idx = id;
	ring->desc[idx].len = len;
	ring->desc[idx].offset = offset;

	return 0;
}

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

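/* Usage sketch (illustrative only, not part of this header): queue a
 * descriptor and publish it with a flush. "rxq", "id", "len" and "offset"
 * are hypothetical:
 *
 *	if (xskq_produce_batch_desc(rxq, id, len, offset) == 0)
 *		xskq_produce_flush_desc(rxq);
 */
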
static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */