/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>

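/* RX_BATCH_SIZE bounds how far the cached consumer head (cons_head) may run
 * ahead of cons_tail in the peek functions below, so the shared consumer
 * index is only refreshed and republished once per batch of descriptors.
 */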
#define RX_BATCH_SIZE 16

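/* Ring header shared with user space: the producer and consumer indices are
 * placed on separate cache lines to avoid false sharing between the two
 * sides of the ring.
 */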
struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[0] ____cacheline_aligned_in_smp;
};

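/* Kernel-side state for one ring. prod_head/prod_tail and cons_head/cons_tail
 * are local copies of the shared indices; the shared ring->producer and
 * ring->consumer are only touched via READ_ONCE()/WRITE_ONCE() when the local
 * copies are refreshed or published. ring_mask is expected to be
 * nentries - 1, which assumes power-of-two ring sizes.
 */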
struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating on both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

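/* Return min(number of entries available to consume, dcnt). The cached
 * producer index is only refreshed from the shared ring when the cached view
 * looks empty, keeping reads of the shared cache line to a minimum.
 */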
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

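/* Return the number of entries free for a producer at index "producer". The
 * cached consumer index is only refreshed from the shared ring when the
 * cached view does not have room for dcnt entries.
 */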
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
	if (addr >= q->umem_props.size) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

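/* Walk the current batch from cons_tail towards cons_head and return the
 * first valid address, masked down to the start of its chunk. Invalid entries
 * are skipped and counted in invalid_descs.
 */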
static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
		if (xskq_is_valid_addr(q, *addr))
			return addr;

		q->cons_tail++;
	}

	return NULL;
}

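/* Peek at the next valid address without consuming it. When the current
 * batch is exhausted, the shared consumer index is published and a new batch
 * of at most RX_BATCH_SIZE entries is claimed.
 *
 * Sketch of a consumer loop as assumed by this API (the real callers live in
 * the xsk code):
 *
 *	u64 addr;
 *
 *	while (xskq_peek_addr(q, &addr)) {
 *		... use the chunk starting at addr ...
 *		xskq_discard_addr(q);
 *	}
 */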
static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_addr(q, addr);
}

static inline void xskq_discard_addr(struct xsk_queue *q)
{
	q->cons_tail++;
}

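/* Unconditionally enqueue one address and immediately publish the new
 * producer index. There is no free-space check here, so the caller is assumed
 * to have reserved space beforehand, e.g. via xskq_reserve_addr().
 */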
static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[q->prod_tail++ & q->ring_mask] = addr;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

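/* Reserve one slot for a later xskq_produce_addr() by advancing the local
 * producer head. Returns -ENOSPC if the ring is full.
 */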
static inline int xskq_reserve_addr(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

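/* A descriptor is valid when its address lies inside the umem and
 * (addr + len) falls in the same chunk as addr, i.e. the packet does not
 * reach into the next chunk. Invalid descriptors are counted.
 */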
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	if (!xskq_is_valid_addr(q, d->addr))
		return false;

	if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
	    (d->addr & q->umem_props.chunk_mask)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*desc = READ_ONCE(ring->desc[idx]);
		if (xskq_is_valid_desc(q, desc))
			return desc;

		q->cons_tail++;
	}

	return NULL;
}

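/* Rx/Tx counterpart of xskq_peek_addr(): peek at the next valid descriptor,
 * claiming a new batch of at most RX_BATCH_SIZE entries when the current one
 * is exhausted. Pair with xskq_discard_desc() once the descriptor has been
 * consumed.
 */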
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_desc(q, desc);
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
}

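/* Stage one descriptor at the local producer head without publishing it; the
 * batch only becomes visible to the consumer after xskq_produce_flush_desc().
 * Returns -ENOSPC when the ring is full.
 */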
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

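/* Publish all descriptors staged with xskq_produce_batch_desc() by moving
 * prod_tail up to prod_head and writing the shared producer index.
 *
 * Sketch of the intended producer pattern (a simplification of how the Rx
 * path is assumed to use these helpers):
 *
 *	if (xskq_produce_batch_desc(q, addr, len) == 0)
 *		xskq_produce_flush_desc(q);
 */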
static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

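/* Ring-state queries based on the cached indices: "full" means every entry is
 * available for consumption, "empty" means every entry is free for the
 * producer.
 */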
static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */