/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Keep the adjacent cache line prefetcher from pulling in the
	 * consumer pointer when the producer pointer is touched, and
	 * vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
};

/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * ring, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)           smp_rmb()       (C)
 *    STORE $data                   LOAD $data
 *    smp_wmb()       (B)           smp_mb()        (D)
 *    STORE ->producer              STORE ->consumer
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it ensures that $data is written before the
 * producer pointer is published. If this barrier was missing, the
 * consumer could observe the producer pointer being set and load the
 * data before the producer has written it, and so would read the old
 * data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer has actually been read. Without this barrier,
 * some architectures could load old data, as speculative loads are not
 * discarded when the CPU does not know there is a dependency between
 * ->producer and the data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates there is no room
 * in the buffer to store $data, the store is not performed, so no
 * explicit barrier is needed.
 *
 * (D) ensures that the load of the data is observed to happen before
 * the store of the consumer pointer. Without this memory barrier, the
 * producer could observe the consumer pointer being set and overwrite
 * the data with a new value before the consumer got the chance to read
 * the old value. The consumer would thus miss the old entry and very
 * likely read the new entry twice, once right away and again after
 * circling through the ring.
 */
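
/* In the helpers below, (A) corresponds to the xskq_prod_reserve*()
 * functions, (B) to __xskq_prod_submit(), (C) to __xskq_cons_peek()
 * and (D) to __xskq_cons_release(); the "matches" annotations next to
 * the barriers in those functions refer back to the letters above.
 */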

/* The operations on the rings are the following:
 *
 * producer                       consumer
 *
 * RESERVE entries                PEEK in the ring for entries
 * WRITE data into the ring       READ data from the ring
 * SUBMIT entries                 RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and, when it is done reading them, release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
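
/* A minimal, illustrative sketch (not part of this header) of how the
 * helpers below map onto these operations; rx_ring, tx_ring, addr, len,
 * desc and pool are hypothetical placeholders.
 *
 * Kernel as producer (e.g. on an Rx ring), RESERVE + WRITE then SUBMIT:
 *
 *	if (xskq_prod_reserve_desc(rx_ring, addr, len))
 *		return -ENOSPC;
 *	xskq_prod_submit(rx_ring);
 *
 * Kernel as consumer (e.g. on a Tx ring), PEEK + READ then RELEASE:
 *
 *	if (xskq_cons_peek_desc(tx_ring, &desc, pool)) {
 *		... hand desc over for transmission ...
 *		xskq_cons_release(tx_ring);
 *	}
 */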

/* Functions that read and validate content from consumer rings. */

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (q->cached_cons != q->cached_prod) {
		u32 idx = q->cached_cons & q->ring_mask;

		*addr = ring->desc[idx];
		return true;
	}

	return false;
}

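/* Descriptor validation: a valid descriptor has a zero options field and
 * lies entirely inside the umem. In aligned mode it must additionally fit
 * within a single chunk; in unaligned mode it must not exceed the chunk
 * size and must not cross a non-contiguous page boundary.
 */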
static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 chunk, chunk_end;

	chunk = xp_aligned_extract_addr(pool, desc->addr);
	chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
	if (chunk != chunk_end)
		return false;

	if (chunk >= pool->addrs_cnt)
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr, base_addr;

	base_addr = xp_unaligned_extract_addr(desc->addr);
	addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		q->cached_cons++;
	}

	return false;
}

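/* Batched variant of xskq_cons_read_desc(): copies up to @max valid
 * descriptors into @descs. Invalid descriptors are counted in
 * q->invalid_descs and skipped, so the return value reflects only the
 * valid descriptors written to @descs.
 */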
static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q,
					    struct xdp_desc *descs,
					    struct xsk_buff_pool *pool, u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;

	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;

		descs[nb_entries] = ring->desc[idx];
		if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
			/* Skip the entry */
			cached_cons++;
			continue;
		}

		nb_entries++;
		cached_cons++;
	}

	return nb_entries;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_mb(); /* D, matches A */
	WRITE_ONCE(q->ring->consumer, q->cached_cons);
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = READ_ONCE(q->ring->producer);
	smp_rmb(); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	return xskq_cons_nb_entries(q, cnt) >= cnt ? true : false;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xdp_desc *descs,
					    struct xsk_buff_pool *pool, u32 max)
{
	u32 entries = xskq_cons_nb_entries(q, max);

	return xskq_cons_read_desc_batch(q, descs, pool, entries);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
		q->nentries;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

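/* Undo the most recent, not yet submitted, reservation. */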
static inline void xskq_prod_cancel(struct xsk_queue *q)
{
	q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

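/* Writes up to @max addresses taken from @descs into a fill/completion
 * ring and reserves them locally; the entries become visible to the
 * consumer only after a subsequent xskq_prod_submit*() call.
 */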
static inline u32 xskq_prod_reserve_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					       u32 max)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 nb_entries, i, cached_prod;

	nb_entries = xskq_prod_nb_free(q, max);

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;

	return nb_entries;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_wmb(); /* B, matches C */

	WRITE_ONCE(q->ring->producer, idx);
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */