/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>

#include "xdp_umem_props.h"

#define RX_BATCH_SIZE 16

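/* Kernel-side state of one user-space ring. prod_head and cons_head are
 * local working copies used while a batch is being filled or drained;
 * prod_tail and cons_tail track what has been published to, or last read
 * from, the shared ring.
 */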
struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating on both RXTX and umem queues */

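/* Number of descriptors that have failed validation on this queue. */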
static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

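/* Number of entries available to the consumer, capped at dcnt. The shared
 * producer pointer is only re-read when the cached view runs empty.
 */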
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

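/* Number of free slots available to the producer. The shared consumer
 * pointer is only re-read when fewer than dcnt slots appear to be free.
 */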
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

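/* A frame id is valid if it addresses a frame within the umem. */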
static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
{
	if (unlikely(idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

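/* Skip past invalid ids until a valid one is found between cons_tail and
 * cons_head; return a pointer to it, or NULL if none remains.
 */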
static inline u32 *xskq_validate_id(struct xsk_queue *q)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_id(q, ring->desc[idx]))
			return &ring->desc[idx];

		q->cons_tail++;
	}

	return NULL;
}

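/* Return a pointer to the next id to consume without advancing the ring,
 * or NULL if the queue is empty. Fetches a new batch of up to
 * RX_BATCH_SIZE entries when the current one is exhausted.
 */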
static inline u32 *xskq_peek_id(struct xsk_queue *q)
{
	struct xdp_umem_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_id(q);
	}

	ring = (struct xdp_umem_ring *)q->ring;
	return &ring->desc[q->cons_tail & q->ring_mask];
}

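/* Consume the id returned by xskq_peek_id() and validate the next one. */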
static inline void xskq_discard_id(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_id(q);
}

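/* Publish one id to the ring. The caller is expected to have reserved a
 * slot with xskq_reserve_id() beforehand; no free-space check is done here.
 */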
static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[q->prod_tail++ & q->ring_mask] = id;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

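/* Reserve a slot for a later xskq_produce_id() call. */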
static inline int xskq_reserve_id(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

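/* A descriptor is valid if its frame id is within the umem and the buffer
 * it describes fits entirely inside one frame.
 */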
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	u32 buff_len;

	if (unlikely(d->idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}

	buff_len = q->umem_props.frame_size;
	if (unlikely(d->len > buff_len || d->len == 0 ||
		     d->offset > buff_len || d->offset + d->len > buff_len)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

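/* Skip past invalid descriptors until a valid one is found between
 * cons_tail and cons_head; copy it to *desc (if desc is non-NULL) and
 * return desc, or NULL if no valid descriptor remains.
 */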
static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_desc(q, &ring->desc[idx])) {
			if (desc)
				*desc = ring->desc[idx];
			return desc;
		}

		q->cons_tail++;
	}

	return NULL;
}

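/* Copy the next descriptor to consume into *desc without advancing the
 * ring; return NULL when the queue is empty.
 */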
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	struct xdp_rxtx_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_desc(q, desc);
	}

	ring = (struct xdp_rxtx_ring *)q->ring;
	*desc = ring->desc[q->cons_tail & q->ring_mask];
	return desc;
}

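/* Consume the descriptor returned by xskq_peek_desc() and validate the
 * next one.
 */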
static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_desc(q, NULL);
}

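/* A typical consumer built on the peek/discard pair above might look as
 * follows (an illustrative sketch only, not a function in this header):
 *
 *	struct xdp_desc desc;
 *
 *	while (xskq_peek_desc(q, &desc)) {
 *		<process desc.idx, desc.offset and desc.len>
 *		xskq_discard_desc(q);
 *	}
 */

/* Write one descriptor at prod_head without publishing it. Pair with
 * xskq_produce_flush_desc() to make a completed batch visible to user
 * space.
 */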
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u32 id, u32 len, u16 offset)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].idx = id;
	ring->desc[idx].len = len;
	ring->desc[idx].offset = offset;

	return 0;
}

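/* Publish all descriptors written since the last flush. */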
static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

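/* True if every slot in the ring holds an unconsumed descriptor. */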
static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return xskq_nb_avail(q, q->nentries) == q->nentries;
}

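/* True if the ring holds no unconsumed descriptors. */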
static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
}

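/* Queue setup and teardown; implemented in xsk_queue.c. */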
void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */