/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT)

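/* Illustrative sketch, not part of the original header: because an
 * xdp_umem_page address (see the struct below) is page aligned, its low
 * 12 bits are free, so a flag such as XSK_NEXT_PG_CONTIG_MASK can be folded
 * into the pointer and masked out again before use, e.g. for a page-aligned
 * kernel address vaddr:
 *
 *	umem->pages[i].addr = (void *)((unsigned long)vaddr |
 *				       XSK_NEXT_PG_CONTIG_MASK);
 *	contig = (unsigned long)umem->pages[i].addr & XSK_NEXT_PG_CONTIG_MASK;
 *	vaddr  = (void *)((unsigned long)umem->pages[i].addr & PAGE_MASK);
 */
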
struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

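/* Stash of FILL queue addresses that were taken out of the ring but not yet
 * used; the *_rq helpers below consume from this stash before touching the
 * FILL queue again.
 */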
struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

/* Flags for the umem flags field.
 *
 * The NEED_WAKEUP flag is bit 1 because the low bits of the flags field are
 * reused for the public flags. See include/uapi/linux/if_xdp.h.
 */
#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1)

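/* A registered chunk of user-space memory, together with its FILL and
 * COMPLETION rings, shared by the AF_XDP sockets bound to it.
 */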
struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	u16 queue_id;
	u8 need_wakeup;
	u8 flags;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	bool zc;
	spinlock_t xsk_tx_list_lock;
	struct list_head xsk_tx_list;
};

struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
 */
struct xsk_map_node {
	struct list_head node;
	struct xsk_map *map;
	struct xdp_sock **map_entry;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_release_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);

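/* Illustrative sketch only, not part of the original header: a zero-copy
 * driver's TX path typically drains descriptors with xsk_umem_consume_tx(),
 * notifies the socket with xsk_umem_consume_tx_done(), and, once the
 * hardware has actually sent the frames, hands the buffers back to user
 * space with xsk_umem_complete_tx(). Assuming a hypothetical my_dev_xmit()
 * helper:
 *
 *	struct xdp_desc desc;
 *	u32 sent = 0;
 *
 *	while (xsk_umem_consume_tx(umem, &desc)) {
 *		my_dev_xmit(xdp_umem_get_dma(umem, desc.addr), desc.len);
 *		sent++;
 *	}
 *	if (sent)
 *		xsk_umem_consume_tx_done(umem);
 *
 *	...and later, from the TX completion handler, for "done" frames:
 *
 *	xsk_umem_complete_tx(umem, done);
 *
 * When xsk_umem_uses_need_wakeup() is true, the driver is also expected to
 * set the need_wakeup flags when it stops processing a ring and to clear
 * them while it is actively working on it.
 */
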
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

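/* Return the XDP socket stored at @key in an XSKMAP, or NULL if the key is
 * out of range or the slot is empty.
 */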
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
}

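/* Translate a ring address, which may carry an unaligned-mode offset in its
 * upper bits, into the kernel virtual address of that byte in the umem.
 */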
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	unsigned long page_addr;

	addr = xsk_umem_add_offset_to_addr(addr);
	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;

	return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
}

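/* As xdp_umem_get_data(), but return the DMA address for device access. */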
static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	addr = xsk_umem_add_offset_to_addr(addr);

	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
}

/* Reuse-queue aware versions of the FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return true;
}

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_release_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}

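/* Illustrative sketch only, not part of the original header: a driver's RX
 * refill loop would normally take addresses through the *_rq helpers so that
 * previously stashed handles are used up before the FILL queue itself.
 * Assuming a hypothetical my_post_rx_buffer() helper:
 *
 *	u64 handle;
 *
 *	while (budget && xsk_umem_peek_addr_rq(umem, &handle)) {
 *		if (!my_post_rx_buffer(xdp_umem_get_dma(umem, handle)))
 *			break;
 *		xsk_umem_release_addr_rq(umem);
 *		budget--;
 *	}
 *
 * A handle that was already taken out of the ring but ends up unused, for
 * example when a ring is torn down, can be put back with
 * xsk_umem_fq_reuse(umem, handle) so it is not lost.
 */
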
/* Handle the offset appropriately depending on aligned or unaligned mode.
 * For unaligned mode, we store the offset in the upper 16 bits of the address.
 * For aligned mode, we simply add the offset to the address.
 */
static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
					 u64 offset)
{
	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	else
		return address + offset;
}
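
/* Illustrative sketch, not part of the original header: in unaligned mode the
 * value produced by xsk_umem_adjust_offset() round-trips through the helpers
 * above, so for a chunk base address and an offset into that chunk:
 *
 *	u64 handle = xsk_umem_adjust_offset(umem, base, offset);
 *
 *	xsk_umem_extract_addr(handle)       == base
 *	xsk_umem_extract_offset(handle)     == offset
 *	xsk_umem_add_offset_to_addr(handle) == base + offset
 *
 * In aligned mode the handle is simply base + offset, extract_offset()
 * returns 0, and add_offset_to_addr() still yields base + offset.
 */
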
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return false;
}

static inline void xsk_umem_release_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return 0;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return false;
}

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return false;
}

static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
					 u64 offset)
{
	return 0;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}
#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */