/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT)

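/* The flag bit is carried in the addr member of struct xdp_umem_page below
 * (note how xdp_umem_get_data() masks it off with PAGE_MASK). Going by the
 * flag's name, a set XSK_NEXT_PG_CONTIG_MASK bit in pages[i].addr means that
 * page i + 1 is mapped contiguously after page i, so a frame may straddle the
 * page boundary in unaligned chunk mode.
 */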
struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

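/* Stash of FILL ring addresses. A sketch of the intended use (see
 * xsk_reuseq_prepare()/xsk_reuseq_swap() and their driver callers): addresses
 * a driver has dequeued but could not post to hardware, e.g. across a queue
 * teardown, are parked here with xsk_umem_fq_reuse() and handed out again by
 * the *_rq helpers further down before new FILL ring entries are consumed.
 */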
struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

/* Flags for the umem flags field.
 *
 * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public
 * flags. See include/uapi/linux/if_xdp.h.
 */
#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1)

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	u16 queue_id;
	u8 need_wakeup;
	u8 flags;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
 */

struct xsk_map {
	struct bpf_map map;
	struct list_head __percpu *flush_list;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

struct xsk_map_node {
	struct list_head node;
	struct xsk_map *map;
	struct xdp_sock **map_entry;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
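/* The need_wakeup protocol in short (a summary; the authoritative description
 * is in Documentation/networking/af_xdp.rst): when XDP_USE_NEED_WAKEUP is set
 * at bind time, the driver uses the setters above to flag in the rings that
 * user space must kick the kernel with sendmsg()/poll() before new FILL or TX
 * entries will be processed, which saves syscalls while the driver is already
 * running.
 */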

void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
		       struct xdp_sock *xs);
void __xsk_map_flush(struct bpf_map *map);

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}
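
/* Rough fast-path flow, as a sketch (see net/xdp/xsk.c and net/xdp/xskmap.c
 * for the real thing): a bpf_redirect_map() into an XSKMAP looks the socket
 * up with __xsk_map_lookup_elem(), __xsk_map_redirect() receives the frame
 * and queues the socket on the per-cpu flush_list, and __xsk_map_flush() runs
 * at the end of the NAPI poll to publish the completed RX ring entries to
 * user space.
 */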

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
}
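
/* Worked example, assuming XSK_UNALIGNED_BUF_OFFSET_SHIFT == 48 and
 * XSK_UNALIGNED_BUF_ADDR_MASK == (1ULL << 48) - 1 from
 * include/uapi/linux/if_xdp.h: the unaligned-mode handle 0x0003000000001800
 * splits into base address 0x1800 and offset 0x3, so
 * xsk_umem_add_offset_to_addr() yields 0x1803.
 */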

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	unsigned long page_addr;

	addr = xsk_umem_add_offset_to_addr(addr);
	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;

	return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	addr = xsk_umem_add_offset_to_addr(addr);

	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
}

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
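
/* A typical zero-copy driver RX refill loop, as a sketch only (real drivers
 * add their own descriptor and headroom bookkeeping):
 *
 *	u64 addr;
 *
 *	while (ring_has_room(rx_ring)) {
 *		if (!xsk_umem_peek_addr_rq(umem, &addr))
 *			break;
 *		post_to_hw(rx_ring, xdp_umem_get_dma(umem, addr));
 *		xsk_umem_discard_addr_rq(umem);
 *	}
 *
 * ring_has_room() and post_to_hw() stand in for driver-specific code.
 */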

/* Handle the offset appropriately depending on aligned or unaligned mode.
 * For unaligned mode, we store the offset in the upper 16-bits of the address.
 * For aligned mode, we simply add the offset to the address.
 */
static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
					 u64 offset)
{
	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	else
		return address + offset;
}
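/* For instance, assuming XSK_UNALIGNED_BUF_OFFSET_SHIFT == 48: adjusting
 * address 0x1000 by offset 0x100 gives 0x0100000000001000 in unaligned mode
 * and 0x1100 in aligned mode; xsk_umem_add_offset_to_addr() above folds the
 * former back down to 0x1100.
 */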
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return 0;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return false;
}

static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
					 u64 offset)
{
	return 0;
}

static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
				     struct xdp_sock *xs)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(struct bpf_map *map)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}
#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */