/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

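/* Stash of FILL queue addresses that were consumed from the queue but not
 * used (e.g. when a driver tears down or reconfigures its rings); the *_rq
 * helpers further down drain this stash before touching the FILL queue
 * again.
 */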
struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
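
/* Illustrative TX flow (a sketch under driver-specific assumptions, not an
 * API contract stated by this header): a zero-copy driver typically pulls
 * descriptors with xsk_umem_consume_tx(), marks the end of the batch with
 * xsk_umem_consume_tx_done(), and later acknowledges finished frames with
 * xsk_umem_complete_tx():
 *
 *	struct xdp_desc desc;
 *
 *	while (xsk_umem_consume_tx(umem, &desc)) {
 *		dma_addr_t dma = xdp_umem_get_dma(umem, desc.addr);
 *
 *		... program dma and desc.len into the HW TX ring ...
 *	}
 *	xsk_umem_consume_tx_done(umem);
 *
 *	... on TX completion, with 'completed' frames finished by HW ...
 *	xsk_umem_complete_tx(umem, completed);
 */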

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}
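
/* A umem address is a byte offset into the user memory area: the high bits
 * (addr >> PAGE_SHIFT) select an entry in umem->pages[] and the low bits
 * (addr & (PAGE_SIZE - 1)) are the offset inside that page. For example,
 * with 4 KiB pages, addr 0x2010 resolves to umem->pages[2] plus offset 0x10,
 * for both the CPU mapping (xdp_umem_get_data) and the DMA address
 * (xdp_umem_get_dma).
 */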

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
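
/* Illustrative RX refill flow (a sketch, with driver-specific details
 * elided): a zero-copy driver typically peeks an address, maps it for the
 * hardware, and only then discards it from the FILL queue/reuse stash:
 *
 *	u64 addr;
 *
 *	if (!xsk_umem_peek_addr_rq(umem, &addr))
 *		return false;	... nothing to refill with ...
 *
 *	dma = xdp_umem_get_dma(umem, addr);
 *	... post the buffer to the HW RX ring ...
 *	xsk_umem_discard_addr_rq(umem);
 *
 * Addresses that end up unused (e.g. buffers left over when the rings are
 * torn down) are handed back with xsk_umem_fq_reuse(umem, addr) so they can
 * be recycled later instead of being lost.
 */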
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */