// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

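/* A socket is considered set up for use in an XSKMAP once it has an Rx ring,
 * a registered umem and a fill queue (either already owned by the buffer
 * pool or still pending in fq_tmp).
 */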
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		(xs->pool->fq || READ_ONCE(xs->fq_tmp));
}

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_umem *umem = pool->umem;
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_umem *umem = pool->umem;
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

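/* Convert an xskb back into the 64-bit address written to the Rx descriptor.
 * In aligned mode the data offset is simply added to the original chunk
 * address; in unaligned mode the offset is instead carried in the upper bits
 * of the handle.
 */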
static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

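/* Copy the frame, and any XDP metadata preceding it, from one xdp_buff into
 * another. Used by the copy-mode Rx path below.
 */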
static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

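/* Copy-mode Rx: allocate a buffer from the socket's pool, copy the received
 * frame into it and post it to the Rx ring. The original buffer is returned
 * to its allocator here only when explicit_free is set.
 */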
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}

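/* Publish the reserved Rx descriptors to user space, release consumed fill
 * queue entries and wake up anyone blocked in poll() on the socket.
 */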
static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->umem->xsk_tx_list, list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_umem *umem = pool->umem;
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

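/* Kick the driver so that it processes the rings for this socket's queue,
 * e.g. to start a zero-copy transmit or to trigger an Rx ring refill.
 */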
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

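/* Copy-mode (skb) transmit path: pull descriptors off the Tx ring, copy each
 * frame into a freshly allocated skb and hand it straight to the driver
 * queue. Completions are posted from xsk_destruct_skb() above when the skb
 * is freed.
 */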
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

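/* Bind the socket to a device and queue id. The socket either gets its own
 * buffer pool created from its umem, or, with XDP_SHARED_UMEM, shares the
 * pool of an already bound socket.
 */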
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->fq_tmp || xs->cq_tmp) {
			/* Do not allow setting your own fq or cq. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		/* Share the buffer pool with the other socket. */
		xp_get_pool(umem_xs->pool);
		xs->pool = umem_xs->pool;
		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

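/* Helpers filling in the ring offsets returned by XDP_MMAP_OFFSETS. They
 * write the v1 layout; the flags offsets of the current layout are appended
 * separately in xsk_getsockopt() below.
 */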
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

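/* mmap() one of the four rings into user space; the ring is selected by the
 * page offset that user space passes in (XDP_PGOFF_*).
 */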
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name = "XDP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family = PF_XDP,
	.owner = THIS_MODULE,
	.release = xsk_release,
	.bind = xsk_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = xsk_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = xsk_setsockopt,
	.getsockopt = xsk_getsockopt,
	.sendmsg = xsk_sendmsg,
	.recvmsg = sock_no_recvmsg,
	.mmap = xsk_mmap,
	.sendpage = sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xp_put_pool(xs->pool);

	sk_refcnt_debug_dec(sk);
}

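/* Create an AF_XDP socket: requires CAP_NET_RAW, SOCK_RAW and a zero
 * protocol, as checked below.
 */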
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner = THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);