// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

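/* The need_wakeup scheme lets user space know when it must kick the
 * kernel with a syscall (poll/sendmsg/recvmsg) to make progress. The
 * driver sets XDP_RING_NEED_WAKEUP on the fill ring (Rx) or the Tx
 * ring when it has run out of work to do on its own, and clears it
 * again while it is actively processing, so busy workloads can skip
 * syscalls entirely.
 */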
void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
	if (umem->need_wakeup & XDP_WAKEUP_RX)
		return;

	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
		return;

	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

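/* Convert a buffer back into the address format expected on the Rx
 * ring: the original chunk address plus the current data offset. In
 * unaligned chunk mode the offset is carried in the upper bits of the
 * handle (shifted by XSK_UNALIGNED_BUF_OFFSET_SHIFT) instead of being
 * added to the base address.
 */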
static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

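/* Zero-copy Rx: the frame already resides in umem, so publishing a
 * descriptor on the socket's Rx ring is enough. xp_release() only
 * recycles the xdp_buff_xsk bookkeeping struct; ownership of the data
 * now rests with user space.
 */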
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

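/* Copy-mode Rx: allocate a fresh buffer from the umem pool, copy the
 * packet (including any metadata in front of the data pointer) into it
 * and hand it to the Rx ring. The original buffer is returned to its
 * memory model if the caller asked for an explicit free.
 */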
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_umem_get_rx_frame_size(xs->umem)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->umem);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->umem->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

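/* Called on the XDP_REDIRECT path into an xskmap. Instead of flushing
 * per packet as xsk_generic_rcv() does, the socket is queued on a
 * per-CPU flush list and completed in __xsk_map_flush() at the end of
 * the NAPI poll.
 */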
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_prod_submit_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

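/* Used by zero-copy drivers to fetch the next Tx descriptor from any
 * socket sharing this umem. Returns true and fills in @desc if a
 * descriptor was consumed, false if all Tx rings are empty or the
 * completion queue has no room left.
 */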
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, umem)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(umem->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->umem->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

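/* Copy-mode Tx: drain up to TX_BATCH_SIZE descriptors from the Tx
 * ring, copy each one into a freshly allocated skb and send it with
 * dev_direct_xmit(). Completions are posted from the skb destructor
 * (xsk_destruct_skb) once the skb has been consumed.
 */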
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->umem)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

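/* poll() reports readable/writable based on the state of the Rx and Tx
 * rings, and also acts as a kick: if the device asked for a wakeup it
 * either calls the driver's wakeup hook (zero-copy) or drives the
 * copy-mode Tx path directly.
 */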
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_umem *umem;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	umem = xs->umem;

	if (umem->need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, umem->need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

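/* Bind the socket to a <netdev, queue_id> pair. With XDP_SHARED_UMEM
 * the umem is borrowed from an already bound socket on the same device
 * and queue; otherwise the socket's own umem is assigned to the
 * device, possibly in zero-copy mode.
 */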
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

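/* Old layout of struct xdp_umem_reg, from before the flags field was
 * added. Kept so that XDP_UMEM_REG keeps working for applications
 * built against the older ABI, which pass a shorter optlen.
 */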
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		if (optname == XDP_UMEM_FILL_RING)
			xp_set_fq(xs->umem->pool, *q);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

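/* Old layout of struct xdp_statistics, from before the ring-full and
 * empty-descriptor counters were added. XDP_STATISTICS copies out only
 * this much for applications that pass the smaller length.
 */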
struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

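/* mmap() maps one of the four rings into user space. The pgoff encodes
 * which ring is requested (Rx, Tx, fill or completion); the queue must
 * already have been created via the corresponding setsockopt.
 */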
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);