// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

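/* A socket is considered set up for insertion into an XSKMAP once it has an
 * Rx ring, a registered umem and a fill ring (either already handed over to
 * the buffer pool or still pending in fq_tmp).
 */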
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		(xs->pool->fq || READ_ONCE(xs->fq_tmp));
}

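/* The need_wakeup scheme: when the driver runs out of fill ring entries (Rx)
 * or Tx descriptors, it sets XDP_RING_NEED_WAKEUP on the corresponding
 * user-space visible ring so the application knows it has to kick the kernel
 * via poll()/sendmsg(). The cached_need_wakeup bits avoid touching the shared
 * rings when the flag is already in the desired state.
 */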
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

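/* Convert a buffer back into the address format used on the user-space rings.
 * In aligned mode the handle is orig_addr plus the offset into the chunk; in
 * unaligned mode the offset is carried in the upper bits of the 64-bit
 * address, shifted by XSK_UNALIGNED_BUF_OFFSET_SHIFT.
 */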
static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

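/* Zero-copy Rx: the frame already resides in umem, so receiving it only means
 * posting its handle and length on the Rx ring and releasing the buffer back
 * to the pool.
 */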
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

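/* Copy-mode Rx: allocate a buffer from the pool, copy the frame (including
 * any metadata in front of the data pointer) into it and post it on the Rx
 * ring. Frames that do not fit or cannot get a buffer are counted in
 * rx_dropped.
 */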
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}

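/* Publish the received descriptors on the Rx ring, move the fill ring's
 * consumer pointer forward so user space can reuse those slots, and wake up
 * any reader blocked on the socket.
 */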
static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

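/* Used by zero-copy drivers to fetch the next descriptor to transmit. All
 * sockets sharing this pool are scanned; a descriptor is only handed out if a
 * completion queue slot could be reserved for it first (see the backpressure
 * comment below).
 */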
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

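/* Copy-mode Tx: for each descriptor on the Tx ring, allocate an skb, copy the
 * umem data into it and send it with dev_direct_xmit() on the bound queue.
 * The completion address is stashed in destructor_arg so xsk_destruct_skb()
 * can post it to the completion ring once the skb has been consumed.
 */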
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

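/* poll() reports readability/writability based on the Rx and Tx rings. If the
 * need_wakeup flag is set it also acts as the kick: it wakes the driver in
 * zero-copy mode or drives the copy-mode Tx path directly.
 */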
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

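/* Bind the socket to a device and queue id. Either the socket brings its own
 * umem plus fill/completion rings and gets a freshly created buffer pool, or
 * XDP_SHARED_UMEM is set and the pool/umem of an already bound socket is
 * shared. The XSK_BOUND state is published last, paired with the smp_rmb() in
 * xsk_is_bound().
 */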
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->fq_tmp || xs->cq_tmp) {
			/* Do not allow setting your own fq or cq. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		/* Share the buffer pool with the other socket. */
		xp_get_pool(umem_xs->pool);
		xs->pool = umem_xs->pool;
		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

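/* Layout of struct xdp_umem_reg before the flags field was added, kept so
 * that XDP_UMEM_REG keeps accepting the shorter structure from older
 * applications.
 */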
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

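/* Map one of the four rings (Rx, Tx, fill or completion) into user space.
 * Which ring is selected is encoded in the mmap offset. Only allowed before
 * the socket is bound, i.e. while still in XSK_READY state.
 */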
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

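/* Netdevice notifier: when a device is unregistered, unbind every socket that
 * was bound to it, report ENETDOWN to user space and drop the buffer pool's
 * device references.
 */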
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xp_put_pool(xs->pool);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);