// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

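/* Illustrative sketch, not part of this file: the minimal userspace setup
 * sequence that this socket type serves. All names come from the AF_XDP
 * UAPI in <linux/if_xdp.h>; umem_area, umem_size, entries and ifindex are
 * placeholders, and error handling is omitted.
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *	struct xdp_umem_reg mr = { .addr = (__u64)umem_area, .len = umem_size,
 *				   .chunk_size = 2048, .headroom = 0 };
 *	struct sockaddr_xdp sxdp = { .sxdp_family = AF_XDP,
 *				     .sxdp_ifindex = ifindex,
 *				     .sxdp_queue_id = 0 };
 *
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	(mmap() of the rings is sketched next to xsk_mmap() below)
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */
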
#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

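/* Illustrative sketch, not part of this file: how a zero-copy driver would
 * typically pair the need_wakeup helpers above in its Rx refill path.
 * my_refill_rx() and budget are hypothetical; xsk_buff_can_alloc() is a
 * real helper from <net/xdp_sock_drv.h>.
 *
 *	static void my_refill_rx(struct xsk_buff_pool *pool, u32 budget)
 *	{
 *		if (!xsk_buff_can_alloc(pool, budget))
 *			xsk_set_rx_need_wakeup(pool);	(fill ring empty, ask userspace)
 *		else
 *			xsk_clear_rx_need_wakeup(pool);
 *	}
 */
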
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

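/* Worked example for xp_get_handle(), assuming the UAPI value
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT == 48: with orig_addr == 0x4000 and a
 * total data offset (headroom included) of 0x100, an unaligned-mode pool
 * returns the handle 0x4000 + (0x100ULL << 48) == 0x0100000000004000,
 * i.e. the base address in the low bits and the offset in the high bits,
 * while an aligned-mode pool simply returns 0x4000 + 0x100 == 0x4100.
 */
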
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct xdp_buff *xsk_xdp;
	int err;
	u32 len;

	len = xdp->data_end - xdp->data;
	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp);
	if (!err) {
		err = __xsk_rcv(xs, xdp);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;
	u32 len;

	err = xsk_rcv_check(xs, xdp);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return __xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

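/* Illustrative sketch, not part of this file: __xsk_map_redirect() and
 * __xsk_map_flush() are reached from the core XDP redirect path, roughly
 * paired inside a driver NAPI poll like this. my_poll(), my_rx_frame() and
 * prog are hypothetical; xdp_do_redirect() and xdp_do_flush() are the real
 * core helpers.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct xdp_buff xdp;
 *		int done = 0;
 *
 *		while (done < budget && my_rx_frame(napi, &xdp))
 *			done += !xdp_do_redirect(napi->dev, &xdp, prog);
 *		xdp_do_flush();	(drains xskmap_flush_list via __xsk_map_flush())
 *		return done;
 *	}
 */
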
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

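/* Illustrative sketch, not part of this file: a zero-copy driver Tx path
 * built on xsk_tx_peek_desc()/xsk_tx_release(). my_xsk_xmit() and
 * my_hw_xmit() are hypothetical; xsk_buff_raw_get_dma() is a real helper
 * from <net/xdp_sock_drv.h>. A real driver calls xsk_tx_completed() only
 * once the hardware has completed the descriptors, not inline as shown.
 *
 *	static void my_xsk_xmit(struct xsk_buff_pool *pool)
 *	{
 *		struct xdp_desc desc;
 *		u32 sent = 0;
 *
 *		while (xsk_tx_peek_desc(pool, &desc)) {
 *			my_hw_xmit(xsk_buff_raw_get_dma(pool, desc.addr),
 *				   desc.len);
 *			sent++;
 *		}
 *		xsk_tx_release(pool);
 *		if (sent)
 *			xsk_tx_completed(pool, sent);
 *	}
 */
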
static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, struct xdp_desc *descs,
					u32 max_entries)
{
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
				   u32 max_entries)
{
	struct xdp_sock *xs;
	u32 nb_pkts;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fall back to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, descs, max_entries);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_peek_desc_batch(xs->tx, descs, pool, max_entries);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, descs, nb_pkts);
	if (!nb_pkts)
		goto out;

	xskq_cons_release_n(xs->tx, nb_pkts);
	__xskq_cons_release(xs->tx);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);
}

static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			kfree_skb(skb);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
	       READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

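/* Illustrative sketch, not part of this file: the socket options a
 * userspace application would set to make xsk_no_wakeup() return true, so
 * that sendmsg()/recvmsg() busy-poll the NAPI context instead of kicking
 * the driver. The numeric values are placeholders; note that sk_napi_id
 * only becomes valid once traffic has flowed (see
 * sk_mark_napi_id_once_xdp() in xsk_rcv_check()).
 *
 *	int on = 1, usecs = 20, budget = 64;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &on, sizeof(on));
 *	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
 *	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget, sizeof(budget));
 */
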
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return __xsk_sendmsg(sk);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

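/* Illustrative sketch, not part of this file: binding a second socket to
 * an already-bound socket's umem with XDP_SHARED_UMEM, which exercises the
 * sharing branches of xsk_bind() above. first_fd, second_fd, ifindex and
 * queue are placeholders; when sharing the same dev/queue pair, second_fd
 * must not have registered fill or completion rings of its own.
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,
 *		.sxdp_queue_id = queue,
 *		.sxdp_flags = XDP_SHARED_UMEM,
 *		.sxdp_shared_umem_fd = first_fd,
 *	};
 *
 *	bind(second_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */
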
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

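/* Illustrative sketch, not part of this file: how userspace maps the Rx
 * ring that xsk_mmap() serves, using XDP_MMAP_OFFSETS to learn the layout.
 * entries is a placeholder for the size requested via XDP_RX_RING.
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *	void *rx_map;
 *
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	rx_map = mmap(NULL, off.rx.desc + entries * sizeof(struct xdp_desc),
 *		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		      fd, XDP_PGOFF_RX_RING);
 *	(the producer and consumer words then live at rx_map + off.rx.producer
 *	 and rx_map + off.rx.consumer)
 */
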
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);