// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return xskq_cons_has_entries(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_cons_peek_addr(umem->fq, addr, umem);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_release_addr(struct xdp_umem *umem)
{
	xskq_cons_release(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_release_addr);

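/* The need_wakeup helpers below let a driver tell userspace whether it
 * must explicitly kick the kernel (e.g. via poll() or sendto()) before
 * more descriptors are processed. The state is mirrored both in the ring
 * flags visible to userspace (XDP_RING_NEED_WAKEUP) and in
 * umem->need_wakeup.
 */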
void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
	if (umem->need_wakeup & XDP_WAKEUP_RX)
		return;

	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
		return;

	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
 * each page. This is only required in copy mode.
 */
static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
			     u32 len, u32 metalen)
{
	void *to_buf = xdp_umem_get_data(umem, addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (xskq_cons_crosses_non_contig_pg(umem, addr, len + metalen)) {
		void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
		u64 page_start = addr & ~(PAGE_SIZE - 1);
		u64 first_len = PAGE_SIZE - (addr - page_start);

		memcpy(to_buf, from_buf, first_len);
		memcpy(next_pg_addr, from_buf + first_len,
		       len + metalen - first_len);

		return;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

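/* Copy-mode receive: reserve a chunk from the fill queue, copy the frame
 * (including any metadata) into it with __xsk_rcv_memcpy() and post a
 * descriptor for it on the Rx ring. The xdp_buff is returned to the
 * driver once the copy has been made.
 */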
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u64 offset = xs->umem->headroom;
	u64 addr, memcpy_addr;
	void *from_buf;
	u32 metalen;
	int err;

	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	__xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);

	offset += metalen;
	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (!err) {
		xskq_cons_release(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

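/* Zero-copy receive: the frame already resides in umem, so only a
 * descriptor referencing xdp->handle needs to be posted on the Rx ring.
 */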
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_prod_reserve_desc(xs->rx, xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->umem->fq);
	sock_def_readable(&xs->sk);
}

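/* Receive path for generic XDP: the frame is copied into a chunk taken
 * from the fill queue under rx_lock, and the Rx ring is submitted and the
 * socket woken up right away instead of being deferred to
 * __xsk_map_flush().
 */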
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	u64 offset = xs->umem->headroom;
	void *buffer;
	u64 addr;
	int err;

	spin_lock_bh(&xs->rx_lock);

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		err = -ENOSPC;
		goto out_drop;
	}

	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);

	addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err)
		goto out_drop;

	xskq_cons_release(xs->umem->fq);
	xskq_prod_submit(xs->rx);

	spin_unlock_bh(&xs->rx_lock);

	xs->sk.sk_data_ready(&xs->sk);
	return 0;

out_drop:
	xs->rx_dropped++;
out_unlock:
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

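/* Called when an XDP program redirects a frame to this socket through an
 * XSKMAP. The socket is queued on the per-cpu xskmap_flush_list so that
 * the Rx ring submit and wakeup in xsk_flush() can be batched by
 * __xsk_map_flush().
 */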
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_prod_submit_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, umem))
			continue;

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(umem->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

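/* Kick the driver through its ndo_xsk_wakeup callback; used by the
 * zero-copy Tx path (xsk_zc_xmit) and by poll() when need_wakeup is set.
 */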
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->umem->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

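/* Copy-mode transmit: for each Tx descriptor, allocate an skb, copy the
 * payload out of umem and hand the skb to the driver with
 * dev_direct_xmit(). The completion is reported from xsk_destruct_skb()
 * when the skb is freed, which is why a completion queue slot is reserved
 * up front.
 */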
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->umem)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

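/* poll() reports ring state to userspace. If the driver has requested a
 * wakeup, poll() also performs it: zero-copy sockets are kicked through
 * xsk_wakeup() while copy-mode sockets drive the Tx path directly via
 * __xsk_sendmsg().
 */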
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_umem *umem;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	umem = xs->umem;

	if (umem->need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, umem->need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

/* Check if umem pages are contiguous.
 * If zero-copy mode, use the DMA address to do the page contiguity check
 * For all other modes we use addr (kernel virtual address)
 * Store the result in the low bits of addr.
 */
static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
{
	struct xdp_umem_page *pgs = umem->pages;
	int i, is_contig;

	for (i = 0; i < umem->npgs - 1; i++) {
		is_contig = (flags & XDP_ZEROCOPY) ?
			(pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
			(pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
		pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
	}
}

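/* Bind the socket to a <netdev, queue_id> pair. With XDP_SHARED_UMEM the
 * umem is borrowed from an already bound socket on the same device and
 * queue; otherwise the socket's own umem is assigned to the device, which
 * also determines whether zero-copy is used (xs->zc).
 */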
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;

		xsk_check_page_contiguity(xs->umem, flags);
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

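/* Older layout of struct xdp_umem_reg, presumably predating the flags
 * field; XDP_UMEM_REG below accepts this shorter structure so that
 * existing userspace keeps working.
 */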
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_user(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

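/* mmap() exposes one of the four rings to userspace; the ring is selected
 * by the mmap page offset (XDP_PGOFF_RX_RING, XDP_PGOFF_TX_RING,
 * XDP_UMEM_PGOFF_FILL_RING or XDP_UMEM_PGOFF_COMPLETION_RING).
 */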
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

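/* Netdev notifier: when a device is unregistered, flag every socket bound
 * to it with ENETDOWN, unbind it and clear the umem's device references.
 */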
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);