// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

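/* Readiness check used when inserting a socket into an XSKMAP: the Rx
 * ring, the umem and its fill queue must all exist before the socket
 * can be a valid redirect target.
 */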
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

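/* The xsk_umem_* helpers below are exported for zero-copy capable
 * drivers, which use them from their napi poll loops to consume
 * fill-queue addresses and to maintain the need_wakeup ring flags.
 */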
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return xskq_has_addrs(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr, umem);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);

void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
	if (umem->need_wakeup & XDP_WAKEUP_RX)
		return;

	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
		return;

	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
 * each page. This is only required in copy mode.
 */
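/* Illustrative example (4 KiB pages, metalen == 0): if addr ends up
 * 16 bytes short of a page boundary and len == 100, the check below
 * fires with first_len == 16, so 16 bytes are copied to the tail of the
 * first page and the remaining 84 bytes to the start of the next one.
 */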
static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
			     u32 len, u32 metalen)
{
	void *to_buf = xdp_umem_get_data(umem, addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (xskq_crosses_non_contig_pg(umem, addr, len + metalen)) {
		void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
		u64 page_start = addr & ~(PAGE_SIZE - 1);
		u64 first_len = PAGE_SIZE - (addr - page_start);

		memcpy(to_buf, from_buf, first_len + metalen);
		memcpy(next_pg_addr, from_buf + first_len, len - first_len);

		return;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u64 offset = xs->umem->headroom;
	u64 addr, memcpy_addr;
	void *from_buf;
	u32 metalen;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	__xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);

	offset += metalen;
	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

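/* Rx entry point from the XDP fast path. Frames backed by driver
 * zero-copy memory (MEM_TYPE_ZERO_COPY) already live in the umem and
 * only need a descriptor; everything else goes through the copying
 * __xsk_rcv() path above.
 */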
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}

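/* Rx path for generic (skb-mode) XDP. Unlike the native path above,
 * several CPUs may deliver into the same socket concurrently, so the
 * fill-queue peek and the Rx-ring produce are serialized with rx_lock.
 */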
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	u64 offset = xs->umem->headroom;
	void *buffer;
	u64 addr;
	int err;

	spin_lock_bh(&xs->rx_lock);

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		err = -ENOSPC;
		goto out_drop;
	}

	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);

	addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (err)
		goto out_drop;

	xskq_discard_addr(xs->umem->fq);
	xskq_produce_flush_desc(xs->rx);

	spin_unlock_bh(&xs->rx_lock);

	xs->sk.sk_data_ready(&xs->sk);
	return 0;

out_drop:
	xs->rx_dropped++;
out_unlock:
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

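/* Hand the next Tx descriptor from any socket sharing this umem to the
 * driver. A completion-queue entry is staged (lazily) before the
 * descriptor is discarded from the Tx ring, so that a completion can
 * always be flushed later via xsk_umem_complete_tx().
 */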
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, desc, umem))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc->addr))
			goto out;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
					       XDP_WAKEUP_TX);
}

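/* skb destructor for copy-mode Tx: the umem address travels in
 * destructor_arg and is posted to the completion queue when the skb is
 * freed. Destructors can run in interrupt context, hence the irqsave
 * flavour of the completion lock.
 */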
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

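/* Copy-mode transmit: each Tx descriptor is copied into a freshly
 * allocated skb and sent with dev_direct_xmit() on the bound queue. At
 * most TX_BATCH_SIZE descriptors are handled per call, bounding the
 * work done under xs->mutex before returning -EAGAIN to the caller.
 */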
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_peek_desc(xs->tx, &desc, xs->umem)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}

static unsigned int xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct net_device *dev;
	struct xdp_umem *umem;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	dev = xs->dev;
	umem = xs->umem;

	if (umem->need_wakeup)
		dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
						umem->need_wakeup);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= POLLIN | POLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

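/* Tear down the device binding. Drivers walk umem->xsk_list under RCU,
 * so the socket is unlinked first and synchronize_net() guarantees no
 * driver still sees it before the device reference is dropped.
 */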
static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

/* Check if umem pages are contiguous.
 * In zero-copy mode, use the DMA addresses to do the page contiguity
 * check; for all other modes we use addr (kernel virtual address).
 * Store the result in the low bits of addr.
 */
static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
{
	struct xdp_umem_page *pgs = umem->pages;
	int i, is_contig;

	for (i = 0; i < umem->npgs - 1; i++) {
		is_contig = (flags & XDP_ZEROCOPY) ?
			(pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
			(pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
		pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
	}
}

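/* bind() attaches the socket to a <netdev, queue_id> pair. With
 * XDP_SHARED_UMEM the umem is borrowed from an already-bound socket;
 * otherwise the socket's own umem is registered with the driver. The
 * final smp_wmb() before the XSK_BOUND store publishes all of this to
 * the lock-free readers in xsk_is_bound().
 */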
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;

		xsk_check_page_contiguity(xs->umem, flags);
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

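/* Layout of struct xdp_umem_reg before the flags field was appended.
 * XDP_UMEM_REG below accepts both sizes so that older binaries keep
 * working.
 */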
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_user(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

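/* mmap() exposes a ring to userspace. The page offset encodes which
 * ring is being mapped (Rx, Tx, fill or completion); the ring must have
 * been created via the corresponding setsockopt first, or -EINVAL
 * results.
 */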
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);