// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

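/* True once the socket has everything the redirect path needs: an Rx
 * ring, a umem and a fill queue. Checked before the socket may be used
 * as an XSKMAP target.
 */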
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return xskq_has_addrs(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr, umem);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);

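/* Helpers for the XDP_USE_NEED_WAKEUP protocol. While
 * XDP_RING_NEED_WAKEUP is set on a ring, the application has to kick
 * the kernel with a syscall (poll(), or sendto() for Tx) before more
 * entries are processed; drivers set and clear the flag with the
 * helpers below. An application-side check could look roughly like
 * this (illustrative sketch, not part of this file):
 *
 *	if (ring->flags & XDP_RING_NEED_WAKEUP)
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 */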
void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
	if (umem->need_wakeup & XDP_WAKEUP_RX)
		return;

	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
		return;

	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
 * each page. This is only required in copy mode.
 */
static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
			     u32 len, u32 metalen)
{
	void *to_buf = xdp_umem_get_data(umem, addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (xskq_crosses_non_contig_pg(umem, addr, len + metalen)) {
		void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
		u64 page_start = addr & ~(PAGE_SIZE - 1);
		u64 first_len = PAGE_SIZE - (addr - page_start);

		memcpy(to_buf, from_buf, first_len + metalen);
		memcpy(next_pg_addr, from_buf + first_len, len - first_len);

		return;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

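/* Copy-mode receive: take a free chunk off the fill ring, copy the
 * frame and its metadata (if any) into the umem, and post a descriptor
 * for it on the Rx ring.
 */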
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u64 offset = xs->umem->headroom;
	u64 addr, memcpy_addr;
	void *from_buf;
	u32 metalen;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	__xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);

	offset += metalen;
	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

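/* Zero-copy receive: the frame already lives in the umem, so only a
 * descriptor for the buffer handle needs to be posted on the Rx ring.
 */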
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

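/* Receive entry point for redirects from the native XDP path. The
 * packet must arrive on the device/queue pair the socket is bound to.
 */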
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}

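/* Receive path for generic (skb-mode) XDP. Serialized with rx_lock as
 * it may run concurrently for the same socket, and it produces and
 * flushes the Rx descriptor in one go instead of relying on a separate
 * xsk_flush() call.
 */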
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	u64 offset = xs->umem->headroom;
	void *buffer;
	u64 addr;
	int err;

	spin_lock_bh(&xs->rx_lock);

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		err = -ENOSPC;
		goto out_drop;
	}

	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);

	addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (err)
		goto out_drop;

	xskq_discard_addr(xs->umem->fq);
	xskq_produce_flush_desc(xs->rx);

	spin_unlock_bh(&xs->rx_lock);

	xs->sk.sk_data_ready(&xs->sk);
	return 0;

out_drop:
	xs->rx_dropped++;
out_unlock:
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

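/* Pop the next Tx descriptor from one of the sockets sharing the umem
 * and reserve a completion ring slot for it. A zero-copy driver would
 * typically drive this from its Tx path, e.g. (illustrative sketch,
 * drv_xmit_zc() assumed):
 *
 *	while (budget-- && xsk_umem_consume_tx(umem, &desc))
 *		drv_xmit_zc(priv, desc.addr, desc.len);
 *	xsk_umem_consume_tx_done(umem);
 */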
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, desc, umem))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc->addr))
			goto out;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
					       XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

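/* Copy-mode transmit: for each Tx descriptor, allocate an skb, copy
 * the frame out of the umem and push it out on the bound queue with
 * dev_direct_xmit(). The completion entry is produced from the skb
 * destructor once the frame is consumed.
 */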
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_peek_desc(xs->tx, &desc, xs->umem)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xs->dev))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}

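/* poll() doubles as the wakeup point for the need_wakeup protocol:
 * before readiness is reported, the driver is kicked for whichever of
 * Rx/Tx currently has a wakeup pending.
 */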
static unsigned int xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;
	struct xdp_umem *umem = xs->umem;

	if (umem->need_wakeup)
		dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
						umem->need_wakeup);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= POLLIN | POLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

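/* Allocate one of the rings. The size must be a power of two; the
 * write barrier pairs with the read barrier in xsk_mmap() so that a
 * mapped ring is always seen fully initialized.
 */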
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (!dev || xs->state != XSK_BOUND)
		return;

	xs->state = XSK_UNBOUND;

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	xsk_unbind_dev(xs);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

/* Check if umem pages are contiguous.
 * In zero-copy mode, use the DMA address to do the page contiguity
 * check; for all other modes we use addr (kernel virtual address).
 * Store the result in the low bits of addr.
 */
static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
{
	struct xdp_umem_page *pgs = umem->pages;
	int i, is_contig;

	for (i = 0; i < umem->npgs - 1; i++) {
		is_contig = (flags & XDP_ZEROCOPY) ?
			(pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
			(pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
		pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
	}
}

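/* Bind the socket to a device/queue pair, either with its own umem or,
 * with XDP_SHARED_UMEM, by inheriting the umem of another socket that
 * is already bound to the same device and queue.
 */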
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!umem_xs->umem) {
			/* No umem to inherit. */
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		xs->umem = umem_xs->umem;
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;

		xsk_check_page_contiguity(xs->umem, flags);
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err)
		dev_put(dev);
	else
		xs->state = XSK_BOUND;
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

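/* Layout of struct xdp_umem_reg before the flags member was appended.
 * xsk_setsockopt() uses optlen to tell old callers apart from new
 * ones, keeping the original ABI working.
 */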
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_user(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		xs->umem = umem;
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

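/* Map one of the four rings into user space; the page offset selects
 * the ring (XDP_PGOFF_* / XDP_UMEM_PGOFF_*). An application would mmap
 * the Rx ring roughly like this (illustrative sketch, sizes taken from
 * getsockopt(XDP_MMAP_OFFSETS), ndescs is the configured ring size):
 *
 *	rx = mmap(NULL, off.rx.desc + ndescs * sizeof(struct xdp_desc),
 *		  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		  xsk_fd, XDP_PGOFF_RX_RING);
 */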
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (xs->state != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

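/* Netdev notifier: when a device is unregistered, every socket bound
 * to it is unbound and its owner is told the device went down.
 */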
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);