// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);
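
/* The two exported helpers above give zero-copy drivers a peek/discard
 * view of the fill queue: peek returns a pointer to the next address (or
 * NULL if the queue is empty) and discard consumes the entry. A minimal
 * sketch of the intended calling pattern, with a hypothetical driver loop
 * (not part of this file):
 *
 *	u64 addr;
 *
 *	while (budget--) {
 *		if (!xsk_umem_peek_addr(umem, &addr))
 *			break;			// fill queue empty
 *		// ... attach the chunk at addr to an RX descriptor ...
 *		xsk_umem_discard_addr(umem);	// consume the entry
 *	}
 */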

/* Copy path: copy the frame (and any metadata) into a umem chunk taken
 * from the fill queue, then post a descriptor for it on the RX ring.
 */
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	void *to_buf, *from_buf;
	u32 metalen;
	u64 addr;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	to_buf = xdp_umem_get_data(xs->umem, addr);
	memcpy(to_buf, from_buf, len + metalen);
	addr += metalen;
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

/* Zero-copy path: the frame already sits in umem, so only a descriptor
 * referencing it needs to be posted on the RX ring.
 */
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}
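
/* Calling-pattern sketch for the RX entry points above, as a redirect
 * target would presumably use them (hypothetical caller and helper, not
 * part of this file): xsk_rcv() once per frame, then one xsk_flush() to
 * publish the batch and wake the socket:
 *
 *	while ((xdp = next_redirected_frame())) {	// hypothetical helper
 *		if (xsk_rcv(xs, xdp))
 *			;	// dropped; xs->rx_dropped was bumped
 *	}
 *	xsk_flush(xs);	// publish descriptors and call sk_data_ready()
 */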

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	void *buffer;
	u64 addr;
	int err;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);
	addr += metalen;
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xsk_flush(xs);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

/* Hand out the next TX descriptor from any socket sharing this umem,
 * after lazily queuing its address on the completion ring; those entries
 * are made visible later by xsk_umem_complete_tx().
 */
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
{
	struct xdp_desc desc;
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, &desc))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc.addr))
			goto out;

		*dma = xdp_umem_get_dma(umem, desc.addr);
		*len = desc.len;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);
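
/* TX driver-side sketch for the three exports above (hypothetical driver
 * code, not part of this file): drain descriptors with
 * xsk_umem_consume_tx(), then, once the hardware reports completions,
 * publish them with xsk_umem_complete_tx() and wake writers with
 * xsk_umem_consume_tx_done():
 *
 *	dma_addr_t dma;
 *	u32 len;
 *
 *	while (budget-- && xsk_umem_consume_tx(umem, &dma, &len)) {
 *		// ... post (dma, len) to the hardware TX ring ...
 *	}
 *
 *	// later, from the TX completion path:
 *	xsk_umem_complete_tx(umem, nb_completed);
 *	xsk_umem_consume_tx_done(umem);
 */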

static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	while (xskq_peek_desc(xs->tx, &desc)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		if (xskq_reserve_addr(xs->umem->cq))
			goto out;

		if (xs->queue_id >= xs->dev->real_num_tx_queues)
			goto out;

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as the packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}
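
/* Userspace reaches xsk_generic_xmit()/xsk_zc_xmit() by "kicking" the
 * socket after filling the TX ring. Given the sendmsg() checks below, the
 * kick must be non-blocking; a minimal userspace sketch (fd assumed to be
 * a bound AF_XDP socket, not part of this file):
 *
 *	// after producing descriptors into the mmap'ed TX ring:
 *	sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 */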

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xs->dev))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}

static unsigned int xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= POLLIN | POLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}
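
/* poll(2) usage sketch matching the mask above (fd assumed to be a bound
 * AF_XDP socket): POLLIN means the RX ring has entries to consume,
 * POLLOUT means the TX ring has free slots:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0) {
 *		if (pfd.revents & POLLIN)
 *			; // drain the RX ring
 *		if (pfd.revents & POLLOUT)
 *			; // fill the TX ring, then kick with sendto()
 *	}
 */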

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	*queue = q;
	return 0;
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	if (xs->dev) {
		struct net_device *dev = xs->dev;

		/* Wait for driver to stop using the xdp socket. */
		xdp_del_sk_umem(xs->umem, xs);
		xs->dev = NULL;
		synchronize_net();
		dev_put(dev);
	}

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xdp_put_umem(xs->umem);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	mutex_lock(&xs->mutex);
	if (xs->dev) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;
	flags = sxdp->sxdp_flags;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!umem_xs->umem) {
			/* No umem to inherit. */
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		xs->umem = umem_xs->umem;
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err)
		dev_put(dev);
out_release:
	mutex_unlock(&xs->mutex);
	return err;
}
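
/* Bind sketch from the userspace side of the function above (assumed
 * values, not part of this file). The sockaddr names an interface and
 * queue; XDP_SHARED_UMEM borrows the umem of an already-bound socket
 * instead of registering a new one:
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = 0,	// or XDP_COPY, XDP_ZEROCOPY, ...
 *	};
 *
 *	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
 *		; // EBUSY if already bound, EINVAL without rings, ...
 */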

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		struct xdp_umem_reg mr;
		struct xdp_umem *umem;

		if (copy_from_user(&mr, optval, sizeof(mr)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		xs->umem = umem;
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
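
/* Setup-order sketch implied by the option handling above: XDP_UMEM_REG
 * must precede the fill/completion rings, and every ring size must be a
 * power of two (enforced by xsk_init_queue()). Userspace side, with an
 * assumed page-aligned buffer and sizes:
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(unsigned long)buf,
 *		.len = buf_len,
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *	};
 *	int entries = 1024;	// power of two
 *
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries,
 *		   sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &entries, sizeof(entries));
 */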

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;

		if (len < sizeof(off))
			return -EINVAL;

		off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.rx.desc = offsetof(struct xdp_rxtx_ring, desc);
		off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.tx.desc = offsetof(struct xdp_rxtx_ring, desc);

		off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.fr.desc = offsetof(struct xdp_umem_ring, desc);
		off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.cr.desc = offsetof(struct xdp_umem_ring, desc);

		len = sizeof(off);
		if (copy_to_user(optval, &off, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}
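
/* Mapping sketch for the page offsets handled above, paired with
 * XDP_MMAP_OFFSETS from xsk_getsockopt() (userspace side, assumed ring
 * size and layout arithmetic):
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *	void *rx;
 *
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	rx = mmap(NULL, off.rx.desc + entries * sizeof(struct xdp_desc),
 *		  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
 *		  XDP_PGOFF_RX_RING);
 *	// producer/consumer indices live at rx + off.rx.producer and
 *	// rx + off.rx.consumer; descriptors start at rx + off.rx.desc.
 */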

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->tx_completion_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};
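
/* Creation sketch matching the checks in xsk_create(): AF_XDP sockets are
 * SOCK_RAW with protocol 0, and creating one requires CAP_NET_RAW.
 * Userspace side:
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *
 *	if (fd < 0)
 *		; // EPERM without CAP_NET_RAW, ESOCKTNOSUPPORT for other types
 */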

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;
	return 0;

out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);