// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

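/* True once the socket has an RX ring and a umem with a fill queue, i.e. it
 * is set up far enough to be inserted into an XSKMAP and receive redirected
 * frames.
 */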
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return xskq_cons_has_entries(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_cons_peek_addr(umem->fq, addr, umem);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_release_addr(struct xdp_umem *umem)
{
	xskq_cons_release(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_release_addr);

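/* The xsk_{set,clear}_{rx,tx}_need_wakeup() helpers below let drivers toggle
 * the XDP_RING_NEED_WAKEUP flag that user space checks to decide whether it
 * must issue a syscall (poll()/sendto()) to kick the kernel side of the rings.
 */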
void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
	if (umem->need_wakeup & XDP_WAKEUP_RX)
		return;

	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
		return;

	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
 * each page. This is only required in copy mode.
 */
static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
			     u32 len, u32 metalen)
{
	void *to_buf = xdp_umem_get_data(umem, addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (xskq_cons_crosses_non_contig_pg(umem, addr, len + metalen)) {
		void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
		u64 page_start = addr & ~(PAGE_SIZE - 1);
		u64 first_len = PAGE_SIZE - (addr - page_start);

		memcpy(to_buf, from_buf, first_len + metalen);
		memcpy(next_pg_addr, from_buf + first_len, len - first_len);

		return;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

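/* Copy-mode RX: reserve a chunk from the fill queue, copy the frame (and any
 * metadata) into the umem, then post a descriptor on the RX ring.
 */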
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u64 offset = xs->umem->headroom;
	u64 addr, memcpy_addr;
	void *from_buf;
	u32 metalen;
	int err;

	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	__xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);

	offset += metalen;
	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (!err) {
		xskq_cons_release(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

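/* Zero-copy RX: the frame already resides in the umem, so only an RX ring
 * descriptor pointing at it needs to be published.
 */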
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_prod_reserve_desc(xs->rx, xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->umem->fq);
	sock_def_readable(&xs->sk);
}

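/* RX entry point for generic (XDP_SKB) mode. Unlike the driver path there is
 * no flush stage here, so the frame is copied and the RX ring is published
 * immediately, all under rx_lock.
 */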
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	u64 offset = xs->umem->headroom;
	void *buffer;
	u64 addr;
	int err;

	spin_lock_bh(&xs->rx_lock);

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		err = -ENOSPC;
		goto out_drop;
	}

	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);

	addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err)
		goto out_drop;

	xskq_cons_release(xs->umem->fq);
	xskq_prod_submit(xs->rx);

	spin_unlock_bh(&xs->rx_lock);

	xs->sk.sk_data_ready(&xs->sk);
	return 0;

out_drop:
	xs->rx_dropped++;
out_unlock:
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

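/* Called from the XSKMAP redirect path. The socket is queued on a per-cpu
 * flush list so that __xsk_map_flush() can later publish the RX rings and
 * wake the sockets in one batch.
 */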
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_prod_submit_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, umem))
			continue;

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(umem->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->umem->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

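/* Copy-mode TX: drain up to TX_BATCH_SIZE descriptors from the TX ring, copy
 * each frame into a freshly allocated skb and send it with dev_direct_xmit().
 * Completions are posted from the skb destructor once the skb is freed.
 */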
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->umem)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_umem *umem;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	umem = xs->umem;

	if (umem->need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, umem->need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

/* Check if umem pages are contiguous.
 * In zero-copy mode, use the DMA address to do the page contiguity check;
 * for all other modes, use addr (the kernel virtual address).
 * Store the result in the low bits of addr.
 */
static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
{
	struct xdp_umem_page *pgs = umem->pages;
	int i, is_contig;

	for (i = 0; i < umem->npgs - 1; i++) {
		is_contig = (flags & XDP_ZEROCOPY) ?
			(pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
			(pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
		pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
	}
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;

		xsk_check_page_contiguity(xs->umem, flags);
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

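/* Layout of struct xdp_umem_reg before the flags member was added, kept so
 * that XDP_UMEM_REG requests from older applications can still be parsed.
 */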
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

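/* Rough sketch of the expected user-space setup sequence (see the AF_XDP
 * documentation for the authoritative order): create the socket, register a
 * umem with XDP_UMEM_REG, size the rings with XDP_{RX,TX}_RING and
 * XDP_UMEM_{FILL,COMPLETION}_RING, mmap() the rings, then bind() to a
 * device/queue pair.
 */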
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_user(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

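/* Helpers filling in the ring offsets reported by the XDP_MMAP_OFFSETS
 * getsockopt, which user space uses to locate the producer/consumer pointers
 * and the descriptor array inside each mmap()ed ring.
 */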
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

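/* mmap() handler: the page offset selects which ring (RX, TX, fill or
 * completion) is mapped into the caller's address space.
 */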
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

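/* Netdevice notifier: on NETDEV_UNREGISTER, unbind every socket that was
 * bound to the disappearing device and report ENETDOWN to its owner.
 */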
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);