// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (id == 0)
		return;

	if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index, unsigned int napi_id)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
		xdp_rxq->mem.id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);

	mutex_unlock(&mem_id_lock);

	trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
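
/* Illustrative sketch only: how a hypothetical driver might tie an RX queue
 * to a page_pool backed memory model using the two registration calls above.
 * The names "rxq", "pp_params", "queue_index" and "napi" are placeholders
 * invented for this example, not symbols defined in this file.
 *
 *	struct page_pool *pp;
 *	int err;
 *
 *	err = xdp_rxq_info_reg(&rxq->xdp_rxq, dev, queue_index, napi->napi_id);
 *	if (err)
 *		return err;
 *
 *	pp = page_pool_create(&pp_params);
 *	if (IS_ERR(pp)) {
 *		xdp_rxq_info_unreg(&rxq->xdp_rxq);
 *		return PTR_ERR(pp);
 *	}
 *
 *	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
 *	if (err) {
 *		page_pool_destroy(pp);
 *		xdp_rxq_info_unreg(&rxq->xdp_rxq);
 *		return err;
 *	}
 *
 * On teardown, xdp_rxq_info_unreg() also unregisters the memory model (see
 * xdp_rxq_info_unreg_mem_model() above), which destroys the page_pool for
 * MEM_TYPE_PAGE_POOL.
 */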

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing faster recycling of
 * xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 struct xdp_buff *xdp)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		napi_direct &= !xdp_return_frame_no_direct();
		page_pool_put_full_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order-0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
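
/* Illustrative note (an assumption about typical driver usage, not code from
 * this file): xdp_return_frame_rx_napi() is only safe from call sites that
 * still run under the RX NAPI protection described above, e.g. from within
 * the driver's own napi_poll(). Paths that may run outside that context
 * should use plain xdp_return_frame():
 *
 *	// inside the driver's napi_poll(), e.g. a local XDP_TX setup failure:
 *	xdp_return_frame_rx_napi(xdpf);
 *
 *	// anywhere else, e.g. an .ndo_xdp_xmit() error path on another device:
 *	xdp_return_frame(xdpf);
 */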

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
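
/* Illustrative sketch only: a hypothetical TX completion loop returning
 * frames in bulk. "frames" and "budget" are placeholders invented for this
 * example. bq lives on the stack, is initialized with xdp_frame_bulk_init()
 * from net/xdp.h, and the sequence runs under rcu_read_lock() as required by
 * xdp_return_frame_bulk() above.
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *
 *	rcu_read_lock();
 *	for (i = 0; i < budget; i++)
 *		xdp_return_frame_bulk(frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */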

void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}

/* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
};
EXPORT_SYMBOL_GPL(xdp_warn);