// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>
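
/* Registration state machine for an xdp_rxq_info: xdp_rxq_info_reg()
 * moves a queue from NEW to REGISTERED, xdp_rxq_info_unreg() back to
 * UNREGISTERED, and xdp_rxq_info_unused() parks a queue the driver
 * promises never to register.
 */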
#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);
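
	/* The walk may drop the table lock between buckets; when that
	 * happens rhashtable_walk_next() returns ERR_PTR(-EAGAIN) and
	 * the walk must be restarted, hence the retry loop below.
	 */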
	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (id == 0)
		return;

	if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(xdp_rxq->reg_state != REG_STATE_REGISTERED, "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index, unsigned int napi_id)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
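
/* A minimal sketch of typical driver usage at RX ring setup time
 * (hypothetical names, error handling elided):
 *
 *	err = xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, rxq->queue_index,
 *			       rxq->napi.napi_id);
 *	if (err)
 *		goto err_free_ring;
 */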

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}
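
/* E.g. once the range above mem_id_next is exhausted, ida_simple_get()
 * returns -ENOSPC, mem_id_next is reset once to MEM_ID_MIN, and the
 * single retry rescans the whole ID space for a free slot before
 * giving up.
 */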

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
		xdp_rxq->mem.id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);

	mutex_unlock(&mem_id_lock);

	trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
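
/* A minimal sketch of attaching a page_pool as the rxq memory model
 * (hypothetical driver names, pool parameters and error handling
 * elided):
 *
 *	rxq->page_pool = page_pool_create(&pp_params);
 *	...
 *	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL,
 *					 rxq->page_pool);
 */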

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, thus allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 struct xdp_buff *xdp)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		napi_direct &= !xdp_return_frame_no_direct();
		page_pool_put_full_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order-0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
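
/* A minimal sketch of bulk-freeing frames, e.g. from a TX completion
 * path (hypothetical names; the caller must hold rcu_read_lock()):
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock();
 *	for (i = 0; i < done; i++)
 *		xdp_return_frame_bulk(tx_ring->frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */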

void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}

/* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);
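
/* Copy the packet (and any metadata) out of a zero-copy AF_XDP buffer
 * into a freshly allocated order-0 page, so the resulting xdp_frame can
 * outlive the umem; the original buffer is recycled via xsk_buff_free().
 */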
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);