// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

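/* Callback registered with the page_pool via page_pool_use_xdp_mem();
 * removes every lookup-table entry that still points at @allocator.
 */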
static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

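/* Remove a single mem.id from the lookup table; used when unregistering
 * a MEM_TYPE_ZERO_COPY memory model.
 */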
static void mem_id_disconnect(int id)
{
	struct xdp_mem_allocator *xa;

	mutex_lock(&mem_id_lock);

	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
	if (!xa) {
		mutex_unlock(&mem_id_lock);
		WARN(1, "Request remove non-existing id(%d), driver bug?", id);
		return;
	}

	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

	mutex_unlock(&mem_id_lock);
}

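/* Detach the memory model from an RX queue: zero-copy IDs are removed
 * directly, while page_pool allocators are shut down via page_pool_destroy().
 */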
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (id == 0)
		return;

	if (xdp_rxq->mem.type == MEM_TYPE_ZERO_COPY)
		return mem_id_disconnect(id);

	if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

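/* Lazily allocate the mem.id -> xdp_mem_allocator lookup table the first
 * time a memory model that needs it is registered.
 */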
static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

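/* Attach a memory model to a registered RX queue.  MEM_TYPE_PAGE_POOL and
 * MEM_TYPE_ZERO_COPY require an allocator, which gets a cyclic mem.id and
 * is inserted into the lookup table used on the return/free path.
 */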
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
		xdp_rxq->mem.id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);

	mutex_unlock(&mem_id_lock);

	trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection.  The @napi_direct boolean
 * is used for those call sites, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 unsigned long handle)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		napi_direct &= !xdp_return_frame_no_direct();
		page_pool_put_full_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_ZERO_COPY:
		/* NB! Only valid from an xdp_buff! */
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		xa->zc_alloc->free(xa->zc_alloc, handle);
		rcu_read_unlock();
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

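/* Helpers for drivers that track their currently attached XDP program
 * through struct xdp_attachment_info.
 */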
int xdp_attachment_query(struct xdp_attachment_info *info,
			 struct netdev_bpf *bpf)
{
	bpf->prog_id = info->prog ? info->prog->aux->id : 0;
	bpf->prog_flags = info->prog ? info->flags : 0;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_attachment_query);

bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
			     struct netdev_bpf *bpf)
{
	if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
		NL_SET_ERR_MSG(bpf->extack,
			       "program loaded with different flags");
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

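/* Copy a zero-copy xdp_buff (data plus any metadata) into a freshly
 * allocated order-0 page and return it as a regular xdp_frame.
 */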
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xdp_return_buff(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);