// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

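/* Pick the bucket for a storage element by hashing the selem's address;
 * the map's bucket array has 2^bucket_log entries.
 */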
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

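/* Memory charging is delegated to the owner-specific map ops (e.g. socket
 * storage charges against the socket's memory accounting). A map without
 * a charge callback is simply not charged.
 */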
static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

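/* An selem is doubly linked: into its owner's local_storage list (snode)
 * and into its map's bucket list (map_node). hlist_del_init_rcu() leaves
 * a deleted node "unhashed", so these checks double as "already unlinked"
 * tests.
 */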
static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
				GFP_ATOMIC | __GFP_NOWARN);
	if (selem) {
		if (value)
			memcpy(SDATA(selem)->data, value, smap->map.value_size);
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

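/* Called via call_rcu_tasks_trace() once sleepable programs can no longer
 * reach the storage. The nested kfree_rcu() then waits for a regular RCU
 * grace period as well, so the memory is freed only after both sleepable
 * (RCU tasks trace) and non-sleepable (vanilla RCU) readers are done.
 */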
void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	kfree_rcu(local_storage, rcu);
}

static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	kfree_rcu(selem, rcu);
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
				     struct bpf_local_storage_elem *selem,
				     bool uncharge_mem)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now. local_storage->lock is
		 * still held and raw_spin_unlock_bh(&local_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after the raw_spin_unlock_bh(&local_storage->lock).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
	return free_local_storage;
}

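/* Lock-free check first, then a re-check under local_storage->lock: the
 * selem may be unlinked concurrently, so only the locked check is
 * authoritative.
 */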
static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage *local_storage;
	bool free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage(selem)))
		/* selem has already been unlinked from its local_storage */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage)
		call_rcu_tasks_trace(&local_storage->rcu,
				     bpf_local_storage_free_rcu);
}

void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after successfully unlinked from
	 * the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	__bpf_selem_unlink_storage(selem);
}

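/* Look up this map's sdata in the owner's local_storage. A per-storage
 * cache of BPF_LOCAL_STORAGE_CACHE_SIZE slots keeps the common
 * one-or-few-maps case O(1); on a miss, fall back to walking the
 * storage's selem list. With cacheit_lockit, the result is promoted
 * into the map's cache slot under local_storage->lock.
 */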
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				 rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* spinlock is needed to avoid racing with a
		 * parallel delete. Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}

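/* Validate BPF_NOEXIST/BPF_EXIST against whether an old element exists.
 * BPF_F_LOCK is masked out because it may be combined with either flag.
 */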
static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

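/* Create the owner's very first bpf_local_storage, link first_selem into
 * it, and publish the storage to the owner with cmpxchg(). Losing the
 * publish race to a parallel allocation returns -EAGAIN so the caller can
 * retry against the winner's storage.
 */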
int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
				  GFP_ATOMIC | __GFP_NOWARN);
	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless of what
	 * the running context is: bh, irq, etc.
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock. Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(storage);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

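/* Update (or insert) this map's value for the owner, honoring BPF_NOEXIST,
 * BPF_EXIST and BPF_F_LOCK. An update allocates a new selem, publishes it,
 * and only then unlinks the old one, so concurrent RCU readers always see
 * either the old or the new value.
 */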
/* The owner (e.g. sk) cannot be going away while the caller is linking
 * a new elem to its storage (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, the elem will become a leak (and cause other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(&smap->map)))
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem);
		if (err) {
			kfree(selem);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do an inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away. It has just been checked before, so very
		 * unlikely. Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	/* local_storage->lock is held. Hence, we are sure
	 * we can unlink and uncharge the old_sdata successfully
	 * later. Hence, instead of charging the new selem now
	 * and then uncharging the old selem later (which may cause
	 * a potential but unnecessary charge failure), avoid taking
	 * a charge at all here (the "!old_sdata" check) and the
	 * old_sdata will not be uncharged later during
	 * bpf_selem_unlink_storage_nolock().
	 */
	selem = bpf_selem_alloc(smap, owner, value, !old_sdata);
	if (!selem) {
		err = -ENOMEM;
		goto unlock_err;
	}

	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						false);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return ERR_PTR(err);
}

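/* Assign a cache slot to a new map: pick the least-used index so that
 * lookups for as many maps as possible stay on the cache fast path.
 * Indices are usage-counted and recycled in
 * bpf_local_storage_cache_idx_free().
 */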
u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}

void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
				      u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

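/* Free a storage map once its refcount has dropped to zero: unlink every
 * remaining selem bucket by bucket, then free the buckets and the map.
 * The optional busy_counter (used by task local storage to prevent
 * deadlock from recursive helper calls) is bumped around each unlink.
 */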
void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map_bucket *b;
	unsigned int i;

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now. No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				__this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem);
			if (busy_counter) {
				__this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	kfree(smap);
}

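/* Sanity-check map attributes at creation time: local storage maps must
 * use BPF_F_NO_PREALLOC, be keyed by a 32-bit int (the owner is the real
 * key), have no max_entries limit, and carry BTF for both key and value.
 */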
int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}

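/* Allocate the map and its bucket array. The bucket count scales with the
 * number of possible CPUs (rounded up to a power of two, minimum 2), since
 * the buckets only serialize selem link/unlink, not lookups.
 */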
struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;

	smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
				 GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
	if (!smap->buckets) {
		kfree(smap);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size =
		sizeof(struct bpf_local_storage_elem) + attr->value_size;

	return smap;
}

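/* BTF check for the key: it must be a plain 32-bit integer (no bitfield
 * offset), matching the sizeof(int) key_size enforced at map creation.
 */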
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}