// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/slab.h>
#include <linux/bpf.h>

#include "map_in_map.h"

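/* Allocate a bare "meta" copy of the map behind @inner_map_ufd: an
 * element-less struct bpf_map (a full struct bpf_array for array maps)
 * that records only the attributes later inner maps are validated
 * against -- type, key/value size, flags, max_entries, spin_lock_off,
 * and index_mask for arrays.
 */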
struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
{
	struct bpf_map *inner_map, *inner_map_meta;
	u32 inner_map_meta_size;
	struct fd f;

	f = fdget(inner_map_ufd);
	inner_map = __bpf_map_get(f);
	if (IS_ERR(inner_map))
		return inner_map;

	/* Does not support >1 level map-in-map */
	if (inner_map->inner_map_meta) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	if (!inner_map->ops->map_meta_equal) {
		fdput(f);
		return ERR_PTR(-ENOTSUPP);
	}

	if (map_value_has_spin_lock(inner_map)) {
		fdput(f);
		return ERR_PTR(-ENOTSUPP);
	}

	inner_map_meta_size = sizeof(*inner_map_meta);
	/* In some cases verifier needs to access beyond just base map. */
	if (inner_map->ops == &array_map_ops)
		inner_map_meta_size = sizeof(struct bpf_array);

	inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
	if (!inner_map_meta) {
		fdput(f);
		return ERR_PTR(-ENOMEM);
	}

	inner_map_meta->map_type = inner_map->map_type;
	inner_map_meta->key_size = inner_map->key_size;
	inner_map_meta->value_size = inner_map->value_size;
	inner_map_meta->map_flags = inner_map->map_flags;
	inner_map_meta->max_entries = inner_map->max_entries;
	inner_map_meta->spin_lock_off = inner_map->spin_lock_off;

	/* Misc members not needed in bpf_map_meta_equal() check. */
	inner_map_meta->ops = inner_map->ops;
	if (inner_map->ops == &array_map_ops) {
		inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1;
		container_of(inner_map_meta, struct bpf_array, map)->index_mask =
		     container_of(inner_map, struct bpf_array, map)->index_mask;
	}

	fdput(f);
	return inner_map_meta;
}

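/* Free a meta map created by bpf_map_meta_alloc(). */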
void bpf_map_meta_free(struct bpf_map *map_meta)
{
	kfree(map_meta);
}

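/* Default ->map_meta_equal() callback: two maps are interchangeable as
 * inner maps if their type, key/value size and flags all match.
 * max_entries is deliberately not compared here; map types that depend
 * on it (arrays, for instance, whose index_mask derives from it)
 * supply their own callback.
 */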
bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1)
{
	/* No need to compare ops because it is covered by map_type */
	return meta0->map_type == meta1->map_type &&
		meta0->key_size == meta1->key_size &&
		meta0->value_size == meta1->value_size &&
		meta0->map_flags == meta1->map_flags;
}

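/* Called when an element of an outer map is updated: resolve @ufd,
 * check the map it refers to against the outer @map's inner_map_meta
 * template via the ->map_meta_equal() callback, and take a reference
 * on it if compatible.  The returned pointer is what gets stored as
 * the outer map's element value.
 */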
void *bpf_map_fd_get_ptr(struct bpf_map *map,
			 struct file *map_file /* not used */,
			 int ufd)
{
	struct bpf_map *inner_map, *inner_map_meta;
	struct fd f;

	f = fdget(ufd);
	inner_map = __bpf_map_get(f);
	if (IS_ERR(inner_map))
		return inner_map;

	inner_map_meta = map->inner_map_meta;
	if (inner_map_meta->ops->map_meta_equal(inner_map_meta, inner_map))
		bpf_map_inc(inner_map);
	else
		inner_map = ERR_PTR(-EINVAL);

	fdput(f);
	return inner_map;
}

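/* Release the reference taken by bpf_map_fd_get_ptr(). */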
void bpf_map_fd_put_ptr(void *ptr)
{
	/* ptr->ops->map_free() has to go through one
	 * rcu grace period by itself.
	 */
	bpf_map_put(ptr);
}

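/* What a syscall-side lookup on an outer map hands back to userspace:
 * the inner map's id rather than a kernel pointer or an fd, since fds
 * are only meaningful within the owning process.
 */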
u32 bpf_map_fd_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_map *)ptr)->id;
}