/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(void __user *uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
		return -EFAULT;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end  = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}
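
/*
 * Illustrative caller pattern (editorial sketch, not part of the original
 * file; uinfo/info/info_len are placeholder names): syscall handlers that
 * accept a versioned struct from user space typically validate the tail
 * before copying, e.g.
 *
 *	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
 *	if (err)
 *		return err;
 *	info_len = min_t(u32, sizeof(info), info_len);
 *	if (copy_from_user(&info, uinfo, info_len))
 *		return -EFAULT;
 */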

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

void *bpf_map_area_alloc(size_t size, int numa_node)
{
	/* We definitely need __GFP_NORETRY, so the OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, GFP_USER | flags, numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
					   __builtin_return_address(0));
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = attr->map_flags;
	map->numa_node = bpf_map_attr_numa_node(attr);
}

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
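
/*
 * Worked example (editorial note): with a 4 KiB PAGE_SIZE and an
 * RLIMIT_MEMLOCK of 64 MiB, memlock_limit above is 16384 pages. A precharge
 * of 1000 pages succeeds as long as the user's current locked_vm plus 1000
 * stays at or below 16384; otherwise -EPERM is returned and the caller is
 * expected to fail the allocation.
 */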

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	security_bpf_map_free(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
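
/*
 * Editorial note: CHECK_ATTR(CMD) evaluates to true ("bad attr") when any
 * byte between the end of the command's last used field and the end of
 * union bpf_attr is non-zero. For example, with BPF_MAP_CREATE_LAST_FIELD
 * defined as btf_value_type_id below, CHECK_ATTR(BPF_MAP_CREATE) scans
 * everything after attr->btf_value_type_id and rejects the attribute if a
 * newer user space set fields this kernel does not know about.
 */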

/* dst and src must have at least BPF_OBJ_NAME_LEN bytes.
 * Return 0 on success and < 0 on error.
 */
static int bpf_obj_name_cpy(char *dst, const char *src)
{
	const char *end = src + BPF_OBJ_NAME_LEN;

	memset(dst, 0, BPF_OBJ_NAME_LEN);

	/* Copy all isalnum() and '_' chars */
	while (src < end && *src) {
		if (!isalnum(*src) && *src != '_')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found within BPF_OBJ_NAME_LEN bytes */
	if (src == end)
		return -EINVAL;

	return 0;
}
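
/*
 * Editorial examples: "my_map_1" is accepted; "my-map" is rejected because
 * '-' is neither alphanumeric nor '_'; a name that fills all
 * BPF_OBJ_NAME_LEN bytes without a terminating NUL is rejected as well.
 */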

#define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name);
	if (err)
		goto free_map_nouncharge;

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	if (bpf_map_support_seq_show(map) &&
	    (attr->btf_key_type_id || attr->btf_value_type_id)) {
		struct btf *btf;

		if (!attr->btf_key_type_id || !attr->btf_value_type_id) {
			err = -EINVAL;
			goto free_map_nouncharge;
		}

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map_nouncharge;
		}

		err = map->ops->map_check_btf(map, btf, attr->btf_key_type_id,
					      attr->btf_value_type_id);
		if (err) {
			btf_put(btf);
			goto free_map_nouncharge;
		}

		map->btf = btf;
		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_sec:
	security_bpf_map_free(map);
free_map_nouncharge:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}
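
/*
 * Illustrative user-space usage (editorial sketch, not part of this file;
 * assumes the <linux/bpf.h> UAPI definitions and the bpf(2) syscall number):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 16,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success map_fd is a new file descriptor referring to the map created
 * by map_create() above.
 */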

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
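
/*
 * Illustrative user-space lookup (editorial sketch; attr, key and value are
 * caller-provided placeholders). For per-CPU maps the value buffer must hold
 * round_up(value_size, 8) * num_possible_cpus() bytes, mirroring the
 * value_size computation above; for ordinary maps map->value_size bytes are
 * enough:
 *
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)&value;
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */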

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
		goto out;
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		goto out;
	}

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
out:
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
out:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
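
/*
 * Editorial note on iteration: passing attr->key == NULL (ukey == NULL
 * above) asks for the first key, so user space can walk a map by starting
 * with a NULL key and feeding each returned next_key back in until the
 * underlying ->map_get_next_key() implementation reports -ENOENT.
 */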

static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		int i;

		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);

		for (i = 0; i < prog->aux->func_cnt; i++)
			bpf_prog_kallsyms_del(prog->aux->func[i]);
		bpf_prog_kallsyms_del(prog);

		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
| 1070 | "prog_type:\t%u\n" |
| 1071 | "prog_jited:\t%u\n" |
Daniel Borkmann | f1f7714 | 2017-01-13 23:38:15 +0100 | [diff] [blame] | 1072 | "prog_tag:\t%s\n" |
Daniel Borkmann | 7bd509e | 2016-12-04 23:19:41 +0100 | [diff] [blame] | 1073 | "memlock:\t%llu\n", |
| 1074 | prog->type, |
| 1075 | prog->jited, |
Daniel Borkmann | f1f7714 | 2017-01-13 23:38:15 +0100 | [diff] [blame] | 1076 | prog_tag, |
Daniel Borkmann | 7bd509e | 2016-12-04 23:19:41 +0100 | [diff] [blame] | 1077 | prog->pages * 1ULL << PAGE_SHIFT); |
| 1078 | } |
| 1079 | #endif |
| 1080 | |
Chenbo Feng | f66e448 | 2017-10-18 13:00:26 -0700 | [diff] [blame] | 1081 | const struct file_operations bpf_prog_fops = { |
Daniel Borkmann | 7bd509e | 2016-12-04 23:19:41 +0100 | [diff] [blame] | 1082 | #ifdef CONFIG_PROC_FS |
| 1083 | .show_fdinfo = bpf_prog_show_fdinfo, |
| 1084 | #endif |
| 1085 | .release = bpf_prog_release, |
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 1086 | .read = bpf_dummy_read, |
| 1087 | .write = bpf_dummy_write, |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1088 | }; |
| 1089 | |
Daniel Borkmann | b219775 | 2015-10-29 14:58:09 +0100 | [diff] [blame] | 1090 | int bpf_prog_new_fd(struct bpf_prog *prog) |
Daniel Borkmann | aa79781 | 2015-10-29 14:58:06 +0100 | [diff] [blame] | 1091 | { |
Chenbo Feng | afdb09c | 2017-10-18 13:00:24 -0700 | [diff] [blame] | 1092 | int ret; |
| 1093 | |
| 1094 | ret = security_bpf_prog(prog); |
| 1095 | if (ret < 0) |
| 1096 | return ret; |
| 1097 | |
Daniel Borkmann | aa79781 | 2015-10-29 14:58:06 +0100 | [diff] [blame] | 1098 | return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, |
| 1099 | O_RDWR | O_CLOEXEC); |
| 1100 | } |
| 1101 | |
Daniel Borkmann | 113214b | 2016-06-30 17:24:44 +0200 | [diff] [blame] | 1102 | static struct bpf_prog *____bpf_prog_get(struct fd f) |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1103 | { |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1104 | if (!f.file) |
| 1105 | return ERR_PTR(-EBADF); |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1106 | if (f.file->f_op != &bpf_prog_fops) { |
| 1107 | fdput(f); |
| 1108 | return ERR_PTR(-EINVAL); |
| 1109 | } |
| 1110 | |
Daniel Borkmann | c210129 | 2015-10-29 14:58:07 +0100 | [diff] [blame] | 1111 | return f.file->private_data; |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1112 | } |
| 1113 | |
Brenden Blanco | 59d3656 | 2016-07-19 12:16:46 -0700 | [diff] [blame] | 1114 | struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) |
Alexei Starovoitov | 92117d8 | 2016-04-27 18:56:20 -0700 | [diff] [blame] | 1115 | { |
Brenden Blanco | 59d3656 | 2016-07-19 12:16:46 -0700 | [diff] [blame] | 1116 | if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) { |
| 1117 | atomic_sub(i, &prog->aux->refcnt); |
Alexei Starovoitov | 92117d8 | 2016-04-27 18:56:20 -0700 | [diff] [blame] | 1118 | return ERR_PTR(-EBUSY); |
| 1119 | } |
| 1120 | return prog; |
| 1121 | } |
Brenden Blanco | 59d3656 | 2016-07-19 12:16:46 -0700 | [diff] [blame] | 1122 | EXPORT_SYMBOL_GPL(bpf_prog_add); |
| 1123 | |
Daniel Borkmann | c540594 | 2016-11-09 22:02:34 +0100 | [diff] [blame] | 1124 | void bpf_prog_sub(struct bpf_prog *prog, int i) |
| 1125 | { |
 | 1126 | 	/* Only to be used for undoing a previous bpf_prog_add() in an |
 | 1127 | 	 * error path. We still know that another entity in our call |
 | 1128 | 	 * path holds a reference to the program, so atomic_sub() can |
 | 1129 | 	 * be used safely in such cases. |
| 1130 | */ |
| 1131 | WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0); |
| 1132 | } |
| 1133 | EXPORT_SYMBOL_GPL(bpf_prog_sub); |
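
/*
 * A minimal, hypothetical in-kernel sketch of the pattern described in the
 * comment above (not part of this file): a driver takes one reference per
 * RX queue up front with bpf_prog_add() and undoes them with bpf_prog_sub()
 * when a later setup step fails, while its caller still holds a reference
 * of its own to @prog.  example_setup_queues() is a made-up helper.
 */
static int example_install_prog(struct bpf_prog *prog, int nr_queues)
{
	int err;

	prog = bpf_prog_add(prog, nr_queues);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = example_setup_queues(prog, nr_queues);
	if (err) {
		/* undo only the references taken above; caller's ref survives */
		bpf_prog_sub(prog, nr_queues);
		return err;
	}
	return 0;
}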
| 1134 | |
Brenden Blanco | 59d3656 | 2016-07-19 12:16:46 -0700 | [diff] [blame] | 1135 | struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) |
| 1136 | { |
| 1137 | return bpf_prog_add(prog, 1); |
| 1138 | } |
Daniel Borkmann | 97bc402 | 2016-11-19 01:45:00 +0100 | [diff] [blame] | 1139 | EXPORT_SYMBOL_GPL(bpf_prog_inc); |
Alexei Starovoitov | 92117d8 | 2016-04-27 18:56:20 -0700 | [diff] [blame] | 1140 | |
Martin KaFai Lau | b16d9aa | 2017-06-05 12:15:49 -0700 | [diff] [blame] | 1141 | /* prog_idr_lock should be held by the caller */ |
John Fastabend | a6f6df6 | 2017-08-15 22:32:22 -0700 | [diff] [blame] | 1142 | struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) |
Martin KaFai Lau | b16d9aa | 2017-06-05 12:15:49 -0700 | [diff] [blame] | 1143 | { |
| 1144 | int refold; |
| 1145 | |
| 1146 | refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0); |
| 1147 | |
| 1148 | if (refold >= BPF_MAX_REFCNT) { |
| 1149 | __bpf_prog_put(prog, false); |
| 1150 | return ERR_PTR(-EBUSY); |
| 1151 | } |
| 1152 | |
| 1153 | if (!refold) |
| 1154 | return ERR_PTR(-ENOENT); |
| 1155 | |
| 1156 | return prog; |
| 1157 | } |
John Fastabend | a6f6df6 | 2017-08-15 22:32:22 -0700 | [diff] [blame] | 1158 | EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); |
Martin KaFai Lau | b16d9aa | 2017-06-05 12:15:49 -0700 | [diff] [blame] | 1159 | |
Al Viro | 040ee69 | 2017-12-02 20:20:38 -0500 | [diff] [blame] | 1160 | bool bpf_prog_get_ok(struct bpf_prog *prog, |
Jakub Kicinski | 288b3de | 2017-11-20 15:21:54 -0800 | [diff] [blame] | 1161 | enum bpf_prog_type *attach_type, bool attach_drv) |
Jakub Kicinski | 248f346 | 2017-11-03 13:56:20 -0700 | [diff] [blame] | 1162 | { |
Jakub Kicinski | 288b3de | 2017-11-20 15:21:54 -0800 | [diff] [blame] | 1163 | /* not an attachment, just a refcount inc, always allow */ |
| 1164 | if (!attach_type) |
| 1165 | return true; |
Jakub Kicinski | 248f346 | 2017-11-03 13:56:20 -0700 | [diff] [blame] | 1166 | |
| 1167 | if (prog->type != *attach_type) |
| 1168 | return false; |
Jakub Kicinski | 288b3de | 2017-11-20 15:21:54 -0800 | [diff] [blame] | 1169 | if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) |
Jakub Kicinski | 248f346 | 2017-11-03 13:56:20 -0700 | [diff] [blame] | 1170 | return false; |
| 1171 | |
| 1172 | return true; |
| 1173 | } |
| 1174 | |
| 1175 | static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, |
Jakub Kicinski | 288b3de | 2017-11-20 15:21:54 -0800 | [diff] [blame] | 1176 | bool attach_drv) |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1177 | { |
| 1178 | struct fd f = fdget(ufd); |
| 1179 | struct bpf_prog *prog; |
| 1180 | |
Daniel Borkmann | 113214b | 2016-06-30 17:24:44 +0200 | [diff] [blame] | 1181 | prog = ____bpf_prog_get(f); |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1182 | if (IS_ERR(prog)) |
| 1183 | return prog; |
Jakub Kicinski | 288b3de | 2017-11-20 15:21:54 -0800 | [diff] [blame] | 1184 | if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { |
Daniel Borkmann | 113214b | 2016-06-30 17:24:44 +0200 | [diff] [blame] | 1185 | prog = ERR_PTR(-EINVAL); |
| 1186 | goto out; |
| 1187 | } |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1188 | |
Alexei Starovoitov | 92117d8 | 2016-04-27 18:56:20 -0700 | [diff] [blame] | 1189 | prog = bpf_prog_inc(prog); |
Daniel Borkmann | 113214b | 2016-06-30 17:24:44 +0200 | [diff] [blame] | 1190 | out: |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1191 | fdput(f); |
| 1192 | return prog; |
| 1193 | } |
Daniel Borkmann | 113214b | 2016-06-30 17:24:44 +0200 | [diff] [blame] | 1194 | |
| 1195 | struct bpf_prog *bpf_prog_get(u32 ufd) |
| 1196 | { |
Jakub Kicinski | 288b3de | 2017-11-20 15:21:54 -0800 | [diff] [blame] | 1197 | return __bpf_prog_get(ufd, NULL, false); |
Daniel Borkmann | 113214b | 2016-06-30 17:24:44 +0200 | [diff] [blame] | 1198 | } |
| 1199 | |
Jakub Kicinski | 248f346 | 2017-11-03 13:56:20 -0700 | [diff] [blame] | 1200 | struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, |
Jakub Kicinski | 288b3de | 2017-11-20 15:21:54 -0800 | [diff] [blame] | 1201 | bool attach_drv) |
Jakub Kicinski | 248f346 | 2017-11-03 13:56:20 -0700 | [diff] [blame] | 1202 | { |
Alexei Starovoitov | 4d220ed | 2018-04-28 19:56:37 -0700 | [diff] [blame] | 1203 | return __bpf_prog_get(ufd, &type, attach_drv); |
Jakub Kicinski | 248f346 | 2017-11-03 13:56:20 -0700 | [diff] [blame] | 1204 | } |
Jakub Kicinski | 6c8dfe2 | 2017-11-03 13:56:21 -0700 | [diff] [blame] | 1205 | EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); |
Jakub Kicinski | 248f346 | 2017-11-03 13:56:20 -0700 | [diff] [blame] | 1206 | |
Andrey Ignatov | aac3fc3 | 2018-03-30 15:08:07 -0700 | [diff] [blame] | 1207 | /* Initially all BPF programs could be loaded without specifying |
 | 1208 |  * expected_attach_type. Later, specifying it at load time became |
 | 1209 |  * required for some program types so that the program can be validated |
 | 1210 |  * properly. Program types that may be loaded both with and without |
 | 1211 |  * expected_attach_type (the latter for backward compatibility) should |
 | 1212 |  * have a default attach type assigned to expected_attach_type in the |
 | 1213 |  * latter case, so that it can still be validated at attach time. |
 | 1214 |  * |
 | 1215 |  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr |
 | 1216 |  * if the prog type requires it but has attach types that must stay |
 | 1217 |  * backward compatible. |
| 1218 | */ |
| 1219 | static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) |
| 1220 | { |
| 1221 | switch (attr->prog_type) { |
| 1222 | case BPF_PROG_TYPE_CGROUP_SOCK: |
 | 1223 | 		/* Unfortunately the BPF_ATTACH_TYPE_UNSPEC enumeration doesn't |
 | 1224 | 		 * exist, so checking for non-zero is the way to go here. |
| 1225 | */ |
| 1226 | if (!attr->expected_attach_type) |
| 1227 | attr->expected_attach_type = |
| 1228 | BPF_CGROUP_INET_SOCK_CREATE; |
| 1229 | break; |
| 1230 | } |
| 1231 | } |
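
/*
 * Userspace view of the fixup above (illustrative sketch, not part of this
 * file): for BPF_PROG_TYPE_CGROUP_SOCK, leaving expected_attach_type at 0
 * when loading is treated the same as passing BPF_CGROUP_INET_SOCK_CREATE,
 * so older loaders keep working while newer ones can be explicit.  The
 * helper name is made up.
 */
#include <string.h>
#include <linux/bpf.h>

static void example_fill_sock_create_attr(union bpf_attr *attr, int explicit)
{
	memset(attr, 0, sizeof(*attr));
	attr->prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
	/* 0 (i.e. unset) and BPF_CGROUP_INET_SOCK_CREATE load identically */
	attr->expected_attach_type = explicit ? BPF_CGROUP_INET_SOCK_CREATE : 0;
}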
| 1232 | |
Andrey Ignatov | 5e43f89 | 2018-03-30 15:08:00 -0700 | [diff] [blame] | 1233 | static int |
| 1234 | bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type, |
| 1235 | enum bpf_attach_type expected_attach_type) |
| 1236 | { |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 1237 | switch (prog_type) { |
Andrey Ignatov | aac3fc3 | 2018-03-30 15:08:07 -0700 | [diff] [blame] | 1238 | case BPF_PROG_TYPE_CGROUP_SOCK: |
| 1239 | switch (expected_attach_type) { |
| 1240 | case BPF_CGROUP_INET_SOCK_CREATE: |
| 1241 | case BPF_CGROUP_INET4_POST_BIND: |
| 1242 | case BPF_CGROUP_INET6_POST_BIND: |
| 1243 | return 0; |
| 1244 | default: |
| 1245 | return -EINVAL; |
| 1246 | } |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 1247 | case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: |
| 1248 | switch (expected_attach_type) { |
| 1249 | case BPF_CGROUP_INET4_BIND: |
| 1250 | case BPF_CGROUP_INET6_BIND: |
Andrey Ignatov | d74bad4 | 2018-03-30 15:08:05 -0700 | [diff] [blame] | 1251 | case BPF_CGROUP_INET4_CONNECT: |
| 1252 | case BPF_CGROUP_INET6_CONNECT: |
Andrey Ignatov | 1cedee1 | 2018-05-25 08:55:23 -0700 | [diff] [blame] | 1253 | case BPF_CGROUP_UDP4_SENDMSG: |
| 1254 | case BPF_CGROUP_UDP6_SENDMSG: |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 1255 | return 0; |
| 1256 | default: |
| 1257 | return -EINVAL; |
| 1258 | } |
| 1259 | default: |
| 1260 | return 0; |
| 1261 | } |
Andrey Ignatov | 5e43f89 | 2018-03-30 15:08:00 -0700 | [diff] [blame] | 1262 | } |
| 1263 | |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1264 | /* last field in 'union bpf_attr' used by this command */ |
Andrey Ignatov | 5e43f89 | 2018-03-30 15:08:00 -0700 | [diff] [blame] | 1265 | #define BPF_PROG_LOAD_LAST_FIELD expected_attach_type |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1266 | |
| 1267 | static int bpf_prog_load(union bpf_attr *attr) |
| 1268 | { |
| 1269 | enum bpf_prog_type type = attr->prog_type; |
| 1270 | struct bpf_prog *prog; |
| 1271 | int err; |
| 1272 | char license[128]; |
| 1273 | bool is_gpl; |
| 1274 | |
| 1275 | if (CHECK_ATTR(BPF_PROG_LOAD)) |
| 1276 | return -EINVAL; |
| 1277 | |
David S. Miller | e07b98d | 2017-05-10 11:38:07 -0700 | [diff] [blame] | 1278 | if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT) |
| 1279 | return -EINVAL; |
| 1280 | |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1281 | /* copy eBPF program license from user space */ |
Mickaël Salaün | 535e7b4b | 2016-11-13 19:44:03 +0100 | [diff] [blame] | 1282 | if (strncpy_from_user(license, u64_to_user_ptr(attr->license), |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1283 | sizeof(license) - 1) < 0) |
| 1284 | return -EFAULT; |
| 1285 | license[sizeof(license) - 1] = 0; |
| 1286 | |
| 1287 | /* eBPF programs must be GPL compatible to use GPL-ed functions */ |
| 1288 | is_gpl = license_is_gpl_compatible(license); |
| 1289 | |
Daniel Borkmann | ef0915c | 2016-12-07 01:15:44 +0100 | [diff] [blame] | 1290 | if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS) |
| 1291 | return -E2BIG; |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1292 | |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 1293 | if (type == BPF_PROG_TYPE_KPROBE && |
| 1294 | attr->kern_version != LINUX_VERSION_CODE) |
| 1295 | return -EINVAL; |
| 1296 | |
Chenbo Feng | 80b7d81 | 2017-05-31 18:16:00 -0700 | [diff] [blame] | 1297 | if (type != BPF_PROG_TYPE_SOCKET_FILTER && |
| 1298 | type != BPF_PROG_TYPE_CGROUP_SKB && |
| 1299 | !capable(CAP_SYS_ADMIN)) |
Alexei Starovoitov | 1be7f75 | 2015-10-07 22:23:21 -0700 | [diff] [blame] | 1300 | return -EPERM; |
| 1301 | |
Andrey Ignatov | aac3fc3 | 2018-03-30 15:08:07 -0700 | [diff] [blame] | 1302 | bpf_prog_load_fixup_attach_type(attr); |
Andrey Ignatov | 5e43f89 | 2018-03-30 15:08:00 -0700 | [diff] [blame] | 1303 | if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type)) |
| 1304 | return -EINVAL; |
| 1305 | |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1306 | /* plain bpf_prog allocation */ |
| 1307 | prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); |
| 1308 | if (!prog) |
| 1309 | return -ENOMEM; |
| 1310 | |
Andrey Ignatov | 5e43f89 | 2018-03-30 15:08:00 -0700 | [diff] [blame] | 1311 | prog->expected_attach_type = attr->expected_attach_type; |
| 1312 | |
Jakub Kicinski | 9a18eed | 2017-12-27 18:39:04 -0800 | [diff] [blame] | 1313 | prog->aux->offload_requested = !!attr->prog_ifindex; |
| 1314 | |
Chenbo Feng | afdb09c | 2017-10-18 13:00:24 -0700 | [diff] [blame] | 1315 | err = security_bpf_prog_alloc(prog->aux); |
Alexei Starovoitov | aaac3ba | 2015-10-07 22:23:22 -0700 | [diff] [blame] | 1316 | if (err) |
| 1317 | goto free_prog_nouncharge; |
| 1318 | |
Chenbo Feng | afdb09c | 2017-10-18 13:00:24 -0700 | [diff] [blame] | 1319 | err = bpf_prog_charge_memlock(prog); |
| 1320 | if (err) |
| 1321 | goto free_prog_sec; |
| 1322 | |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1323 | prog->len = attr->insn_cnt; |
| 1324 | |
| 1325 | err = -EFAULT; |
Mickaël Salaün | 535e7b4b | 2016-11-13 19:44:03 +0100 | [diff] [blame] | 1326 | if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns), |
Daniel Borkmann | aafe6ae | 2016-12-18 01:52:57 +0100 | [diff] [blame] | 1327 | bpf_prog_insn_size(prog)) != 0) |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1328 | goto free_prog; |
| 1329 | |
| 1330 | prog->orig_prog = NULL; |
Daniel Borkmann | a91263d | 2015-09-30 01:41:50 +0200 | [diff] [blame] | 1331 | prog->jited = 0; |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1332 | |
| 1333 | atomic_set(&prog->aux->refcnt, 1); |
Daniel Borkmann | a91263d | 2015-09-30 01:41:50 +0200 | [diff] [blame] | 1334 | prog->gpl_compatible = is_gpl ? 1 : 0; |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1335 | |
Jakub Kicinski | 9a18eed | 2017-12-27 18:39:04 -0800 | [diff] [blame] | 1336 | if (bpf_prog_is_dev_bound(prog->aux)) { |
Jakub Kicinski | ab3f006 | 2017-11-03 13:56:17 -0700 | [diff] [blame] | 1337 | err = bpf_prog_offload_init(prog, attr); |
| 1338 | if (err) |
| 1339 | goto free_prog; |
| 1340 | } |
| 1341 | |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1342 | 	/* find program type and hook up its ops (socket filter, tracing, etc.) */ |
| 1343 | err = find_prog_type(type, prog); |
| 1344 | if (err < 0) |
| 1345 | goto free_prog; |
| 1346 | |
Martin KaFai Lau | cb4d2b3 | 2017-09-27 14:37:52 -0700 | [diff] [blame] | 1347 | prog->aux->load_time = ktime_get_boot_ns(); |
| 1348 | err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name); |
| 1349 | if (err) |
| 1350 | goto free_prog; |
| 1351 | |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1352 | /* run eBPF verifier */ |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 1353 | err = bpf_check(&prog, attr); |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1354 | if (err < 0) |
| 1355 | goto free_used_maps; |
| 1356 | |
| 1357 | /* eBPF program is ready to be JITed */ |
Alexei Starovoitov | 1c2a088 | 2017-12-14 17:55:15 -0800 | [diff] [blame] | 1358 | if (!prog->bpf_func) |
| 1359 | prog = bpf_prog_select_runtime(prog, &err); |
Alexei Starovoitov | 04fd61ab | 2015-05-19 16:59:03 -0700 | [diff] [blame] | 1360 | if (err < 0) |
| 1361 | goto free_used_maps; |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1362 | |
Martin KaFai Lau | dc4bb0e | 2017-06-05 12:15:46 -0700 | [diff] [blame] | 1363 | err = bpf_prog_alloc_id(prog); |
| 1364 | if (err) |
| 1365 | goto free_used_maps; |
| 1366 | |
Daniel Borkmann | aa79781 | 2015-10-29 14:58:06 +0100 | [diff] [blame] | 1367 | err = bpf_prog_new_fd(prog); |
Martin KaFai Lau | b16d9aa | 2017-06-05 12:15:49 -0700 | [diff] [blame] | 1368 | if (err < 0) { |
| 1369 | /* failed to allocate fd. |
| 1370 | * bpf_prog_put() is needed because the above |
| 1371 | * bpf_prog_alloc_id() has published the prog |
 | 1372 | 		 * to userspace, which may already have taken |
 | 1373 | 		 * a reference to it via BPF_PROG_GET_FD_BY_ID. |
| 1374 | */ |
| 1375 | bpf_prog_put(prog); |
| 1376 | return err; |
| 1377 | } |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1378 | |
Daniel Borkmann | 74451e66 | 2017-02-16 22:24:50 +0100 | [diff] [blame] | 1379 | bpf_prog_kallsyms_add(prog); |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1380 | return err; |
| 1381 | |
| 1382 | free_used_maps: |
| 1383 | free_used_maps(prog->aux); |
| 1384 | free_prog: |
Alexei Starovoitov | aaac3ba | 2015-10-07 22:23:22 -0700 | [diff] [blame] | 1385 | bpf_prog_uncharge_memlock(prog); |
Chenbo Feng | afdb09c | 2017-10-18 13:00:24 -0700 | [diff] [blame] | 1386 | free_prog_sec: |
| 1387 | security_bpf_prog_free(prog->aux); |
Alexei Starovoitov | aaac3ba | 2015-10-07 22:23:22 -0700 | [diff] [blame] | 1388 | free_prog_nouncharge: |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 1389 | bpf_prog_free(prog); |
| 1390 | return err; |
| 1391 | } |
| 1392 | |
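/*
 * Minimal userspace sketch of the BPF_PROG_LOAD path above (illustrative,
 * not part of this file; error handling trimmed).  It loads a trivial
 * socket filter that just returns 0, which needs no extra privileges on
 * most setups.  Returns a new prog fd, or -1 with errno set.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static __u64 ptr_to_u64(const void *p)
{
	return (__u64)(unsigned long)p;
}

static int example_prog_load(void)
{
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = 0 },	/* r0 = 0 */
		{ .code = BPF_JMP | BPF_EXIT },		/* return r0 */
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = sizeof(insns) / sizeof(insns[0]);
	attr.license = ptr_to_u64("GPL");

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
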
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 1393 | #define BPF_OBJ_LAST_FIELD file_flags |
Daniel Borkmann | b219775 | 2015-10-29 14:58:09 +0100 | [diff] [blame] | 1394 | |
| 1395 | static int bpf_obj_pin(const union bpf_attr *attr) |
| 1396 | { |
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 1397 | if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0) |
Daniel Borkmann | b219775 | 2015-10-29 14:58:09 +0100 | [diff] [blame] | 1398 | return -EINVAL; |
| 1399 | |
Mickaël Salaün | 535e7b4b | 2016-11-13 19:44:03 +0100 | [diff] [blame] | 1400 | return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); |
Daniel Borkmann | b219775 | 2015-10-29 14:58:09 +0100 | [diff] [blame] | 1401 | } |
| 1402 | |
| 1403 | static int bpf_obj_get(const union bpf_attr *attr) |
| 1404 | { |
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 1405 | if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || |
| 1406 | attr->file_flags & ~BPF_OBJ_FLAG_MASK) |
Daniel Borkmann | b219775 | 2015-10-29 14:58:09 +0100 | [diff] [blame] | 1407 | return -EINVAL; |
| 1408 | |
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 1409 | return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), |
| 1410 | attr->file_flags); |
Daniel Borkmann | b219775 | 2015-10-29 14:58:09 +0100 | [diff] [blame] | 1411 | } |
| 1412 | |
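/*
 * Userspace sketch of BPF_OBJ_PIN / BPF_OBJ_GET (illustrative, not part of
 * this file): pin an existing prog fd under the bpf filesystem (assumed to
 * be mounted at /sys/fs/bpf; the pin path is made up) and reopen it later
 * by path.  Each call returns -1 with errno set on failure.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int example_pin_and_reopen(int prog_fd)
{
	const char *path = "/sys/fs/bpf/example_prog";
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)path;
	attr.bpf_fd = prog_fd;
	if (syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr)) < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)path;
	/* returns a fresh fd referencing the same program */
	return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}
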
Alexei Starovoitov | c4f6699 | 2018-03-28 12:05:37 -0700 | [diff] [blame] | 1413 | struct bpf_raw_tracepoint { |
| 1414 | struct bpf_raw_event_map *btp; |
| 1415 | struct bpf_prog *prog; |
| 1416 | }; |
| 1417 | |
| 1418 | static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp) |
| 1419 | { |
| 1420 | struct bpf_raw_tracepoint *raw_tp = filp->private_data; |
| 1421 | |
| 1422 | if (raw_tp->prog) { |
| 1423 | bpf_probe_unregister(raw_tp->btp, raw_tp->prog); |
| 1424 | bpf_prog_put(raw_tp->prog); |
| 1425 | } |
| 1426 | kfree(raw_tp); |
| 1427 | return 0; |
| 1428 | } |
| 1429 | |
| 1430 | static const struct file_operations bpf_raw_tp_fops = { |
| 1431 | .release = bpf_raw_tracepoint_release, |
| 1432 | .read = bpf_dummy_read, |
| 1433 | .write = bpf_dummy_write, |
| 1434 | }; |
| 1435 | |
| 1436 | #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd |
| 1437 | |
| 1438 | static int bpf_raw_tracepoint_open(const union bpf_attr *attr) |
| 1439 | { |
| 1440 | struct bpf_raw_tracepoint *raw_tp; |
| 1441 | struct bpf_raw_event_map *btp; |
| 1442 | struct bpf_prog *prog; |
| 1443 | char tp_name[128]; |
| 1444 | int tp_fd, err; |
| 1445 | |
| 1446 | if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name), |
| 1447 | sizeof(tp_name) - 1) < 0) |
| 1448 | return -EFAULT; |
| 1449 | tp_name[sizeof(tp_name) - 1] = 0; |
| 1450 | |
| 1451 | btp = bpf_find_raw_tracepoint(tp_name); |
| 1452 | if (!btp) |
| 1453 | return -ENOENT; |
| 1454 | |
| 1455 | raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER); |
| 1456 | if (!raw_tp) |
| 1457 | return -ENOMEM; |
| 1458 | raw_tp->btp = btp; |
| 1459 | |
| 1460 | prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd, |
| 1461 | BPF_PROG_TYPE_RAW_TRACEPOINT); |
| 1462 | if (IS_ERR(prog)) { |
| 1463 | err = PTR_ERR(prog); |
| 1464 | goto out_free_tp; |
| 1465 | } |
| 1466 | |
| 1467 | err = bpf_probe_register(raw_tp->btp, prog); |
| 1468 | if (err) |
| 1469 | goto out_put_prog; |
| 1470 | |
| 1471 | raw_tp->prog = prog; |
| 1472 | tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp, |
| 1473 | O_CLOEXEC); |
| 1474 | if (tp_fd < 0) { |
| 1475 | bpf_probe_unregister(raw_tp->btp, prog); |
| 1476 | err = tp_fd; |
| 1477 | goto out_put_prog; |
| 1478 | } |
| 1479 | return tp_fd; |
| 1480 | |
| 1481 | out_put_prog: |
| 1482 | bpf_prog_put(prog); |
| 1483 | out_free_tp: |
| 1484 | kfree(raw_tp); |
| 1485 | return err; |
| 1486 | } |
| 1487 | |
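/*
 * Userspace sketch of BPF_RAW_TRACEPOINT_OPEN (illustrative, not part of
 * this file): attach an already loaded BPF_PROG_TYPE_RAW_TRACEPOINT program
 * to a raw tracepoint by name, e.g. "sched_switch".  The attachment lasts
 * as long as the returned fd stays open; -1 with errno set on failure.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int example_raw_tp_open(int prog_fd, const char *tp_name)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = (__u64)(unsigned long)tp_name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
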
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1488 | #ifdef CONFIG_CGROUP_BPF |
| 1489 | |
Anders Roxell | 3349158 | 2018-04-03 14:09:47 +0200 | [diff] [blame] | 1490 | static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, |
| 1491 | enum bpf_attach_type attach_type) |
| 1492 | { |
| 1493 | switch (prog->type) { |
| 1494 | case BPF_PROG_TYPE_CGROUP_SOCK: |
| 1495 | case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: |
| 1496 | return attach_type == prog->expected_attach_type ? 0 : -EINVAL; |
| 1497 | default: |
| 1498 | return 0; |
| 1499 | } |
| 1500 | } |
| 1501 | |
John Fastabend | 464bc0f | 2017-08-28 07:10:04 -0700 | [diff] [blame] | 1502 | #define BPF_PROG_ATTACH_LAST_FIELD attach_flags |
John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 1503 | |
John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 1504 | static int sockmap_get_from_fd(const union bpf_attr *attr, |
| 1505 | int type, bool attach) |
John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 1506 | { |
John Fastabend | 5a67da2 | 2017-09-08 14:00:49 -0700 | [diff] [blame] | 1507 | struct bpf_prog *prog = NULL; |
John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 1508 | int ufd = attr->target_fd; |
| 1509 | struct bpf_map *map; |
| 1510 | struct fd f; |
| 1511 | int err; |
| 1512 | |
| 1513 | f = fdget(ufd); |
| 1514 | map = __bpf_map_get(f); |
| 1515 | if (IS_ERR(map)) |
| 1516 | return PTR_ERR(map); |
| 1517 | |
John Fastabend | 5a67da2 | 2017-09-08 14:00:49 -0700 | [diff] [blame] | 1518 | if (attach) { |
John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 1519 | prog = bpf_prog_get_type(attr->attach_bpf_fd, type); |
John Fastabend | 5a67da2 | 2017-09-08 14:00:49 -0700 | [diff] [blame] | 1520 | if (IS_ERR(prog)) { |
| 1521 | fdput(f); |
| 1522 | return PTR_ERR(prog); |
| 1523 | } |
John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 1524 | } |
| 1525 | |
John Fastabend | 5a67da2 | 2017-09-08 14:00:49 -0700 | [diff] [blame] | 1526 | err = sock_map_prog(map, prog, attr->attach_type); |
John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 1527 | if (err) { |
| 1528 | fdput(f); |
John Fastabend | 5a67da2 | 2017-09-08 14:00:49 -0700 | [diff] [blame] | 1529 | if (prog) |
| 1530 | bpf_prog_put(prog); |
Dan Carpenter | ae2b27b | 2017-08-18 10:27:02 +0300 | [diff] [blame] | 1531 | return err; |
John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 1532 | } |
| 1533 | |
| 1534 | fdput(f); |
Dan Carpenter | ae2b27b | 2017-08-18 10:27:02 +0300 | [diff] [blame] | 1535 | return 0; |
John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 1536 | } |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1537 | |
Alexei Starovoitov | 324bda9e6 | 2017-10-02 22:50:21 -0700 | [diff] [blame] | 1538 | #define BPF_F_ATTACH_MASK \ |
| 1539 | (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI) |
| 1540 | |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1541 | static int bpf_prog_attach(const union bpf_attr *attr) |
| 1542 | { |
Alexei Starovoitov | 7f67763 | 2017-02-10 20:28:24 -0800 | [diff] [blame] | 1543 | enum bpf_prog_type ptype; |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1544 | struct bpf_prog *prog; |
| 1545 | struct cgroup *cgrp; |
Alexei Starovoitov | 7f67763 | 2017-02-10 20:28:24 -0800 | [diff] [blame] | 1546 | int ret; |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1547 | |
| 1548 | if (!capable(CAP_NET_ADMIN)) |
| 1549 | return -EPERM; |
| 1550 | |
| 1551 | if (CHECK_ATTR(BPF_PROG_ATTACH)) |
| 1552 | return -EINVAL; |
| 1553 | |
Alexei Starovoitov | 324bda9e6 | 2017-10-02 22:50:21 -0700 | [diff] [blame] | 1554 | if (attr->attach_flags & ~BPF_F_ATTACH_MASK) |
Alexei Starovoitov | 7f67763 | 2017-02-10 20:28:24 -0800 | [diff] [blame] | 1555 | return -EINVAL; |
| 1556 | |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1557 | switch (attr->attach_type) { |
| 1558 | case BPF_CGROUP_INET_INGRESS: |
| 1559 | case BPF_CGROUP_INET_EGRESS: |
David Ahern | b2cd125 | 2016-12-01 08:48:03 -0800 | [diff] [blame] | 1560 | ptype = BPF_PROG_TYPE_CGROUP_SKB; |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1561 | break; |
David Ahern | 61023658 | 2016-12-01 08:48:04 -0800 | [diff] [blame] | 1562 | case BPF_CGROUP_INET_SOCK_CREATE: |
Andrey Ignatov | aac3fc3 | 2018-03-30 15:08:07 -0700 | [diff] [blame] | 1563 | case BPF_CGROUP_INET4_POST_BIND: |
| 1564 | case BPF_CGROUP_INET6_POST_BIND: |
David Ahern | 61023658 | 2016-12-01 08:48:04 -0800 | [diff] [blame] | 1565 | ptype = BPF_PROG_TYPE_CGROUP_SOCK; |
| 1566 | break; |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 1567 | case BPF_CGROUP_INET4_BIND: |
| 1568 | case BPF_CGROUP_INET6_BIND: |
Andrey Ignatov | d74bad4 | 2018-03-30 15:08:05 -0700 | [diff] [blame] | 1569 | case BPF_CGROUP_INET4_CONNECT: |
| 1570 | case BPF_CGROUP_INET6_CONNECT: |
Andrey Ignatov | 1cedee1 | 2018-05-25 08:55:23 -0700 | [diff] [blame] | 1571 | case BPF_CGROUP_UDP4_SENDMSG: |
| 1572 | case BPF_CGROUP_UDP6_SENDMSG: |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 1573 | ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; |
| 1574 | break; |
Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 1575 | case BPF_CGROUP_SOCK_OPS: |
| 1576 | ptype = BPF_PROG_TYPE_SOCK_OPS; |
| 1577 | break; |
Roman Gushchin | ebc614f | 2017-11-05 08:15:32 -0500 | [diff] [blame] | 1578 | case BPF_CGROUP_DEVICE: |
| 1579 | ptype = BPF_PROG_TYPE_CGROUP_DEVICE; |
| 1580 | break; |
John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 1581 | case BPF_SK_MSG_VERDICT: |
| 1582 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true); |
John Fastabend | 464bc0f | 2017-08-28 07:10:04 -0700 | [diff] [blame] | 1583 | case BPF_SK_SKB_STREAM_PARSER: |
| 1584 | case BPF_SK_SKB_STREAM_VERDICT: |
John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 1585 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true); |
Sean Young | f4364dc | 2018-05-27 12:24:09 +0100 | [diff] [blame^] | 1586 | case BPF_LIRC_MODE2: |
| 1587 | return lirc_prog_attach(attr); |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1588 | default: |
| 1589 | return -EINVAL; |
| 1590 | } |
| 1591 | |
David Ahern | b2cd125 | 2016-12-01 08:48:03 -0800 | [diff] [blame] | 1592 | prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); |
| 1593 | if (IS_ERR(prog)) |
| 1594 | return PTR_ERR(prog); |
| 1595 | |
Andrey Ignatov | 5e43f89 | 2018-03-30 15:08:00 -0700 | [diff] [blame] | 1596 | if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { |
| 1597 | bpf_prog_put(prog); |
| 1598 | return -EINVAL; |
| 1599 | } |
| 1600 | |
David Ahern | b2cd125 | 2016-12-01 08:48:03 -0800 | [diff] [blame] | 1601 | cgrp = cgroup_get_from_fd(attr->target_fd); |
| 1602 | if (IS_ERR(cgrp)) { |
| 1603 | bpf_prog_put(prog); |
| 1604 | return PTR_ERR(cgrp); |
| 1605 | } |
| 1606 | |
Alexei Starovoitov | 324bda9e6 | 2017-10-02 22:50:21 -0700 | [diff] [blame] | 1607 | ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, |
| 1608 | attr->attach_flags); |
Alexei Starovoitov | 7f67763 | 2017-02-10 20:28:24 -0800 | [diff] [blame] | 1609 | if (ret) |
| 1610 | bpf_prog_put(prog); |
David Ahern | b2cd125 | 2016-12-01 08:48:03 -0800 | [diff] [blame] | 1611 | cgroup_put(cgrp); |
| 1612 | |
Alexei Starovoitov | 7f67763 | 2017-02-10 20:28:24 -0800 | [diff] [blame] | 1613 | return ret; |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1614 | } |
| 1615 | |
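/*
 * Userspace sketch of BPF_PROG_ATTACH for a cgroup hook (illustrative, not
 * part of this file): attach a BPF_PROG_TYPE_CGROUP_SKB program to the
 * ingress hook of a cgroup directory.  The cgroup path is an assumption;
 * adjust it for the local cgroup v2 mount point.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int example_cgroup_attach(int prog_fd, const char *cgroup_path)
{
	union bpf_attr attr;
	int cgroup_fd, err;

	cgroup_fd = open(cgroup_path, O_RDONLY | O_DIRECTORY);
	if (cgroup_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cgroup_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.attach_flags = BPF_F_ALLOW_MULTI;	/* coexist with other programs */

	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	close(cgroup_fd);
	return err;
}
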
| 1616 | #define BPF_PROG_DETACH_LAST_FIELD attach_type |
| 1617 | |
| 1618 | static int bpf_prog_detach(const union bpf_attr *attr) |
| 1619 | { |
Alexei Starovoitov | 324bda9e6 | 2017-10-02 22:50:21 -0700 | [diff] [blame] | 1620 | enum bpf_prog_type ptype; |
| 1621 | struct bpf_prog *prog; |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1622 | struct cgroup *cgrp; |
Alexei Starovoitov | 7f67763 | 2017-02-10 20:28:24 -0800 | [diff] [blame] | 1623 | int ret; |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1624 | |
| 1625 | if (!capable(CAP_NET_ADMIN)) |
| 1626 | return -EPERM; |
| 1627 | |
| 1628 | if (CHECK_ATTR(BPF_PROG_DETACH)) |
| 1629 | return -EINVAL; |
| 1630 | |
| 1631 | switch (attr->attach_type) { |
| 1632 | case BPF_CGROUP_INET_INGRESS: |
| 1633 | case BPF_CGROUP_INET_EGRESS: |
Alexei Starovoitov | 324bda9e6 | 2017-10-02 22:50:21 -0700 | [diff] [blame] | 1634 | ptype = BPF_PROG_TYPE_CGROUP_SKB; |
| 1635 | break; |
David Ahern | 61023658 | 2016-12-01 08:48:04 -0800 | [diff] [blame] | 1636 | case BPF_CGROUP_INET_SOCK_CREATE: |
Andrey Ignatov | aac3fc3 | 2018-03-30 15:08:07 -0700 | [diff] [blame] | 1637 | case BPF_CGROUP_INET4_POST_BIND: |
| 1638 | case BPF_CGROUP_INET6_POST_BIND: |
Alexei Starovoitov | 324bda9e6 | 2017-10-02 22:50:21 -0700 | [diff] [blame] | 1639 | ptype = BPF_PROG_TYPE_CGROUP_SOCK; |
| 1640 | break; |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 1641 | case BPF_CGROUP_INET4_BIND: |
| 1642 | case BPF_CGROUP_INET6_BIND: |
Andrey Ignatov | d74bad4 | 2018-03-30 15:08:05 -0700 | [diff] [blame] | 1643 | case BPF_CGROUP_INET4_CONNECT: |
| 1644 | case BPF_CGROUP_INET6_CONNECT: |
Andrey Ignatov | 1cedee1 | 2018-05-25 08:55:23 -0700 | [diff] [blame] | 1645 | case BPF_CGROUP_UDP4_SENDMSG: |
| 1646 | case BPF_CGROUP_UDP6_SENDMSG: |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 1647 | ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; |
| 1648 | break; |
Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 1649 | case BPF_CGROUP_SOCK_OPS: |
Alexei Starovoitov | 324bda9e6 | 2017-10-02 22:50:21 -0700 | [diff] [blame] | 1650 | ptype = BPF_PROG_TYPE_SOCK_OPS; |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1651 | break; |
Roman Gushchin | ebc614f | 2017-11-05 08:15:32 -0500 | [diff] [blame] | 1652 | case BPF_CGROUP_DEVICE: |
| 1653 | ptype = BPF_PROG_TYPE_CGROUP_DEVICE; |
| 1654 | break; |
John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 1655 | case BPF_SK_MSG_VERDICT: |
| 1656 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false); |
John Fastabend | 5a67da2 | 2017-09-08 14:00:49 -0700 | [diff] [blame] | 1657 | case BPF_SK_SKB_STREAM_PARSER: |
| 1658 | case BPF_SK_SKB_STREAM_VERDICT: |
John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 1659 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false); |
Sean Young | f4364dc | 2018-05-27 12:24:09 +0100 | [diff] [blame^] | 1660 | case BPF_LIRC_MODE2: |
| 1661 | return lirc_prog_detach(attr); |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1662 | default: |
| 1663 | return -EINVAL; |
| 1664 | } |
| 1665 | |
Alexei Starovoitov | 324bda9e6 | 2017-10-02 22:50:21 -0700 | [diff] [blame] | 1666 | cgrp = cgroup_get_from_fd(attr->target_fd); |
| 1667 | if (IS_ERR(cgrp)) |
| 1668 | return PTR_ERR(cgrp); |
| 1669 | |
| 1670 | prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); |
| 1671 | if (IS_ERR(prog)) |
| 1672 | prog = NULL; |
| 1673 | |
| 1674 | ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); |
| 1675 | if (prog) |
| 1676 | bpf_prog_put(prog); |
| 1677 | cgroup_put(cgrp); |
Alexei Starovoitov | 7f67763 | 2017-02-10 20:28:24 -0800 | [diff] [blame] | 1678 | return ret; |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1679 | } |
Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 1680 | |
Alexei Starovoitov | 468e2f6 | 2017-10-02 22:50:22 -0700 | [diff] [blame] | 1681 | #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt |
| 1682 | |
| 1683 | static int bpf_prog_query(const union bpf_attr *attr, |
| 1684 | union bpf_attr __user *uattr) |
| 1685 | { |
| 1686 | struct cgroup *cgrp; |
| 1687 | int ret; |
| 1688 | |
| 1689 | if (!capable(CAP_NET_ADMIN)) |
| 1690 | return -EPERM; |
| 1691 | if (CHECK_ATTR(BPF_PROG_QUERY)) |
| 1692 | return -EINVAL; |
| 1693 | if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) |
| 1694 | return -EINVAL; |
| 1695 | |
| 1696 | switch (attr->query.attach_type) { |
| 1697 | case BPF_CGROUP_INET_INGRESS: |
| 1698 | case BPF_CGROUP_INET_EGRESS: |
| 1699 | case BPF_CGROUP_INET_SOCK_CREATE: |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 1700 | case BPF_CGROUP_INET4_BIND: |
| 1701 | case BPF_CGROUP_INET6_BIND: |
Andrey Ignatov | aac3fc3 | 2018-03-30 15:08:07 -0700 | [diff] [blame] | 1702 | case BPF_CGROUP_INET4_POST_BIND: |
| 1703 | case BPF_CGROUP_INET6_POST_BIND: |
Andrey Ignatov | d74bad4 | 2018-03-30 15:08:05 -0700 | [diff] [blame] | 1704 | case BPF_CGROUP_INET4_CONNECT: |
| 1705 | case BPF_CGROUP_INET6_CONNECT: |
Andrey Ignatov | 1cedee1 | 2018-05-25 08:55:23 -0700 | [diff] [blame] | 1706 | case BPF_CGROUP_UDP4_SENDMSG: |
| 1707 | case BPF_CGROUP_UDP6_SENDMSG: |
Alexei Starovoitov | 468e2f6 | 2017-10-02 22:50:22 -0700 | [diff] [blame] | 1708 | case BPF_CGROUP_SOCK_OPS: |
Roman Gushchin | ebc614f | 2017-11-05 08:15:32 -0500 | [diff] [blame] | 1709 | case BPF_CGROUP_DEVICE: |
Alexei Starovoitov | 468e2f6 | 2017-10-02 22:50:22 -0700 | [diff] [blame] | 1710 | break; |
Sean Young | f4364dc | 2018-05-27 12:24:09 +0100 | [diff] [blame^] | 1711 | case BPF_LIRC_MODE2: |
| 1712 | return lirc_prog_query(attr, uattr); |
Alexei Starovoitov | 468e2f6 | 2017-10-02 22:50:22 -0700 | [diff] [blame] | 1713 | default: |
| 1714 | return -EINVAL; |
| 1715 | } |
| 1716 | cgrp = cgroup_get_from_fd(attr->query.target_fd); |
| 1717 | if (IS_ERR(cgrp)) |
| 1718 | return PTR_ERR(cgrp); |
| 1719 | ret = cgroup_bpf_query(cgrp, attr, uattr); |
| 1720 | cgroup_put(cgrp); |
| 1721 | return ret; |
| 1722 | } |
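
/*
 * Userspace sketch of BPF_PROG_QUERY (illustrative, not part of this file):
 * list the ids of programs attached to a cgroup's ingress hook.  @prog_ids
 * points to an array of *prog_cnt u32s; on return *prog_cnt holds the
 * number of attached programs reported by the kernel.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int example_cgroup_query(int cgroup_fd, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = cgroup_fd;
	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.query.prog_ids = (__u64)(unsigned long)prog_ids;
	attr.query.prog_cnt = *prog_cnt;

	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
	if (!err)
		*prog_cnt = attr.query.prog_cnt;
	return err;
}
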
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 1723 | #endif /* CONFIG_CGROUP_BPF */ |
| 1724 | |
Alexei Starovoitov | 1cf1cae | 2017-03-30 21:45:38 -0700 | [diff] [blame] | 1725 | #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration |
| 1726 | |
| 1727 | static int bpf_prog_test_run(const union bpf_attr *attr, |
| 1728 | union bpf_attr __user *uattr) |
| 1729 | { |
| 1730 | struct bpf_prog *prog; |
| 1731 | int ret = -ENOTSUPP; |
| 1732 | |
Alexei Starovoitov | 61f3c96 | 2018-01-17 16:52:02 -0800 | [diff] [blame] | 1733 | if (!capable(CAP_SYS_ADMIN)) |
| 1734 | return -EPERM; |
Alexei Starovoitov | 1cf1cae | 2017-03-30 21:45:38 -0700 | [diff] [blame] | 1735 | if (CHECK_ATTR(BPF_PROG_TEST_RUN)) |
| 1736 | return -EINVAL; |
| 1737 | |
| 1738 | prog = bpf_prog_get(attr->test.prog_fd); |
| 1739 | if (IS_ERR(prog)) |
| 1740 | return PTR_ERR(prog); |
| 1741 | |
| 1742 | if (prog->aux->ops->test_run) |
| 1743 | ret = prog->aux->ops->test_run(prog, attr, uattr); |
| 1744 | |
| 1745 | bpf_prog_put(prog); |
| 1746 | return ret; |
| 1747 | } |
| 1748 | |
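/*
 * Userspace sketch of BPF_PROG_TEST_RUN (illustrative, not part of this
 * file): run a loaded program (e.g. SCHED_CLS or XDP) against a crafted
 * packet buffer and read back its return value.  Requires CAP_SYS_ADMIN;
 * returns -1 with errno set on failure.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int example_test_run(int prog_fd, void *pkt, __u32 pkt_len, __u32 *retval)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = (__u64)(unsigned long)pkt;
	attr.test.data_size_in = pkt_len;
	attr.test.repeat = 1;	/* run once; >1 averages test.duration */

	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (!err && retval)
		*retval = attr.test.retval;
	return err;
}
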
Martin KaFai Lau | 34ad558 | 2017-06-05 12:15:48 -0700 | [diff] [blame] | 1749 | #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id |
| 1750 | |
| 1751 | static int bpf_obj_get_next_id(const union bpf_attr *attr, |
| 1752 | union bpf_attr __user *uattr, |
| 1753 | struct idr *idr, |
| 1754 | spinlock_t *lock) |
| 1755 | { |
| 1756 | u32 next_id = attr->start_id; |
| 1757 | int err = 0; |
| 1758 | |
| 1759 | if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) |
| 1760 | return -EINVAL; |
| 1761 | |
| 1762 | if (!capable(CAP_SYS_ADMIN)) |
| 1763 | return -EPERM; |
| 1764 | |
| 1765 | next_id++; |
| 1766 | spin_lock_bh(lock); |
| 1767 | if (!idr_get_next(idr, &next_id)) |
| 1768 | err = -ENOENT; |
| 1769 | spin_unlock_bh(lock); |
| 1770 | |
| 1771 | if (!err) |
| 1772 | err = put_user(next_id, &uattr->next_id); |
| 1773 | |
| 1774 | return err; |
| 1775 | } |
| 1776 | |
Martin KaFai Lau | b16d9aa | 2017-06-05 12:15:49 -0700 | [diff] [blame] | 1777 | #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id |
| 1778 | |
| 1779 | static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) |
| 1780 | { |
| 1781 | struct bpf_prog *prog; |
| 1782 | u32 id = attr->prog_id; |
| 1783 | int fd; |
| 1784 | |
| 1785 | if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) |
| 1786 | return -EINVAL; |
| 1787 | |
| 1788 | if (!capable(CAP_SYS_ADMIN)) |
| 1789 | return -EPERM; |
| 1790 | |
| 1791 | spin_lock_bh(&prog_idr_lock); |
| 1792 | prog = idr_find(&prog_idr, id); |
| 1793 | if (prog) |
| 1794 | prog = bpf_prog_inc_not_zero(prog); |
| 1795 | else |
| 1796 | prog = ERR_PTR(-ENOENT); |
| 1797 | spin_unlock_bh(&prog_idr_lock); |
| 1798 | |
| 1799 | if (IS_ERR(prog)) |
| 1800 | return PTR_ERR(prog); |
| 1801 | |
| 1802 | fd = bpf_prog_new_fd(prog); |
| 1803 | if (fd < 0) |
| 1804 | bpf_prog_put(prog); |
| 1805 | |
| 1806 | return fd; |
| 1807 | } |
| 1808 | |
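/*
 * Userspace sketch of walking all loaded programs (illustrative, not part
 * of this file): BPF_PROG_GET_NEXT_ID iterates the id space and
 * BPF_PROG_GET_FD_BY_ID turns each id into an fd.  Both commands require
 * CAP_SYS_ADMIN; the callback is invoked with a short-lived fd.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static void example_for_each_prog(void (*cb)(int fd))
{
	union bpf_attr attr;
	__u32 id = 0;
	int fd;

	for (;;) {
		memset(&attr, 0, sizeof(attr));
		attr.start_id = id;
		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
			break;		/* ENOENT: no more programs */
		id = attr.next_id;

		memset(&attr, 0, sizeof(attr));
		attr.prog_id = id;
		fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
		if (fd < 0)
			continue;	/* program unloaded in the meantime */
		cb(fd);
		close(fd);
	}
}
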
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 1809 | #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags |
Martin KaFai Lau | bd5f5f4e | 2017-06-05 12:15:50 -0700 | [diff] [blame] | 1810 | |
| 1811 | static int bpf_map_get_fd_by_id(const union bpf_attr *attr) |
| 1812 | { |
| 1813 | struct bpf_map *map; |
| 1814 | u32 id = attr->map_id; |
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 1815 | int f_flags; |
Martin KaFai Lau | bd5f5f4e | 2017-06-05 12:15:50 -0700 | [diff] [blame] | 1816 | int fd; |
| 1817 | |
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 1818 | if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || |
| 1819 | attr->open_flags & ~BPF_OBJ_FLAG_MASK) |
Martin KaFai Lau | bd5f5f4e | 2017-06-05 12:15:50 -0700 | [diff] [blame] | 1820 | return -EINVAL; |
| 1821 | |
| 1822 | if (!capable(CAP_SYS_ADMIN)) |
| 1823 | return -EPERM; |
| 1824 | |
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 1825 | f_flags = bpf_get_file_flag(attr->open_flags); |
| 1826 | if (f_flags < 0) |
| 1827 | return f_flags; |
| 1828 | |
Martin KaFai Lau | bd5f5f4e | 2017-06-05 12:15:50 -0700 | [diff] [blame] | 1829 | spin_lock_bh(&map_idr_lock); |
| 1830 | map = idr_find(&map_idr, id); |
| 1831 | if (map) |
| 1832 | map = bpf_map_inc_not_zero(map, true); |
| 1833 | else |
| 1834 | map = ERR_PTR(-ENOENT); |
| 1835 | spin_unlock_bh(&map_idr_lock); |
| 1836 | |
| 1837 | if (IS_ERR(map)) |
| 1838 | return PTR_ERR(map); |
| 1839 | |
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 1840 | fd = bpf_map_new_fd(map, f_flags); |
Martin KaFai Lau | bd5f5f4e | 2017-06-05 12:15:50 -0700 | [diff] [blame] | 1841 | if (fd < 0) |
| 1842 | bpf_map_put(map); |
| 1843 | |
| 1844 | return fd; |
| 1845 | } |
| 1846 | |
Daniel Borkmann | 7105e82 | 2017-12-20 13:42:57 +0100 | [diff] [blame] | 1847 | static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, |
| 1848 | unsigned long addr) |
| 1849 | { |
| 1850 | int i; |
| 1851 | |
| 1852 | for (i = 0; i < prog->aux->used_map_cnt; i++) |
| 1853 | if (prog->aux->used_maps[i] == (void *)addr) |
| 1854 | return prog->aux->used_maps[i]; |
| 1855 | return NULL; |
| 1856 | } |
| 1857 | |
| 1858 | static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) |
| 1859 | { |
| 1860 | const struct bpf_map *map; |
| 1861 | struct bpf_insn *insns; |
| 1862 | u64 imm; |
| 1863 | int i; |
| 1864 | |
| 1865 | insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), |
| 1866 | GFP_USER); |
| 1867 | if (!insns) |
| 1868 | return insns; |
| 1869 | |
| 1870 | for (i = 0; i < prog->len; i++) { |
| 1871 | if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) { |
| 1872 | insns[i].code = BPF_JMP | BPF_CALL; |
| 1873 | insns[i].imm = BPF_FUNC_tail_call; |
| 1874 | /* fall-through */ |
| 1875 | } |
| 1876 | if (insns[i].code == (BPF_JMP | BPF_CALL) || |
| 1877 | insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) { |
| 1878 | if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) |
| 1879 | insns[i].code = BPF_JMP | BPF_CALL; |
| 1880 | if (!bpf_dump_raw_ok()) |
| 1881 | insns[i].imm = 0; |
| 1882 | continue; |
| 1883 | } |
| 1884 | |
| 1885 | if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW)) |
| 1886 | continue; |
| 1887 | |
| 1888 | imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; |
| 1889 | map = bpf_map_from_imm(prog, imm); |
| 1890 | if (map) { |
| 1891 | insns[i].src_reg = BPF_PSEUDO_MAP_FD; |
| 1892 | insns[i].imm = map->id; |
| 1893 | insns[i + 1].imm = 0; |
| 1894 | continue; |
| 1895 | } |
| 1896 | |
| 1897 | if (!bpf_dump_raw_ok() && |
| 1898 | imm == (unsigned long)prog->aux) { |
| 1899 | insns[i].imm = 0; |
| 1900 | insns[i + 1].imm = 0; |
| 1901 | continue; |
| 1902 | } |
| 1903 | } |
| 1904 | |
| 1905 | return insns; |
| 1906 | } |
| 1907 | |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 1908 | static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, |
| 1909 | const union bpf_attr *attr, |
| 1910 | union bpf_attr __user *uattr) |
| 1911 | { |
| 1912 | struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); |
| 1913 | struct bpf_prog_info info = {}; |
| 1914 | u32 info_len = attr->info.info_len; |
| 1915 | char __user *uinsns; |
| 1916 | u32 ulen; |
| 1917 | int err; |
| 1918 | |
Martin KaFai Lau | dcab51f | 2018-05-22 15:03:31 -0700 | [diff] [blame] | 1919 | err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 1920 | if (err) |
| 1921 | return err; |
| 1922 | info_len = min_t(u32, sizeof(info), info_len); |
| 1923 | |
| 1924 | if (copy_from_user(&info, uinfo, info_len)) |
Daniel Borkmann | 89b0968 | 2017-07-27 21:02:46 +0200 | [diff] [blame] | 1925 | return -EFAULT; |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 1926 | |
| 1927 | info.type = prog->type; |
| 1928 | info.id = prog->aux->id; |
Martin KaFai Lau | cb4d2b3 | 2017-09-27 14:37:52 -0700 | [diff] [blame] | 1929 | info.load_time = prog->aux->load_time; |
| 1930 | info.created_by_uid = from_kuid_munged(current_user_ns(), |
| 1931 | prog->aux->user->uid); |
Jiri Olsa | b85fab0 | 2018-04-25 19:41:06 +0200 | [diff] [blame] | 1932 | info.gpl_compatible = prog->gpl_compatible; |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 1933 | |
| 1934 | memcpy(info.tag, prog->tag, sizeof(prog->tag)); |
Martin KaFai Lau | cb4d2b3 | 2017-09-27 14:37:52 -0700 | [diff] [blame] | 1935 | memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); |
| 1936 | |
| 1937 | ulen = info.nr_map_ids; |
| 1938 | info.nr_map_ids = prog->aux->used_map_cnt; |
| 1939 | ulen = min_t(u32, info.nr_map_ids, ulen); |
| 1940 | if (ulen) { |
Martin KaFai Lau | 721e08d | 2017-09-29 10:52:17 -0700 | [diff] [blame] | 1941 | u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); |
Martin KaFai Lau | cb4d2b3 | 2017-09-27 14:37:52 -0700 | [diff] [blame] | 1942 | u32 i; |
| 1943 | |
| 1944 | for (i = 0; i < ulen; i++) |
| 1945 | if (put_user(prog->aux->used_maps[i]->id, |
| 1946 | &user_map_ids[i])) |
| 1947 | return -EFAULT; |
| 1948 | } |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 1949 | |
| 1950 | if (!capable(CAP_SYS_ADMIN)) { |
| 1951 | info.jited_prog_len = 0; |
| 1952 | info.xlated_prog_len = 0; |
Sandipan Das | dbecd73 | 2018-05-24 12:26:48 +0530 | [diff] [blame] | 1953 | info.nr_jited_ksyms = 0; |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 1954 | goto done; |
| 1955 | } |
| 1956 | |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 1957 | ulen = info.xlated_prog_len; |
Daniel Borkmann | 9975a54 | 2017-07-28 17:05:25 +0200 | [diff] [blame] | 1958 | info.xlated_prog_len = bpf_prog_insn_size(prog); |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 1959 | if (info.xlated_prog_len && ulen) { |
Daniel Borkmann | 7105e82 | 2017-12-20 13:42:57 +0100 | [diff] [blame] | 1960 | struct bpf_insn *insns_sanitized; |
| 1961 | bool fault; |
| 1962 | |
| 1963 | if (prog->blinded && !bpf_dump_raw_ok()) { |
| 1964 | info.xlated_prog_insns = 0; |
| 1965 | goto done; |
| 1966 | } |
| 1967 | insns_sanitized = bpf_insn_prepare_dump(prog); |
| 1968 | if (!insns_sanitized) |
| 1969 | return -ENOMEM; |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 1970 | uinsns = u64_to_user_ptr(info.xlated_prog_insns); |
| 1971 | ulen = min_t(u32, info.xlated_prog_len, ulen); |
Daniel Borkmann | 7105e82 | 2017-12-20 13:42:57 +0100 | [diff] [blame] | 1972 | fault = copy_to_user(uinsns, insns_sanitized, ulen); |
| 1973 | kfree(insns_sanitized); |
| 1974 | if (fault) |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 1975 | return -EFAULT; |
| 1976 | } |
| 1977 | |
Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 1978 | if (bpf_prog_is_dev_bound(prog->aux)) { |
| 1979 | err = bpf_prog_offload_info_fill(&info, prog); |
| 1980 | if (err) |
| 1981 | return err; |
Jiong Wang | fcfb126 | 2018-01-16 16:05:19 -0800 | [diff] [blame] | 1982 | goto done; |
| 1983 | } |
| 1984 | |
| 1985 | /* NOTE: the following code is supposed to be skipped for offload. |
| 1986 | * bpf_prog_offload_info_fill() is the place to fill similar fields |
| 1987 | * for offload. |
| 1988 | */ |
| 1989 | ulen = info.jited_prog_len; |
Sandipan Das | 4d56a76 | 2018-05-24 12:26:51 +0530 | [diff] [blame] | 1990 | if (prog->aux->func_cnt) { |
| 1991 | u32 i; |
| 1992 | |
| 1993 | info.jited_prog_len = 0; |
| 1994 | for (i = 0; i < prog->aux->func_cnt; i++) |
| 1995 | info.jited_prog_len += prog->aux->func[i]->jited_len; |
| 1996 | } else { |
| 1997 | info.jited_prog_len = prog->jited_len; |
| 1998 | } |
| 1999 | |
Jiong Wang | fcfb126 | 2018-01-16 16:05:19 -0800 | [diff] [blame] | 2000 | if (info.jited_prog_len && ulen) { |
| 2001 | if (bpf_dump_raw_ok()) { |
| 2002 | uinsns = u64_to_user_ptr(info.jited_prog_insns); |
| 2003 | ulen = min_t(u32, info.jited_prog_len, ulen); |
Sandipan Das | 4d56a76 | 2018-05-24 12:26:51 +0530 | [diff] [blame] | 2004 | |
| 2005 | /* for multi-function programs, copy the JITed |
| 2006 | * instructions for all the functions |
| 2007 | */ |
| 2008 | if (prog->aux->func_cnt) { |
| 2009 | u32 len, free, i; |
| 2010 | u8 *img; |
| 2011 | |
| 2012 | free = ulen; |
| 2013 | for (i = 0; i < prog->aux->func_cnt; i++) { |
| 2014 | len = prog->aux->func[i]->jited_len; |
| 2015 | len = min_t(u32, len, free); |
| 2016 | img = (u8 *) prog->aux->func[i]->bpf_func; |
| 2017 | if (copy_to_user(uinsns, img, len)) |
| 2018 | return -EFAULT; |
| 2019 | uinsns += len; |
| 2020 | free -= len; |
| 2021 | if (!free) |
| 2022 | break; |
| 2023 | } |
| 2024 | } else { |
| 2025 | if (copy_to_user(uinsns, prog->bpf_func, ulen)) |
| 2026 | return -EFAULT; |
| 2027 | } |
Jiong Wang | fcfb126 | 2018-01-16 16:05:19 -0800 | [diff] [blame] | 2028 | } else { |
| 2029 | info.jited_prog_insns = 0; |
| 2030 | } |
Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 2031 | } |
| 2032 | |
Sandipan Das | dbecd73 | 2018-05-24 12:26:48 +0530 | [diff] [blame] | 2033 | ulen = info.nr_jited_ksyms; |
| 2034 | info.nr_jited_ksyms = prog->aux->func_cnt; |
| 2035 | if (info.nr_jited_ksyms && ulen) { |
| 2036 | if (bpf_dump_raw_ok()) { |
| 2037 | u64 __user *user_ksyms; |
| 2038 | ulong ksym_addr; |
| 2039 | u32 i; |
| 2040 | |
| 2041 | /* copy the address of the kernel symbol |
| 2042 | * corresponding to each function |
| 2043 | */ |
| 2044 | ulen = min_t(u32, info.nr_jited_ksyms, ulen); |
| 2045 | user_ksyms = u64_to_user_ptr(info.jited_ksyms); |
| 2046 | for (i = 0; i < ulen; i++) { |
| 2047 | ksym_addr = (ulong) prog->aux->func[i]->bpf_func; |
| 2048 | ksym_addr &= PAGE_MASK; |
| 2049 | if (put_user((u64) ksym_addr, &user_ksyms[i])) |
| 2050 | return -EFAULT; |
| 2051 | } |
| 2052 | } else { |
| 2053 | info.jited_ksyms = 0; |
| 2054 | } |
| 2055 | } |
| 2056 | |
Sandipan Das | 815581c | 2018-05-24 12:26:52 +0530 | [diff] [blame] | 2057 | ulen = info.nr_jited_func_lens; |
| 2058 | info.nr_jited_func_lens = prog->aux->func_cnt; |
| 2059 | if (info.nr_jited_func_lens && ulen) { |
| 2060 | if (bpf_dump_raw_ok()) { |
| 2061 | u32 __user *user_lens; |
| 2062 | u32 func_len, i; |
| 2063 | |
| 2064 | /* copy the JITed image lengths for each function */ |
| 2065 | ulen = min_t(u32, info.nr_jited_func_lens, ulen); |
| 2066 | user_lens = u64_to_user_ptr(info.jited_func_lens); |
| 2067 | for (i = 0; i < ulen; i++) { |
| 2068 | func_len = prog->aux->func[i]->jited_len; |
| 2069 | if (put_user(func_len, &user_lens[i])) |
| 2070 | return -EFAULT; |
| 2071 | } |
| 2072 | } else { |
| 2073 | info.jited_func_lens = 0; |
| 2074 | } |
| 2075 | } |
| 2076 | |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 2077 | done: |
| 2078 | if (copy_to_user(uinfo, &info, info_len) || |
| 2079 | put_user(info_len, &uattr->info.info_len)) |
| 2080 | return -EFAULT; |
| 2081 | |
| 2082 | return 0; |
| 2083 | } |
| 2084 | |
| 2085 | static int bpf_map_get_info_by_fd(struct bpf_map *map, |
| 2086 | const union bpf_attr *attr, |
| 2087 | union bpf_attr __user *uattr) |
| 2088 | { |
| 2089 | struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); |
| 2090 | struct bpf_map_info info = {}; |
| 2091 | u32 info_len = attr->info.info_len; |
| 2092 | int err; |
| 2093 | |
Martin KaFai Lau | dcab51f | 2018-05-22 15:03:31 -0700 | [diff] [blame] | 2094 | err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 2095 | if (err) |
| 2096 | return err; |
| 2097 | info_len = min_t(u32, sizeof(info), info_len); |
| 2098 | |
| 2099 | info.type = map->map_type; |
| 2100 | info.id = map->id; |
| 2101 | info.key_size = map->key_size; |
| 2102 | info.value_size = map->value_size; |
| 2103 | info.max_entries = map->max_entries; |
| 2104 | info.map_flags = map->map_flags; |
Martin KaFai Lau | ad5b177 | 2017-09-27 14:37:53 -0700 | [diff] [blame] | 2105 | memcpy(info.name, map->name, sizeof(map->name)); |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 2106 | |
Martin KaFai Lau | 78958fc | 2018-05-04 14:49:51 -0700 | [diff] [blame] | 2107 | if (map->btf) { |
| 2108 | info.btf_id = btf_id(map->btf); |
Martin KaFai Lau | 9b2cf32 | 2018-05-22 14:57:21 -0700 | [diff] [blame] | 2109 | info.btf_key_type_id = map->btf_key_type_id; |
| 2110 | info.btf_value_type_id = map->btf_value_type_id; |
Martin KaFai Lau | 78958fc | 2018-05-04 14:49:51 -0700 | [diff] [blame] | 2111 | } |
| 2112 | |
Jakub Kicinski | 52775b3 | 2018-01-17 19:13:28 -0800 | [diff] [blame] | 2113 | if (bpf_map_is_dev_bound(map)) { |
| 2114 | err = bpf_map_offload_info_fill(&info, map); |
| 2115 | if (err) |
| 2116 | return err; |
| 2117 | } |
| 2118 | |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 2119 | if (copy_to_user(uinfo, &info, info_len) || |
| 2120 | put_user(info_len, &uattr->info.info_len)) |
| 2121 | return -EFAULT; |
| 2122 | |
| 2123 | return 0; |
| 2124 | } |
| 2125 | |
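| | /* BTF fds delegate to btf_get_info_by_fd(); only the trailing-zero |
| | * check on the user-supplied info area is done here. |
| | */ |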
Martin KaFai Lau | 62dab84 | 2018-05-04 14:49:52 -0700 | [diff] [blame] | 2126 | static int bpf_btf_get_info_by_fd(struct btf *btf, |
| 2127 | const union bpf_attr *attr, |
| 2128 | union bpf_attr __user *uattr) |
| 2129 | { |
| 2130 | struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); |
| 2131 | u32 info_len = attr->info.info_len; |
| 2132 | int err; |
| 2133 | |
Martin KaFai Lau | dcab51f | 2018-05-22 15:03:31 -0700 | [diff] [blame] | 2134 | err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len); |
Martin KaFai Lau | 62dab84 | 2018-05-04 14:49:52 -0700 | [diff] [blame] | 2135 | if (err) |
| 2136 | return err; |
| 2137 | |
| 2138 | return btf_get_info_by_fd(btf, attr, uattr); |
| 2139 | } |
| 2140 | |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 2141 | #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info |
| 2142 | |
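| | /* BPF_OBJ_GET_INFO_BY_FD: the fd may refer to a program, a map or a |
| | * BTF object; the object type is recovered from the file's f_op and |
| | * the request is routed to the matching helper above. |
| | * |
| | * Illustrative userspace call (a sketch using the uapi field names |
| | * from <linux/bpf.h>; "prog_fd" is a placeholder for a program fd |
| | * the caller already holds): |
| | * |
| | *	struct bpf_prog_info info = {}; |
| | *	union bpf_attr attr = {}; |
| | * |
| | *	attr.info.bpf_fd   = prog_fd; |
| | *	attr.info.info_len = sizeof(info); |
| | *	attr.info.info     = (__u64)(unsigned long)&info; |
| | *	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)); |
| | */ |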
| 2143 | static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, |
| 2144 | union bpf_attr __user *uattr) |
| 2145 | { |
| 2146 | int ufd = attr->info.bpf_fd; |
| 2147 | struct fd f; |
| 2148 | int err; |
| 2149 | |
| 2150 | if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) |
| 2151 | return -EINVAL; |
| 2152 | |
| 2153 | f = fdget(ufd); |
| 2154 | if (!f.file) |
| 2155 | return -EBADFD; |
| 2156 | |
| 2157 | if (f.file->f_op == &bpf_prog_fops) |
| 2158 | err = bpf_prog_get_info_by_fd(f.file->private_data, attr, |
| 2159 | uattr); |
| 2160 | else if (f.file->f_op == &bpf_map_fops) |
| 2161 | err = bpf_map_get_info_by_fd(f.file->private_data, attr, |
| 2162 | uattr); |
Martin KaFai Lau | 60197cf | 2018-04-18 15:56:02 -0700 | [diff] [blame] | 2163 | else if (f.file->f_op == &btf_fops) |
Martin KaFai Lau | 62dab84 | 2018-05-04 14:49:52 -0700 | [diff] [blame] | 2164 | err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr); |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 2165 | else |
| 2166 | err = -EINVAL; |
| 2167 | |
| 2168 | fdput(f); |
| 2169 | return err; |
| 2170 | } |
| 2171 | |
Martin KaFai Lau | f56a653 | 2018-04-18 15:56:01 -0700 | [diff] [blame] | 2172 | #define BPF_BTF_LOAD_LAST_FIELD btf_log_level |
| 2173 | |
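| | /* BPF_BTF_LOAD: verify a raw BTF blob supplied by user space and |
| | * return a new fd for it, so that subsequent map creation can refer |
| | * to its types.  Restricted to CAP_SYS_ADMIN. |
| | */ |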
| 2174 | static int bpf_btf_load(const union bpf_attr *attr) |
| 2175 | { |
| 2176 | if (CHECK_ATTR(BPF_BTF_LOAD)) |
| 2177 | return -EINVAL; |
| 2178 | |
| 2179 | if (!capable(CAP_SYS_ADMIN)) |
| 2180 | return -EPERM; |
| 2181 | |
| 2182 | return btf_new_fd(attr); |
| 2183 | } |
| 2184 | |
Martin KaFai Lau | 78958fc | 2018-05-04 14:49:51 -0700 | [diff] [blame] | 2185 | #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id |
| 2186 | |
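| | /* BPF_BTF_GET_FD_BY_ID: turn a globally visible BTF object id back |
| | * into an fd, CAP_SYS_ADMIN only. |
| | */ |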
| 2187 | static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) |
| 2188 | { |
| 2189 | if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) |
| 2190 | return -EINVAL; |
| 2191 | |
| 2192 | if (!capable(CAP_SYS_ADMIN)) |
| 2193 | return -EPERM; |
| 2194 | |
| 2195 | return btf_get_fd_by_id(attr->btf_id); |
| 2196 | } |
| 2197 | |
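| | /* Copy the result of a BPF_TASK_FD_QUERY back to user space.  buf_len |
| | * is bidirectional: on input it is the capacity of the user buffer, on |
| | * output the length of the name being reported.  If the buffer is too |
| | * small, a NUL-terminated prefix is copied and -ENOSPC is returned, |
| | * with the remaining fields still filled in. |
| | */ |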
Yonghong Song | 41bdc4b | 2018-05-24 11:21:09 -0700 | [diff] [blame] | 2198 | static int bpf_task_fd_query_copy(const union bpf_attr *attr, |
| 2199 | union bpf_attr __user *uattr, |
| 2200 | u32 prog_id, u32 fd_type, |
| 2201 | const char *buf, u64 probe_offset, |
| 2202 | u64 probe_addr) |
| 2203 | { |
| 2204 | char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); |
| 2205 | u32 len = buf ? strlen(buf) : 0, input_len; |
| 2206 | int err = 0; |
| 2207 | |
| 2208 | if (put_user(len, &uattr->task_fd_query.buf_len)) |
| 2209 | return -EFAULT; |
| 2210 | input_len = attr->task_fd_query.buf_len; |
| 2211 | if (input_len && ubuf) { |
| 2212 | if (!len) { |
| 2213 | /* nothing to copy, just make ubuf NULL terminated */ |
| 2214 | char zero = '\0'; |
| 2215 | |
| 2216 | if (put_user(zero, ubuf)) |
| 2217 | return -EFAULT; |
| 2218 | } else if (input_len >= len + 1) { |
| 2219 | /* ubuf can hold the string with NULL terminator */ |
| 2220 | if (copy_to_user(ubuf, buf, len + 1)) |
| 2221 | return -EFAULT; |
| 2222 | } else { |
| 2223 | /* ubuf cannot hold the string with NULL terminator, |
| 2224 | * do a partial copy with NULL terminator. |
| 2225 | */ |
| 2226 | char zero = '\0'; |
| 2227 | |
| 2228 | err = -ENOSPC; |
| 2229 | if (copy_to_user(ubuf, buf, input_len - 1)) |
| 2230 | return -EFAULT; |
| 2231 | if (put_user(zero, ubuf + input_len - 1)) |
| 2232 | return -EFAULT; |
| 2233 | } |
| 2234 | } |
| 2235 | |
| 2236 | if (put_user(prog_id, &uattr->task_fd_query.prog_id) || |
| 2237 | put_user(fd_type, &uattr->task_fd_query.fd_type) || |
| 2238 | put_user(probe_offset, &uattr->task_fd_query.probe_offset) || |
| 2239 | put_user(probe_addr, &uattr->task_fd_query.probe_addr)) |
| 2240 | return -EFAULT; |
| 2241 | |
| 2242 | return err; |
| 2243 | } |
| 2244 | |
| 2245 | #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr |
| 2246 | |
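| | /* BPF_TASK_FD_QUERY: given a pid and an fd within that task, report |
| | * which BPF program the fd pins down.  Raw tracepoint fds are answered |
| | * directly; perf_event fds are resolved via bpf_get_perf_event_info(); |
| | * anything else is -ENOTSUPP. |
| | * |
| | * Illustrative userspace call (a sketch; "name_buf" is a placeholder |
| | * buffer owned by the caller): |
| | * |
| | *	attr.task_fd_query.pid     = pid; |
| | *	attr.task_fd_query.fd      = fd; |
| | *	attr.task_fd_query.buf     = (__u64)(unsigned long)name_buf; |
| | *	attr.task_fd_query.buf_len = sizeof(name_buf); |
| | *	err = syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr)); |
| | */ |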
| 2247 | static int bpf_task_fd_query(const union bpf_attr *attr, |
| 2248 | union bpf_attr __user *uattr) |
| 2249 | { |
| 2250 | pid_t pid = attr->task_fd_query.pid; |
| 2251 | u32 fd = attr->task_fd_query.fd; |
| 2252 | const struct perf_event *event; |
| 2253 | struct files_struct *files; |
| 2254 | struct task_struct *task; |
| 2255 | struct file *file; |
| 2256 | int err; |
| 2257 | |
| 2258 | if (CHECK_ATTR(BPF_TASK_FD_QUERY)) |
| 2259 | return -EINVAL; |
| 2260 | |
| 2261 | if (!capable(CAP_SYS_ADMIN)) |
| 2262 | return -EPERM; |
| 2263 | |
| 2264 | if (attr->task_fd_query.flags != 0) |
| 2265 | return -EINVAL; |
| 2266 | |
| 2267 | task = get_pid_task(find_vpid(pid), PIDTYPE_PID); |
| 2268 | if (!task) |
| 2269 | return -ENOENT; |
| 2270 | |
| 2271 | files = get_files_struct(task); |
| 2272 | put_task_struct(task); |
| 2273 | if (!files) |
| 2274 | return -ENOENT; |
| 2275 | |
| 2276 | err = 0; |
| 2277 | spin_lock(&files->file_lock); |
| 2278 | file = fcheck_files(files, fd); |
| 2279 | if (!file) |
| 2280 | err = -EBADF; |
| 2281 | else |
| 2282 | get_file(file); |
| 2283 | spin_unlock(&files->file_lock); |
| 2284 | put_files_struct(files); |
| 2285 | |
| 2286 | if (err) |
| 2287 | goto out; |
| 2288 | |
| 2289 | if (file->f_op == &bpf_raw_tp_fops) { |
| 2290 | struct bpf_raw_tracepoint *raw_tp = file->private_data; |
| 2291 | struct bpf_raw_event_map *btp = raw_tp->btp; |
| 2292 | |
| 2293 | err = bpf_task_fd_query_copy(attr, uattr, |
| 2294 | raw_tp->prog->aux->id, |
| 2295 | BPF_FD_TYPE_RAW_TRACEPOINT, |
| 2296 | btp->tp->name, 0, 0); |
| 2297 | goto put_file; |
| 2298 | } |
| 2299 | |
| 2300 | event = perf_get_event(file); |
| 2301 | if (!IS_ERR(event)) { |
| 2302 | u64 probe_offset, probe_addr; |
| 2303 | u32 prog_id, fd_type; |
| 2304 | const char *buf; |
| 2305 | |
| 2306 | err = bpf_get_perf_event_info(event, &prog_id, &fd_type, |
| 2307 | &buf, &probe_offset, |
| 2308 | &probe_addr); |
| 2309 | if (!err) |
| 2310 | err = bpf_task_fd_query_copy(attr, uattr, prog_id, |
| 2311 | fd_type, buf, |
| 2312 | probe_offset, |
| 2313 | probe_addr); |
| 2314 | goto put_file; |
| 2315 | } |
| 2316 | |
| 2317 | err = -ENOTSUPP; |
| 2318 | put_file: |
| 2319 | fput(file); |
| 2320 | out: |
| 2321 | return err; |
| 2322 | } |
| 2323 | |
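| | /* Single entry point for all BPF commands.  The attr union is copied |
| | * in with a size chosen by user space; bpf_check_uarg_tail_zero() |
| | * rejects requests whose unknown trailing bytes are non-zero, which is |
| | * what allows the union to grow without breaking older binaries. |
| | * |
| | * A minimal userspace sketch of issuing one command (BPF_MAP_GET_NEXT_ID |
| | * here; "start_id" is a placeholder): |
| | * |
| | *	union bpf_attr attr = {}; |
| | * |
| | *	attr.start_id = start_id; |
| | *	err = syscall(__NR_bpf, BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr)); |
| | *	if (!err) |
| | *		next_id = attr.next_id; |
| | */ |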
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 2324 | SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) |
| 2325 | { |
| 2326 | union bpf_attr attr = {}; |
| 2327 | int err; |
| 2328 | |
Chenbo Feng | 0fa4fe8 | 2018-03-19 17:57:27 -0700 | [diff] [blame] | 2329 | if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN)) |
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 2330 | return -EPERM; |
| 2331 | |
Martin KaFai Lau | dcab51f | 2018-05-22 15:03:31 -0700 | [diff] [blame] | 2332 | err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 2333 | if (err) |
| 2334 | return err; |
| 2335 | size = min_t(u32, size, sizeof(attr)); |
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 2336 | |
| 2337 | /* copy attributes from user space, may be less than sizeof(bpf_attr) */ |
| 2338 | if (copy_from_user(&attr, uattr, size) != 0) |
| 2339 | return -EFAULT; |
| 2340 | |
Chenbo Feng | afdb09c | 2017-10-18 13:00:24 -0700 | [diff] [blame] | 2341 | err = security_bpf(cmd, &attr, size); |
| 2342 | if (err < 0) |
| 2343 | return err; |
| 2344 | |
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 2345 | switch (cmd) { |
| 2346 | case BPF_MAP_CREATE: |
| 2347 | err = map_create(&attr); |
| 2348 | break; |
Alexei Starovoitov | db20fd2 | 2014-09-26 00:16:59 -0700 | [diff] [blame] | 2349 | case BPF_MAP_LOOKUP_ELEM: |
| 2350 | err = map_lookup_elem(&attr); |
| 2351 | break; |
| 2352 | case BPF_MAP_UPDATE_ELEM: |
| 2353 | err = map_update_elem(&attr); |
| 2354 | break; |
| 2355 | case BPF_MAP_DELETE_ELEM: |
| 2356 | err = map_delete_elem(&attr); |
| 2357 | break; |
| 2358 | case BPF_MAP_GET_NEXT_KEY: |
| 2359 | err = map_get_next_key(&attr); |
| 2360 | break; |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 2361 | case BPF_PROG_LOAD: |
| 2362 | err = bpf_prog_load(&attr); |
| 2363 | break; |
Daniel Borkmann | b219775 | 2015-10-29 14:58:09 +0100 | [diff] [blame] | 2364 | case BPF_OBJ_PIN: |
| 2365 | err = bpf_obj_pin(&attr); |
| 2366 | break; |
| 2367 | case BPF_OBJ_GET: |
| 2368 | err = bpf_obj_get(&attr); |
| 2369 | break; |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 2370 | #ifdef CONFIG_CGROUP_BPF |
| 2371 | case BPF_PROG_ATTACH: |
| 2372 | err = bpf_prog_attach(&attr); |
| 2373 | break; |
| 2374 | case BPF_PROG_DETACH: |
| 2375 | err = bpf_prog_detach(&attr); |
| 2376 | break; |
Alexei Starovoitov | 468e2f6 | 2017-10-02 22:50:22 -0700 | [diff] [blame] | 2377 | case BPF_PROG_QUERY: |
| 2378 | err = bpf_prog_query(&attr, uattr); |
| 2379 | break; |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 2380 | #endif |
Alexei Starovoitov | 1cf1cae | 2017-03-30 21:45:38 -0700 | [diff] [blame] | 2381 | case BPF_PROG_TEST_RUN: |
| 2382 | err = bpf_prog_test_run(&attr, uattr); |
| 2383 | break; |
Martin KaFai Lau | 34ad558 | 2017-06-05 12:15:48 -0700 | [diff] [blame] | 2384 | case BPF_PROG_GET_NEXT_ID: |
| 2385 | err = bpf_obj_get_next_id(&attr, uattr, |
| 2386 | &prog_idr, &prog_idr_lock); |
| 2387 | break; |
| 2388 | case BPF_MAP_GET_NEXT_ID: |
| 2389 | err = bpf_obj_get_next_id(&attr, uattr, |
| 2390 | &map_idr, &map_idr_lock); |
| 2391 | break; |
Martin KaFai Lau | b16d9aa | 2017-06-05 12:15:49 -0700 | [diff] [blame] | 2392 | case BPF_PROG_GET_FD_BY_ID: |
| 2393 | err = bpf_prog_get_fd_by_id(&attr); |
| 2394 | break; |
Martin KaFai Lau | bd5f5f4e | 2017-06-05 12:15:50 -0700 | [diff] [blame] | 2395 | case BPF_MAP_GET_FD_BY_ID: |
| 2396 | err = bpf_map_get_fd_by_id(&attr); |
| 2397 | break; |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 2398 | case BPF_OBJ_GET_INFO_BY_FD: |
| 2399 | err = bpf_obj_get_info_by_fd(&attr, uattr); |
| 2400 | break; |
Alexei Starovoitov | c4f6699 | 2018-03-28 12:05:37 -0700 | [diff] [blame] | 2401 | case BPF_RAW_TRACEPOINT_OPEN: |
| 2402 | err = bpf_raw_tracepoint_open(&attr); |
| 2403 | break; |
Martin KaFai Lau | f56a653 | 2018-04-18 15:56:01 -0700 | [diff] [blame] | 2404 | case BPF_BTF_LOAD: |
| 2405 | err = bpf_btf_load(&attr); |
| 2406 | break; |
Martin KaFai Lau | 78958fc | 2018-05-04 14:49:51 -0700 | [diff] [blame] | 2407 | case BPF_BTF_GET_FD_BY_ID: |
| 2408 | err = bpf_btf_get_fd_by_id(&attr); |
| 2409 | break; |
Yonghong Song | 41bdc4b | 2018-05-24 11:21:09 -0700 | [diff] [blame] | 2410 | case BPF_TASK_FD_QUERY: |
| 2411 | err = bpf_task_fd_query(&attr, uattr); |
| 2412 | break; |
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 2413 | default: |
| 2414 | err = -EINVAL; |
| 2415 | break; |
| 2416 | } |
| 2417 | |
| 2418 | return err; |
| 2419 | } |