/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

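/* Direct value access helpers for single-entry arrays (such as global data
 * maps): array_map_direct_value_addr() returns the absolute address of the
 * value, array_map_direct_value_meta() maps such an address back to an
 * offset within it.
 */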
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

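	/* Roughly:
	 *   index = *(u32 *)key;
	 *   if (index >= map->max_entries)
	 *           return NULL;
	 *   index &= array->index_mask;	(unprivileged maps only)
	 *   return array->value + elem_size * index;
	 */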
	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space provides round_up(value_size, 8) bytes that are
	 * copied into the per-cpu area; bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned, or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data can leak.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
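	/* array elements are pre-allocated for the lifetime of the map and
	 * cannot be deleted individually
	 */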
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one using this map) have
	 * been disconnected from events. Wait for outstanding programs to
	 * complete and free the array.
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

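/* fd-based array maps: the u32 value written from user space is a file
 * descriptor, which map_fd_get_ptr() converts into a reference to the
 * underlying kernel object (bpf program, perf event, cgroup or another map).
 */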
static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = bpf_fd_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
};

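/* Each perf event array slot holds a bpf_event_entry, which pins the perf
 * event's file and remembers which map file installed it, so that
 * perf_event_fd_array_release() can drop the entry when that map fd is
 * closed.
 */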
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgroup after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
};
#endif

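/* array of maps (map-in-map): each element stores a pointer to another bpf
 * map; inner_map_meta records the attributes that all inner maps must match.
 */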
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
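	/* the element stores a pointer to the inner map; load it
	 * (NULL if the slot is empty)
	 */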
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};