/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
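/* Illustrative note (added commentary, not from the original source):
 * 'k' here is a classic BPF negative-offset constant already rebased
 * onto the SKF_* space. E.g. k == SKF_NET_OFF + 2 resolves to two bytes
 * into the network header, and a pointer is only handed back when all
 * 'size' bytes fit inside the linear skb data.
 */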
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fds for the digest calculation
	 * since they are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

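/* Note (added commentary): the 0x80 byte and the trailing big-endian
 * bit count written above implement standard SHA-1 message padding by
 * hand, so fp->tag is effectively SHA-1 over the instruction image
 * with map fd immediates zeroed out. This tag is what later shows up
 * as the hex suffix of "bpf_prog_<tag>" symbols in kallsyms.
 */
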
static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 > pos)
		imm += delta;
	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 > pos)
		off += delta;
	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
			    const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i += delta + 1;
			insn++;
		}
		code = insn->code;
		if (BPF_CLASS(code) != BPF_JMP ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
						   probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
						   probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    bpf_adj_branches(prog, off, insn_delta, true))
		return NULL;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * the last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point; the ship has
	 * sailed and there is no way back to the original state. An
	 * overflow cannot happen at this point either.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));

	return prog_adj;
}

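/* Illustrative example (added commentary): replacing insn 3 of a
 * 10-insn program with a 3-insn patchlet gives insn_delta == 2, so
 * insns 4..9 are shifted up by two slots and every jump or
 * BPF_PSEUDO_CALL that crosses the patch boundary gets its off/imm
 * bumped by bpf_adj_branches() above.
 */
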
#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

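/* The resulting symbol (added commentary) therefore looks like
 * "bpf_prog_<16 hex tag chars>" or "bpf_prog_<tag>_<name>", which is
 * the form JITed programs take in /proc/kallsyms.
 */
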
static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(aux->prog, sym);

		*value = symbol_start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}

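/* Resulting image layout (added commentary):
 *
 *	hdr | random hole (illegal insns) | JITed image | fill to page end
 *
 * The randomized start offset makes the program body land at an
 * unpredictable page offset, which hardens against JIT spraying.
 */
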
/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

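/* Example of the rewrite above (added commentary): with imm_rnd == R,
 * a single "r2 += 42" (BPF_ALU64 | BPF_ADD | BPF_K) becomes
 *
 *	BPF_REG_AX  = R ^ 42
 *	BPF_REG_AX ^= R		// AX now holds 42 again
 *	r2         += BPF_REG_AX
 *
 * so the attacker-chosen constant 42 never appears literally in the
 * JITed image.
 */
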
static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * the clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)		\
	/* 32 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU, ADD, X),			\
	INSN_3(ALU, SUB, X),			\
	INSN_3(ALU, AND, X),			\
	INSN_3(ALU, OR, X),			\
	INSN_3(ALU, LSH, X),			\
	INSN_3(ALU, RSH, X),			\
	INSN_3(ALU, XOR, X),			\
	INSN_3(ALU, MUL, X),			\
	INSN_3(ALU, MOV, X),			\
	INSN_3(ALU, DIV, X),			\
	INSN_3(ALU, MOD, X),			\
	INSN_2(ALU, NEG),			\
	INSN_3(ALU, END, TO_BE),		\
	INSN_3(ALU, END, TO_LE),		\
	/*   Immediate based. */		\
	INSN_3(ALU, ADD, K),			\
	INSN_3(ALU, SUB, K),			\
	INSN_3(ALU, AND, K),			\
	INSN_3(ALU, OR, K),			\
	INSN_3(ALU, LSH, K),			\
	INSN_3(ALU, RSH, K),			\
	INSN_3(ALU, XOR, K),			\
	INSN_3(ALU, MUL, K),			\
	INSN_3(ALU, MOV, K),			\
	INSN_3(ALU, DIV, K),			\
	INSN_3(ALU, MOD, K),			\
	/* 64 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU64, ADD, X),			\
	INSN_3(ALU64, SUB, X),			\
	INSN_3(ALU64, AND, X),			\
	INSN_3(ALU64, OR, X),			\
	INSN_3(ALU64, LSH, X),			\
	INSN_3(ALU64, RSH, X),			\
	INSN_3(ALU64, XOR, X),			\
	INSN_3(ALU64, MUL, X),			\
	INSN_3(ALU64, MOV, X),			\
	INSN_3(ALU64, ARSH, X),			\
	INSN_3(ALU64, DIV, X),			\
	INSN_3(ALU64, MOD, X),			\
	INSN_2(ALU64, NEG),			\
	/*   Immediate based. */		\
	INSN_3(ALU64, ADD, K),			\
	INSN_3(ALU64, SUB, K),			\
	INSN_3(ALU64, AND, K),			\
	INSN_3(ALU64, OR, K),			\
	INSN_3(ALU64, LSH, K),			\
	INSN_3(ALU64, RSH, K),			\
	INSN_3(ALU64, XOR, K),			\
	INSN_3(ALU64, MUL, K),			\
	INSN_3(ALU64, MOV, K),			\
	INSN_3(ALU64, ARSH, K),			\
	INSN_3(ALU64, DIV, K),			\
	INSN_3(ALU64, MOD, K),			\
	/* Call instruction. */			\
	INSN_2(JMP, CALL),			\
	/* Exit instruction. */			\
	INSN_2(JMP, EXIT),			\
	/* Jump instructions. */		\
	/*   Register based. */			\
	INSN_3(JMP, JEQ, X),			\
	INSN_3(JMP, JNE, X),			\
	INSN_3(JMP, JGT, X),			\
	INSN_3(JMP, JLT, X),			\
	INSN_3(JMP, JGE, X),			\
	INSN_3(JMP, JLE, X),			\
	INSN_3(JMP, JSGT, X),			\
	INSN_3(JMP, JSLT, X),			\
	INSN_3(JMP, JSGE, X),			\
	INSN_3(JMP, JSLE, X),			\
	INSN_3(JMP, JSET, X),			\
	/*   Immediate based. */		\
	INSN_3(JMP, JEQ, K),			\
	INSN_3(JMP, JNE, K),			\
	INSN_3(JMP, JGT, K),			\
	INSN_3(JMP, JLT, K),			\
	INSN_3(JMP, JGE, K),			\
	INSN_3(JMP, JLE, K),			\
	INSN_3(JMP, JSGT, K),			\
	INSN_3(JMP, JSLT, K),			\
	INSN_3(JMP, JSGE, K),			\
	INSN_3(JMP, JSLE, K),			\
	INSN_3(JMP, JSET, K),			\
	INSN_2(JMP, JA),			\
	/* Store instructions. */		\
	/*   Register based. */			\
	INSN_3(STX, MEM, B),			\
	INSN_3(STX, MEM, H),			\
	INSN_3(STX, MEM, W),			\
	INSN_3(STX, MEM, DW),			\
	INSN_3(STX, XADD, W),			\
	INSN_3(STX, XADD, DW),			\
	/*   Immediate based. */		\
	INSN_3(ST, MEM, B),			\
	INSN_3(ST, MEM, H),			\
	INSN_3(ST, MEM, W),			\
	INSN_3(ST, MEM, DW),			\
	/* Load instructions. */		\
	/*   Register based. */			\
	INSN_3(LDX, MEM, B),			\
	INSN_3(LDX, MEM, H),			\
	INSN_3(LDX, MEM, W),			\
	INSN_3(LDX, MEM, DW),			\
	/*   Immediate based. */		\
	INSN_3(LD, IMM, DW),			\
	/* Misc (old cBPF carry-over). */	\
	INSN_3(LD, ABS, B),			\
	INSN_3(LD, ABS, H),			\
	INSN_3(LD, ABS, W),			\
	INSN_3(LD, IND, B),			\
	INSN_3(LD, IND, H),			\
	INSN_3(LD, IND, W)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}

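/* Note (added commentary): the [0 ... 255] range initializer is a GCC
 * extension; every opcode that BPF_INSN_MAP() expands to then simply
 * overwrites its slot with true, giving an O(1) lookup that can reject
 * unknown opcodes early.
 */
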
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 * ___bpf_prog_run - run eBPF program on a given context
 * @regs: is the array of MAX_BPF_REG eBPF pseudo registers
 * @insn: is the array of eBPF instructions
 * @stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
	u64 tmp;
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

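/* Dispatch note (added commentary): instead of a big switch(), each
 * handler below jumps straight through jumptable[] via computed goto
 * ("threaded" interpretation), avoiding a central dispatch branch and
 * giving the CPU one indirect-branch site per opcode to predict.
 */
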
select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD, +)
	ALU(SUB, -)
	ALU(AND, &)
	ALU(OR, |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR, ^)
	ALU(MUL, *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
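	/* Note (added commentary): a tail call never returns to the
	 * caller; it restarts the dispatch loop at the new program's
	 * first insn, with tail_call_cnt bounding the chain at
	 * MAX_TAIL_CALL_CNT so execution stays finite.
	 */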
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_X:
		if (DST < SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_K:
		if (DST < IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_X:
		if (DST <= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_K:
		if (DST <= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_X:
		if (((s64) DST) < ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_K:
		if (((s64) DST) < ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_X:
		if (((s64) DST) <= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_K:
		if (((s64) DST) <= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B, u8)
	LDST(H, u16)
	LDST(W, u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only
		 * appearing in the programs where ctx == skb
		 * (see may_access_skb() in the verifier). All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
		 * bpf_convert_filter() saves it in BPF_R6, internal BPF
		 * verifier will check that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
		BUG_ON(1);
		return 0;
}
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}

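/* Sizing note (added commentary): the EVAL* expansion above emits one
 * interpreter entry point per 32-byte stack-size step up to 512 bytes.
 * A program with e.g. stack_depth == 100 dispatches through
 * interpreters[(round_up(100, 32) / 32) - 1], i.e. the 128-byte
 * variant, instead of always reserving the 512-byte worst case.
 */
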
#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (fp->kprobe_override)
		return false;

	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

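/* Note (added commentary): tail calls jump directly between programs
 * stored in a prog_array, so all programs in one array must share the
 * same type and the same JITed/non-JITed state; the first program to
 * use the array becomes its "owner" and pins these properties.
 */
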
static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

/**
 * bpf_prog_select_runtime - select exec runtime for BPF program
 * @fp: bpf_prog populated with internal BPF program
 * @err: pointer to error variable
 *
 * Try to JIT the eBPF program; if no JIT is available, use the interpreter.
 * The BPF program will be executed via the BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		fp = bpf_int_jit_compile(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
		if (!fp->jited) {
			*err = -ENOTSUPP;
			return fp;
		}
#endif
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage, as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

Yonghong Songe87c6bc2017-10-23 23:53:08 -07001566static unsigned int __bpf_prog_ret1(const void *ctx,
1567 const struct bpf_insn *insn)
1568{
1569 return 1;
1570}
1571
1572static struct bpf_prog_dummy {
1573 struct bpf_prog prog;
1574} dummy_bpf_prog = {
1575 .prog = {
1576 .bpf_func = __bpf_prog_ret1,
1577 },
1578};
1579
Alexei Starovoitov324bda9e62017-10-02 22:50:21 -07001580/* to avoid allocating empty bpf_prog_array for cgroups that
1581 * don't have bpf program attached use one global 'empty_prog_array'
1582 * It will not be modified the caller of bpf_prog_array_alloc()
1583 * (since caller requested prog_cnt == 0)
1584 * that pointer should be 'freed' by bpf_prog_array_free()
1585 */
1586static struct {
1587 struct bpf_prog_array hdr;
1588 struct bpf_prog *null_prog;
1589} empty_prog_array = {
1590 .null_prog = NULL,
1591};
1592
1593struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1594{
1595 if (prog_cnt)
1596 return kzalloc(sizeof(struct bpf_prog_array) +
1597 sizeof(struct bpf_prog *) * (prog_cnt + 1),
1598 flags);
1599
1600 return &empty_prog_array.hdr;
1601}
1602
1603void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1604{
1605 if (!progs ||
1606 progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1607 return;
1608 kfree_rcu(progs, rcu);
1609}
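
/* Illustrative sketch (hypothetical caller; __rcu/sparse annotations
 * elided): the allocator reserves prog_cnt + 1 pointer slots, so the
 * array is always NULL-terminated and pairs with bpf_prog_array_free():
 *
 *	struct bpf_prog_array *arr;
 *
 *	arr = bpf_prog_array_alloc(2, GFP_KERNEL);
 *	if (!arr)
 *		return -ENOMEM;
 *	arr->progs[0] = prog_a;
 *	arr->progs[1] = prog_b;
 *	// arr->progs[2] stays NULL thanks to kzalloc()
 *	...
 *	bpf_prog_array_free(arr);
 */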
1610
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001611int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
1612{
1613 struct bpf_prog **prog;
1614 u32 cnt = 0;
1615
1616 rcu_read_lock();
1617 prog = rcu_dereference(progs)->progs;
1618 for (; *prog; prog++)
Yonghong Songc8c088b2017-11-30 13:47:54 -08001619 if (*prog != &dummy_bpf_prog.prog)
1620 cnt++;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001621 rcu_read_unlock();
1622 return cnt;
1623}
1624
Yonghong Song3a38bb92018-04-10 09:37:32 -07001625static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
1626 u32 *prog_ids,
1627 u32 request_cnt)
1628{
1629 int i = 0;
1630
1631 for (; *prog; prog++) {
1632 if (*prog == &dummy_bpf_prog.prog)
1633 continue;
1634 prog_ids[i] = (*prog)->aux->id;
1635 if (++i == request_cnt) {
1636 prog++;
1637 break;
1638 }
1639 }
1640
1641 return !!(*prog);
1642}
1643
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001644int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1645 __u32 __user *prog_ids, u32 cnt)
1646{
1647 struct bpf_prog **prog;
Alexei Starovoitov09112872018-02-02 15:14:05 -08001648 unsigned long err = 0;
Alexei Starovoitov09112872018-02-02 15:14:05 -08001649 bool nospc;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001650 u32 *ids;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001651
Alexei Starovoitov09112872018-02-02 15:14:05 -08001652 /* Users of this function are doing:
1653 * cnt = bpf_prog_array_length();
1654 * if (cnt > 0)
1655 * bpf_prog_array_copy_to_user(..., cnt);
1656 * so the kcalloc below doesn't need an extra cnt > 0 check, but
1657 * bpf_prog_array_length() releases the rcu lock, and the
1658 * prog array could have been swapped with an empty or larger array,
1659 * so always copy 'cnt' prog_ids to the user.
1660 * In a rare race the user will see zero prog_ids.
1661 */
Daniel Borkmann9c481b92018-02-14 15:31:00 +01001662 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
Alexei Starovoitov09112872018-02-02 15:14:05 -08001663 if (!ids)
1664 return -ENOMEM;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001665 rcu_read_lock();
1666 prog = rcu_dereference(progs)->progs;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001667 nospc = bpf_prog_array_copy_core(prog, ids, cnt);
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001668 rcu_read_unlock();
Alexei Starovoitov09112872018-02-02 15:14:05 -08001669 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1670 kfree(ids);
1671 if (err)
1672 return -EFAULT;
1673 if (nospc)
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001674 return -ENOSPC;
1675 return 0;
1676}
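
/* Illustrative sketch of the caller pattern described in the comment
 * above (names are hypothetical; 'uids' stands for a user-space buffer):
 *
 *	u32 cnt = bpf_prog_array_length(progs);
 *
 *	if (cnt > 0) {
 *		err = bpf_prog_array_copy_to_user(progs, uids, cnt);
 *		if (err)
 *			return err;	// -EFAULT or -ENOSPC
 *	}
 */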
1677
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001678void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
1679 struct bpf_prog *old_prog)
1680{
1681 struct bpf_prog **prog = progs->progs;
1682
1683 for (; *prog; prog++)
1684 if (*prog == old_prog) {
1685 WRITE_ONCE(*prog, &dummy_bpf_prog.prog);
1686 break;
1687 }
1688}
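
/* Illustrative sketch (hypothetical caller and lock): detach is
 * serialized by the writer's lock while readers keep traversing the
 * array under RCU; the dummy replacement keeps the layout stable:
 *
 *	mutex_lock(&attach_mutex);
 *	bpf_prog_array_delete_safe(progs, old_prog);
 *	mutex_unlock(&attach_mutex);
 *	bpf_prog_put(old_prog);
 */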
1689
1690int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1691 struct bpf_prog *exclude_prog,
1692 struct bpf_prog *include_prog,
1693 struct bpf_prog_array **new_array)
1694{
1695 int new_prog_cnt, carry_prog_cnt = 0;
1696 struct bpf_prog **existing_prog;
1697 struct bpf_prog_array *array;
1698 int new_prog_idx = 0;
1699
1700 /* Figure out how many existing progs we need to carry over to
1701 * the new array.
1702 */
1703 if (old_array) {
1704 existing_prog = old_array->progs;
1705 for (; *existing_prog; existing_prog++) {
1706 if (*existing_prog != exclude_prog &&
1707 *existing_prog != &dummy_bpf_prog.prog)
1708 carry_prog_cnt++;
1709 if (*existing_prog == include_prog)
1710 return -EEXIST;
1711 }
1712 }
1713
1714 /* How many progs (not NULL) will be in the new array? */
1715 new_prog_cnt = carry_prog_cnt;
1716 if (include_prog)
1717 new_prog_cnt += 1;
1718
1719 /* Do we have any prog (not NULL) in the new array? */
1720 if (!new_prog_cnt) {
1721 *new_array = NULL;
1722 return 0;
1723 }
1724
1725 /* +1 as the end of prog_array is marked with NULL */
1726 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1727 if (!array)
1728 return -ENOMEM;
1729
1730 /* Fill in the new prog array */
1731 if (carry_prog_cnt) {
1732 existing_prog = old_array->progs;
1733 for (; *existing_prog; existing_prog++)
1734 if (*existing_prog != exclude_prog &&
1735 *existing_prog != &dummy_bpf_prog.prog)
1736 array->progs[new_prog_idx++] = *existing_prog;
1737 }
1738 if (include_prog)
1739 array->progs[new_prog_idx++] = include_prog;
1740 array->progs[new_prog_idx] = NULL;
1741 *new_array = array;
1742 return 0;
1743}
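
/* Illustrative sketch (loosely modelled on the perf event attach path;
 * names are hypothetical): build a new array under a lock, publish it,
 * then free the old one (bpf_prog_array_free() defers via kfree_rcu):
 *
 *	struct bpf_prog_array *new_array;
 *	int err;
 *
 *	err = bpf_prog_array_copy(old_array, NULL, new_prog, &new_array);
 *	if (err < 0)
 *		return err;
 *	rcu_assign_pointer(event->prog_array, new_array);
 *	bpf_prog_array_free(old_array);
 */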
1744
Yonghong Songf371b302017-12-11 11:39:02 -08001745int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
Yonghong Song3a38bb92018-04-10 09:37:32 -07001746 u32 *prog_ids, u32 request_cnt,
1747 u32 *prog_cnt)
Yonghong Songf371b302017-12-11 11:39:02 -08001748{
Yonghong Song3a38bb92018-04-10 09:37:32 -07001749 struct bpf_prog **prog;
Yonghong Songf371b302017-12-11 11:39:02 -08001750 u32 cnt = 0;
1751
1752 if (array)
1753 cnt = bpf_prog_array_length(array);
1754
Yonghong Song3a38bb92018-04-10 09:37:32 -07001755 *prog_cnt = cnt;
Yonghong Songf371b302017-12-11 11:39:02 -08001756
1757 /* Return early if the user requested only the program count or there is nothing to copy. */
1758 if (!request_cnt || !cnt)
1759 return 0;
1760
Yonghong Song3a38bb92018-04-10 09:37:32 -07001761 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1762 prog = rcu_dereference_check(array, 1)->progs;
1763 return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
1764 : 0;
Yonghong Songf371b302017-12-11 11:39:02 -08001765}
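
/* Illustrative sketch (hypothetical caller holding the mutex named in
 * the comment above): size and fill a query buffer in a single call:
 *
 *	u32 ids[16], prog_cnt;
 *	int err;
 *
 *	err = bpf_prog_array_copy_info(array, ids, ARRAY_SIZE(ids),
 *				       &prog_cnt);
 *	// err == -ENOSPC means prog_cnt > 16 and only 16 ids were copied
 */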
1766
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001767static void bpf_prog_free_deferred(struct work_struct *work)
1768{
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001769 struct bpf_prog_aux *aux;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -08001770 int i;
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001771
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001772 aux = container_of(work, struct bpf_prog_aux, work);
Jakub Kicinskiab3f0062017-11-03 13:56:17 -07001773 if (bpf_prog_is_dev_bound(aux))
1774 bpf_prog_offload_destroy(aux->prog);
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -08001775 for (i = 0; i < aux->func_cnt; i++)
1776 bpf_jit_free(aux->func[i]);
1777 if (aux->func_cnt) {
1778 kfree(aux->func);
1779 bpf_prog_unlock_free(aux->prog);
1780 } else {
1781 bpf_jit_free(aux->prog);
1782 }
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001783}
1784
1785/* Free internal BPF program */
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001786void bpf_prog_free(struct bpf_prog *fp)
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001787{
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001788 struct bpf_prog_aux *aux = fp->aux;
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001789
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001790 INIT_WORK(&aux->work, bpf_prog_free_deferred);
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001791 schedule_work(&aux->work);
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001792}
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001793EXPORT_SYMBOL_GPL(bpf_prog_free);
Alexei Starovoitovf89b7752014-10-23 18:41:08 -07001794
Daniel Borkmann3ad00402015-10-08 01:20:39 +02001795/* RNG for unprivileged user space, with state separate from prandom_u32(). */
1796static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1797
1798void bpf_user_rnd_init_once(void)
1799{
1800 prandom_init_once(&bpf_user_rnd_state);
1801}
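
/* Illustrative sketch (loosely modelled on a func-proto lookup for
 * programs using the prandom helper; the exact call site is assumed,
 * not taken from this file): seeding happens lazily, once, at the
 * point the helper is handed out:
 *
 *	case BPF_FUNC_get_prandom_u32:
 *		bpf_user_rnd_init_once();
 *		return &bpf_get_prandom_u32_proto;
 */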
1802
Daniel Borkmannf3694e02016-09-09 02:45:31 +02001803BPF_CALL_0(bpf_user_rnd_u32)
Daniel Borkmann3ad00402015-10-08 01:20:39 +02001804{
1805 /* Should someone ever have the rather unwise idea of using some
1806 * of the registers passed into this function, note that
1807 * this function is called from both native eBPF and classic-to-eBPF
1808 * transformations. Register assignments on the two sides
1809 * differ, e.g. classic always sets up fn(ctx, A, X) here.
1810 */
1811 struct rnd_state *state;
1812 u32 res;
1813
1814 state = &get_cpu_var(bpf_user_rnd_state);
1815 res = prandom_u32_state(state);
Shaohua Lib761fe22016-09-27 08:42:41 -07001816 put_cpu_var(bpf_user_rnd_state);
Daniel Borkmann3ad00402015-10-08 01:20:39 +02001817
1818 return res;
1819}
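
/* For reference: BPF_CALL_0(bpf_user_rnd_u32) above roughly expands to
 * the eBPF helper calling convention (a sketch; macro details elided):
 *
 *	u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 *	{
 *		return ____bpf_user_rnd_u32();
 *	}
 *
 * which is why the comment above warns about the unused registers.
 */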
1820
Daniel Borkmann3ba67da2015-03-05 23:27:51 +01001821/* Weak definitions of helper functions in case we don't have bpf syscall. */
1822const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1823const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1824const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
1825
Daniel Borkmann03e69b52015-03-14 02:27:16 +01001826const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
Daniel Borkmannc04167c2015-03-14 02:27:17 +01001827const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
Daniel Borkmann2d0e30c2016-10-21 12:46:33 +02001828const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
Daniel Borkmann17ca8cb2015-05-29 23:23:06 +02001829const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001830
Alexei Starovoitovffeedaf2015-06-12 19:39:12 -07001831const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1832const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1833const struct bpf_func_proto bpf_get_current_comm_proto __weak;
John Fastabend6bdc9c42017-08-16 15:02:32 -07001834const struct bpf_func_proto bpf_sock_map_update_proto __weak;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001835
Alexei Starovoitov0756ea32015-06-12 19:39:13 -07001836const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1837{
1838 return NULL;
1839}
Daniel Borkmann03e69b52015-03-14 02:27:16 +01001840
Daniel Borkmann555c8a82016-07-14 18:08:05 +02001841u64 __weak
1842bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1843 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001844{
Daniel Borkmann555c8a82016-07-14 18:08:05 +02001845 return -ENOTSUPP;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001846}
1847
Daniel Borkmann3324b582015-05-29 23:23:07 +02001848/* Always built-in helper functions. */
1849const struct bpf_func_proto bpf_tail_call_proto = {
1850 .func = NULL,
1851 .gpl_only = false,
1852 .ret_type = RET_VOID,
1853 .arg1_type = ARG_PTR_TO_CTX,
1854 .arg2_type = ARG_CONST_MAP_PTR,
1855 .arg3_type = ARG_ANYTHING,
1856};
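
/* Illustrative sketch (BPF program side, restricted C; map and section
 * names are hypothetical): a tail call jumps through a
 * BPF_MAP_TYPE_PROG_ARRAY slot and does not return on success:
 *
 *	struct bpf_map_def SEC("maps") jmp_table = {
 *		.type		= BPF_MAP_TYPE_PROG_ARRAY,
 *		.key_size	= sizeof(u32),
 *		.value_size	= sizeof(u32),
 *		.max_entries	= 8,
 *	};
 *
 *	SEC("classifier") int dispatch(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 2);
 *		return 0;	// reached only if slot 2 is empty
 *	}
 *
 * .func is NULL above because the verifier and JITs emit the tail call
 * inline instead of going through a regular helper call.
 */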
1857
Daniel Borkmann93831912017-02-16 22:24:49 +01001858/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1859 * It is encouraged to implement bpf_int_jit_compile() instead, so that
1860 * eBPF and implicitly also cBPF can get JITed!
1861 */
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001862struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
Daniel Borkmann3324b582015-05-29 23:23:07 +02001863{
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001864 return prog;
Daniel Borkmann3324b582015-05-29 23:23:07 +02001865}
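
/* Illustrative sketch (hypothetical arch): a real eBPF JIT overrides
 * the weak definition above, pointing bpf_func at the generated image
 * and marking the prog as jited:
 *
 *	struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 *	{
 *		...
 *		prog->bpf_func = (void *)jit_image;	// hypothetical image
 *		prog->jited = 1;
 *		return prog;
 *	}
 */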
1866
Daniel Borkmann93831912017-02-16 22:24:49 +01001867/* Stub for JITs that support eBPF. All cBPF code gets transformed into
1868 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1869 */
1870void __weak bpf_jit_compile(struct bpf_prog *prog)
1871{
1872}
1873
Martin KaFai Lau17bedab2016-12-07 15:53:11 -08001874bool __weak bpf_helper_changes_pkt_data(void *func)
Alexei Starovoitov969bf052016-05-05 19:49:10 -07001875{
1876 return false;
1877}
1878
Alexei Starovoitovf89b7752014-10-23 18:41:08 -07001879/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
1880 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1881 */
1882int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1883 int len)
1884{
1885 return -EFAULT;
1886}
Daniel Borkmanna67edbf2017-01-25 02:28:18 +01001887
1888/* All definitions of tracepoints related to BPF. */
1889#define CREATE_TRACE_POINTS
1890#include <linux/bpf_trace.h>
1891
1892EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
1893
Steven Rostedt (VMware)9185a612017-10-12 18:40:02 -04001894/* These are only used within the BPF_SYSCALL code */
1895#ifdef CONFIG_BPF_SYSCALL
Daniel Borkmanna67edbf2017-01-25 02:28:18 +01001896EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
1897EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
Steven Rostedt (VMware)9185a612017-10-12 18:40:02 -04001898#endif