// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if the total number
 * of insns is less than 4K, but there are too many branches that change
 * stack/regs. The number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'.
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'.
 * The function expects the 1st argument to be a const pointer to 'struct bpf_map'
 * and the 2nd argument to be a pointer to stack, which will be used inside
 * the helper function as a pointer to the map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok.
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes.
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far.
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL, which means bpf_map_lookup_elem()
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
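
/* A minimal sketch of the reference-tracking pattern described above.
 * This sequence is illustrative only (argument setup for the lookup is
 * omitted, so it would not verify as-is) and is not taken from a real
 * program:
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *        // R0 is now PTR_TO_SOCKET_OR_NULL and a reference id is recorded
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 *        // NULL branch: R0 becomes known zero and the reference is released
 *        // fall-through: R0 becomes PTR_TO_SOCKET and may be dereferenced
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *        // the reference recorded at the lookup is released here; leaving
 *        // it unreleased on any path makes the verifier reject the program
 *    BPF_EXIT_INSN(),
 */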

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
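
/* The helpers above pack verifier bookkeeping into a single word each; a
 * sketch of the resulting layouts (bit positions follow the #defines above):
 *
 *   map_ptr_state: [ struct bpf_map * in the high bits | unpriv in bit 0 ]
 *	(valid because a real map pointer is at least word-aligned, so bit 0
 *	 is always zero; the BUILD_BUG_ON() checks the poison value too)
 *   map_key_state: [ poison bit 63 | seen bit 62 | key immediate in 61:0 ]
 *
 * For example, after bpf_map_key_store(aux, 42), bpf_map_key_unseen(aux)
 * is false and bpf_map_key_immediate(aux) returns 42.
 */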

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int map_uid;
	int func_id;
	struct btf *btf;
	u32 btf_id;
	struct btf *ret_btf;
	u32 ret_btf_id;
	u32 subprogno;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (log->level == BPF_LOG_KERNEL) {
		pr_err("BPF:%s\n", log->kbuf);
		return;
	}
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
{
	char zero = 0;

	if (!bpf_verifier_log_needed(log))
		return;

	log->len_used = new_pos;
	if (put_user(zero, log->ubuf + new_pos))
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static void verbose_invalid_scalar(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg,
				   struct tnum *range, const char *ctx,
				   const char *reg_name)
{
	char tn_buf[48];

	verbose(env, "At %s the register %s ", ctx, reg_name);
	if (!tnum_is_unknown(reg->var_off)) {
		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "has value %s", tn_buf);
	} else {
		verbose(env, "has unknown scalar value");
	}
	tnum_strn(tn_buf, sizeof(tn_buf), *range);
	verbose(env, " should have been in %s\n", tn_buf);
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_XDP_SOCK;
}

static bool reg_type_not_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_MAP_VALUE ||
		type == PTR_TO_MAP_KEY ||
		type == PTR_TO_SOCK_COMMON;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
	       type == PTR_TO_TCP_SOCK_OR_NULL ||
	       type == PTR_TO_BTF_ID_OR_NULL ||
	       type == PTR_TO_MEM_OR_NULL ||
	       type == PTR_TO_RDONLY_BUF_OR_NULL ||
	       type == PTR_TO_RDWR_BUF_OR_NULL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
		map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCKET_OR_NULL ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_TCP_SOCK_OR_NULL ||
		type == PTR_TO_MEM ||
		type == PTR_TO_MEM_OR_NULL;
}

static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCK_COMMON;
}

static bool arg_type_may_be_null(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_MAP_VALUE_OR_NULL ||
	       type == ARG_PTR_TO_MEM_OR_NULL ||
	       type == ARG_PTR_TO_CTX_OR_NULL ||
	       type == ARG_PTR_TO_SOCKET_OR_NULL ||
	       type == ARG_PTR_TO_ALLOC_MEM_OR_NULL ||
	       type == ARG_PTR_TO_STACK_OR_NULL;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release ||
	       func_id == BPF_FUNC_ringbuf_submit ||
	       func_id == BPF_FUNC_ringbuf_discard;
}

static bool may_be_acquire_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_lookup_tcp ||
		func_id == BPF_FUNC_sk_lookup_udp ||
		func_id == BPF_FUNC_skc_lookup_tcp ||
		func_id == BPF_FUNC_map_lookup_elem ||
		func_id == BPF_FUNC_ringbuf_reserve;
}

static bool is_acquire_function(enum bpf_func_id func_id,
				const struct bpf_map *map)
{
	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;

	if (func_id == BPF_FUNC_sk_lookup_tcp ||
	    func_id == BPF_FUNC_sk_lookup_udp ||
	    func_id == BPF_FUNC_skc_lookup_tcp ||
	    func_id == BPF_FUNC_ringbuf_reserve)
		return true;

	if (func_id == BPF_FUNC_map_lookup_elem &&
	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
	     map_type == BPF_MAP_TYPE_SOCKHASH))
		return true;

	return false;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
		func_id == BPF_FUNC_sk_fullsock ||
		func_id == BPF_FUNC_skc_to_tcp_sock ||
		func_id == BPF_FUNC_skc_to_tcp6_sock ||
		func_id == BPF_FUNC_skc_to_udp6_sock ||
		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
		func_id == BPF_FUNC_skc_to_tcp_request_sock;
}

static bool is_cmpxchg_insn(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_STX &&
	       BPF_MODE(insn->code) == BPF_ATOMIC &&
	       insn->imm == BPF_CMPXCHG;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
	[PTR_TO_SOCK_COMMON]	= "sock_common",
	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
	[PTR_TO_TCP_SOCK]	= "tcp_sock",
	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
	[PTR_TO_TP_BUFFER]	= "tp_buffer",
	[PTR_TO_XDP_SOCK]	= "xdp_sock",
	[PTR_TO_BTF_ID]		= "ptr_",
	[PTR_TO_BTF_ID_OR_NULL]	= "ptr_or_null_",
	[PTR_TO_PERCPU_BTF_ID]	= "percpu_ptr_",
	[PTR_TO_MEM]		= "mem",
	[PTR_TO_MEM_OR_NULL]	= "mem_or_null",
	[PTR_TO_RDONLY_BUF]	= "rdonly_buf",
	[PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null",
	[PTR_TO_RDWR_BUF]	= "rdwr_buf",
	[PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null",
	[PTR_TO_FUNC]		= "func",
	[PTR_TO_MAP_KEY]	= "map_key",
};

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

static const char *kernel_type_name(const struct btf *btf, u32 id)
{
	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}

/* The reg state of a pointer or a bounded scalar was saved when
 * it was spilled to the stack.
 */
static bool is_spilled_reg(const struct bpf_stack_state *stack)
{
	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}

static void scrub_spilled_slot(u8 *stype)
{
	if (*stype != STACK_INVALID)
		*stype = STACK_MISC;
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			if (t == PTR_TO_BTF_ID ||
			    t == PTR_TO_BTF_ID_OR_NULL ||
			    t == PTR_TO_PERCPU_BTF_ID)
				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
			verbose(env, "(id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t))
				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_KEY ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
				if (reg->s32_min_value != reg->smin_value &&
				    reg->s32_min_value != S32_MIN)
					verbose(env, ",s32_min_value=%d",
						(int)(reg->s32_min_value));
				if (reg->s32_max_value != reg->smax_value &&
				    reg->s32_max_value != S32_MAX)
					verbose(env, ",s32_max_value=%d",
						(int)(reg->s32_max_value));
				if (reg->u32_min_value != reg->umin_value &&
				    reg->u32_min_value != U32_MIN)
					verbose(env, ",u32_min_value=%d",
						(int)(reg->u32_min_value));
				if (reg->u32_max_value != reg->umax_value &&
				    reg->u32_max_value != U32_MAX)
					verbose(env, ",u32_max_value=%d",
						(int)(reg->u32_max_value));
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (is_spilled_reg(&state->stack[i])) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", reg_type_str[t]);
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "=%s", types_buf);
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	if (state->in_callback_fn)
		verbose(env, " cb");
	if (state->in_async_callback_fn)
		verbose(env, " async_cb");
	verbose(env, "\n");
}

/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
 * small to hold src. This is different from krealloc since we don't want to preserve
 * the contents of dst.
 *
 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
 * not be allocated.
 */
static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (ZERO_OR_NULL_PTR(src))
		goto out;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	if (ksize(dst) < bytes) {
		kfree(dst);
		dst = kmalloc_track_caller(bytes, flags);
		if (!dst)
			return NULL;
	}

	memcpy(dst, src, bytes);
out:
	return dst ? dst : ZERO_SIZE_PTR;
}

/* resize an array from old_n items to new_n items. the array is reallocated if it's too
 * small to hold new_n items. new items are zeroed out if the array grows.
 *
 * Contrary to krealloc_array, does not free arr if new_n is zero.
 */
static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
{
	if (!new_n || old_n == new_n)
		goto out;

	arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
	if (!arr)
		return NULL;

	if (new_n > old_n)
		memset(arr + old_n * size, 0, (new_n - old_n) * size);

out:
	return arr ? arr : ZERO_SIZE_PTR;
}
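
/* Usage sketch for the two helpers above (hypothetical sizes, shown only to
 * pin down the semantics):
 *
 *	arr = realloc_array(arr, 4, 8, sizeof(*arr)); // items 4..7 zeroed
 *	dst = copy_array(dst, src, n, sizeof(*src), GFP_KERNEL);
 *
 * Both return ZERO_SIZE_PTR instead of NULL for empty results, so a NULL
 * return always means allocation failure (-ENOMEM in the callers below)
 * and a zero-length array needs no special-casing.
 */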

static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
			       sizeof(struct bpf_reference_state), GFP_KERNEL);
	if (!dst->refs)
		return -ENOMEM;

	dst->acquired_refs = src->acquired_refs;
	return 0;
}

static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	size_t n = src->allocated_stack / BPF_REG_SIZE;

	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
				GFP_KERNEL);
	if (!dst->stack)
		return -ENOMEM;

	dst->allocated_stack = src->allocated_stack;
	return 0;
}

static int resize_reference_state(struct bpf_func_state *state, size_t n)
{
	state->refs = realloc_array(state->refs, state->acquired_refs, n,
				    sizeof(struct bpf_reference_state));
	if (!state->refs)
		return -ENOMEM;

	state->acquired_refs = n;
	return 0;
}

static int grow_stack_state(struct bpf_func_state *state, int size)
{
	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;

	if (old_n >= n)
		return 0;

	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
	if (!state->stack)
		return -ENOMEM;

	state->allocated_stack = size;
	return 0;
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = resize_reference_state(state, state->acquired_refs + 1);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}
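
/* A sketch of how the two functions above pair up when the verifier models
 * a helper call (simplified; the real call sites live in check_helper_call()
 * and release_reference(), outside this excerpt):
 *
 *	// helper returns a referenced pointer: remember it
 *	id = acquire_reference_state(env, insn_idx);
 *	if (id < 0)
 *		return id;
 *	regs[BPF_REG_0].ref_obj_id = id;
 *	...
 *	// helper consumes the reference: forget it, or flag the error
 *	err = release_reference_state(cur_func(env), id);
 *	if (err)
 *		verbose(env, "reference has not been acquired before\n");
 */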

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	int i, err;

	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
					    GFP_USER);
	if (!dst_state->jmp_history)
		return -ENOMEM;
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src frame, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx, bool pop_log)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (pop_log)
		bpf_vlog_reset(&env->log, head->log_pos);
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	elem->log_pos = env->log.len_used;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 *    instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 *    a new state for a sequence of branches and all such current
		 *    and cloned states will be pointing to a single parent state
		 *    which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL, false));
	return NULL;
}
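
/* A sketch of how push_stack()/pop_stack() drive path exploration from
 * do_check() and check_cond_jmp_op() (simplified; those callers are outside
 * this excerpt). At a conditional jump both outcomes are walked: the taken
 * branch is parked on the stack and the fall-through is verified first:
 *
 *	other_branch = push_stack(env, *insn_idx + insn->off + 1,
 *				  *insn_idx, false);
 *	if (!other_branch)
 *		return -EFAULT;
 *	// ... keep verifying the fall-through path ...
 *
 *	// when the current path reaches BPF_EXIT, resume a parked state
 *	err = pop_stack(env, &prev_insn_idx, &env->insn_idx, pop_log);
 *
 * Verification of the program ends once pop_stack() returns -ENOENT,
 * i.e. no parked states remain.
 */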

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

/* This helper doesn't clear reg->id */
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;

	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	___mark_reg_known(reg, imm);
}

static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const_subreg(reg->var_off, imm);
	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
{
	switch (reg->type) {
	case PTR_TO_MAP_VALUE_OR_NULL: {
		const struct bpf_map *map = reg->map_ptr;

		if (map->inner_map_meta) {
			reg->type = CONST_PTR_TO_MAP;
			reg->map_ptr = map->inner_map_meta;
			/* transfer reg's id which is unique for every map_lookup_elem
			 * as UID of the inner map.
			 */
			reg->map_uid = reg->id;
		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
			reg->type = PTR_TO_XDP_SOCK;
		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
			reg->type = PTR_TO_SOCKET;
		} else {
			reg->type = PTR_TO_MAP_VALUE;
		}
		break;
	}
	case PTR_TO_SOCKET_OR_NULL:
		reg->type = PTR_TO_SOCKET;
		break;
	case PTR_TO_SOCK_COMMON_OR_NULL:
		reg->type = PTR_TO_SOCK_COMMON;
		break;
	case PTR_TO_TCP_SOCK_OR_NULL:
		reg->type = PTR_TO_TCP_SOCK;
		break;
	case PTR_TO_BTF_ID_OR_NULL:
		reg->type = PTR_TO_BTF_ID;
		break;
	case PTR_TO_MEM_OR_NULL:
		reg->type = PTR_TO_MEM;
		break;
	case PTR_TO_RDONLY_BUF_OR_NULL:
		reg->type = PTR_TO_RDONLY_BUF;
		break;
	case PTR_TO_RDWR_BUF_OR_NULL:
		reg->type = PTR_TO_RDWR_BUF;
		break;
	default:
		WARN_ONCE(1, "unknown nullable register type");
	}
}

Daniel Borkmannde8f3a82017-09-25 02:25:51 +02001191static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1192{
1193 return type_is_pkt_pointer(reg->type);
1194}
1195
1196static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1197{
1198 return reg_is_pkt_pointer(reg) ||
1199 reg->type == PTR_TO_PACKET_END;
1200}
1201
1202/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
1203static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
1204 enum bpf_reg_type which)
1205{
1206 /* The register can already have a range from prior markings.
1207 * This is fine as long as it hasn't been advanced from its
1208 * origin.
1209 */
1210 return reg->type == which &&
1211 reg->id == 0 &&
1212 reg->off == 0 &&
1213 tnum_equals_const(reg->var_off, 0);
1214}
1215
John Fastabend3f50f132020-03-30 14:36:39 -07001216/* Reset the min/max bounds of a register */
1217static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1218{
1219 reg->smin_value = S64_MIN;
1220 reg->smax_value = S64_MAX;
1221 reg->umin_value = 0;
1222 reg->umax_value = U64_MAX;
1223
1224 reg->s32_min_value = S32_MIN;
1225 reg->s32_max_value = S32_MAX;
1226 reg->u32_min_value = 0;
1227 reg->u32_max_value = U32_MAX;
1228}
1229
1230static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
1231{
1232 reg->smin_value = S64_MIN;
1233 reg->smax_value = S64_MAX;
1234 reg->umin_value = 0;
1235 reg->umax_value = U64_MAX;
1236}
1237
1238static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
1239{
1240 reg->s32_min_value = S32_MIN;
1241 reg->s32_max_value = S32_MAX;
1242 reg->u32_min_value = 0;
1243 reg->u32_max_value = U32_MAX;
1244}
1245
1246static void __update_reg32_bounds(struct bpf_reg_state *reg)
1247{
1248 struct tnum var32_off = tnum_subreg(reg->var_off);
1249
1250 /* min signed is max(sign bit) | min(other bits) */
1251 reg->s32_min_value = max_t(s32, reg->s32_min_value,
1252 var32_off.value | (var32_off.mask & S32_MIN));
1253 /* max signed is min(sign bit) | max(other bits) */
1254 reg->s32_max_value = min_t(s32, reg->s32_max_value,
1255 var32_off.value | (var32_off.mask & S32_MAX));
1256 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
1257 reg->u32_max_value = min(reg->u32_max_value,
1258 (u32)(var32_off.value | var32_off.mask));
1259}
1260
1261static void __update_reg64_bounds(struct bpf_reg_state *reg)
Edward Creeb03c9f92017-08-07 15:26:36 +01001262{
1263 /* min signed is max(sign bit) | min(other bits) */
1264 reg->smin_value = max_t(s64, reg->smin_value,
1265 reg->var_off.value | (reg->var_off.mask & S64_MIN));
1266 /* max signed is min(sign bit) | max(other bits) */
1267 reg->smax_value = min_t(s64, reg->smax_value,
1268 reg->var_off.value | (reg->var_off.mask & S64_MAX));
1269 reg->umin_value = max(reg->umin_value, reg->var_off.value);
1270 reg->umax_value = min(reg->umax_value,
1271 reg->var_off.value | reg->var_off.mask);
1272}
1273
John Fastabend3f50f132020-03-30 14:36:39 -07001274static void __update_reg_bounds(struct bpf_reg_state *reg)
1275{
1276 __update_reg32_bounds(reg);
1277 __update_reg64_bounds(reg);
1278}
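
/* Worked example for the three helpers above (a minimal stand-alone
 * sketch; ex_tnum and ex_tighten_unsigned() are illustrative stand-ins,
 * not the kernel's struct tnum API). Take var_off = (value=0x4, mask=0x3):
 * bit 2 is known set, bits 0-1 are unknown, all higher bits are known
 * clear. The known bits alone confine the value to [0x4, 0x7], so umin
 * can be raised to 0x4 and umax lowered to 0x7:
 */
struct ex_tnum { u64 value; u64 mask; };

static void ex_tighten_unsigned(struct ex_tnum t, u64 *umin, u64 *umax)
{
	/* all unknown bits clear gives the smallest value the tnum allows */
	*umin = max(*umin, t.value);
	/* all unknown bits set gives the largest value the tnum allows */
	*umax = min(*umax, t.value | t.mask);
}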
1279
Edward Creeb03c9f92017-08-07 15:26:36 +01001280/* Uses signed min/max values to inform unsigned, and vice-versa */
John Fastabend3f50f132020-03-30 14:36:39 -07001281static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
1282{
1283 /* Learn sign from signed bounds.
1284 * If we cannot cross the sign boundary, then signed and unsigned bounds
1285 * are the same, so combine. This works even in the negative case, e.g.
1286 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1287 */
1288 if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
1289 reg->s32_min_value = reg->u32_min_value =
1290 max_t(u32, reg->s32_min_value, reg->u32_min_value);
1291 reg->s32_max_value = reg->u32_max_value =
1292 min_t(u32, reg->s32_max_value, reg->u32_max_value);
1293 return;
1294 }
1295 /* Learn sign from unsigned bounds. Signed bounds cross the sign
1296 * boundary, so we must be careful.
1297 */
1298 if ((s32)reg->u32_max_value >= 0) {
1299 /* Positive. We can't learn anything from the smin, but smax
1300 * is positive, hence safe.
1301 */
1302 reg->s32_min_value = reg->u32_min_value;
1303 reg->s32_max_value = reg->u32_max_value =
1304 min_t(u32, reg->s32_max_value, reg->u32_max_value);
1305 } else if ((s32)reg->u32_min_value < 0) {
1306 /* Negative. We can't learn anything from the smax, but smin
1307 * is negative, hence safe.
1308 */
1309 reg->s32_min_value = reg->u32_min_value =
1310 max_t(u32, reg->s32_min_value, reg->u32_min_value);
1311 reg->s32_max_value = reg->u32_max_value;
1312 }
1313}
1314
1315static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
Edward Creeb03c9f92017-08-07 15:26:36 +01001316{
1317 /* Learn sign from signed bounds.
1318 * If we cannot cross the sign boundary, then signed and unsigned bounds
1319 * are the same, so combine. This works even in the negative case, e.g.
1320 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1321 */
1322 if (reg->smin_value >= 0 || reg->smax_value < 0) {
1323 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1324 reg->umin_value);
1325 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1326 reg->umax_value);
1327 return;
1328 }
1329 /* Learn sign from unsigned bounds. Signed bounds cross the sign
1330 * boundary, so we must be careful.
1331 */
1332 if ((s64)reg->umax_value >= 0) {
1333 /* Positive. We can't learn anything from the smin, but smax
1334 * is positive, hence safe.
1335 */
1336 reg->smin_value = reg->umin_value;
1337 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1338 reg->umax_value);
1339 } else if ((s64)reg->umin_value < 0) {
1340 /* Negative. We can't learn anything from the smax, but smin
1341 * is negative, hence safe.
1342 */
1343 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1344 reg->umin_value);
1345 reg->smax_value = reg->umax_value;
1346 }
1347}
1348
John Fastabend3f50f132020-03-30 14:36:39 -07001349static void __reg_deduce_bounds(struct bpf_reg_state *reg)
1350{
1351 __reg32_deduce_bounds(reg);
1352 __reg64_deduce_bounds(reg);
1353}
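
/* Concrete case for the sign deduction above: with -3 s<= x s<= -1 the
 * signed range does not cross zero, so the same two constants also bound
 * x as an unsigned value: 0xfffffffffffffffd u<= x u<= 0xffffffffffffffff.
 * The guard that makes this legal, in isolation (an illustrative helper,
 * not kernel API):
 */
static bool ex_signed_range_has_one_sign(s64 smin, s64 smax)
{
	/* true when [smin, smax] lies entirely on one side of zero, i.e.
	 * the signed and unsigned orderings of the range agree
	 */
	return smin >= 0 || smax < 0;
}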
1354
Edward Creeb03c9f92017-08-07 15:26:36 +01001355/* Attempts to improve var_off based on unsigned min/max information */
1356static void __reg_bound_offset(struct bpf_reg_state *reg)
1357{
John Fastabend3f50f132020-03-30 14:36:39 -07001358 struct tnum var64_off = tnum_intersect(reg->var_off,
1359 tnum_range(reg->umin_value,
1360 reg->umax_value));
1361 struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
1362 tnum_range(reg->u32_min_value,
1363 reg->u32_max_value));
1364
1365 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
Edward Creeb03c9f92017-08-07 15:26:36 +01001366}
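
/* Example of the intersection above: if umin/umax say x is in [0, 0x7f]
 * and var_off says the low two bits are known zero, tnum_range() turns
 * the interval into (value=0, mask=0x7f) and tnum_intersect() combines
 * both into (value=0, mask=0x7c): only bits 2-6 remain unknown. A rough
 * sketch of tnum_range() for illustration (ex_tnum as above; the real
 * implementation lives in kernel/bpf/tnum.c):
 */
static struct ex_tnum ex_tnum_range(u64 min, u64 max)
{
	u64 chi = min ^ max;	/* bit positions where min and max differ */
	u64 delta;

	if (fls64(chi) > 63)	/* 1ULL << 64 would be undefined */
		return (struct ex_tnum){ .value = 0, .mask = -1ULL };
	/* every bit below the highest differing one becomes unknown */
	delta = (1ULL << fls64(chi)) - 1;
	return (struct ex_tnum){ .value = min & ~delta, .mask = delta };
}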
1367
John Fastabend3f50f132020-03-30 14:36:39 -07001368static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
Edward Creeb03c9f92017-08-07 15:26:36 +01001369{
John Fastabend3f50f132020-03-30 14:36:39 -07001370 reg->umin_value = reg->u32_min_value;
1371 reg->umax_value = reg->u32_max_value;
1372 /* Attempt to pull 32-bit signed bounds into 64-bit bounds
1373	 * but they must be positive; otherwise set worst-case bounds
1374	 * and refine later from the tnum.
1375 */
John Fastabend3a71dc32020-05-29 10:28:40 -07001376 if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
John Fastabend3f50f132020-03-30 14:36:39 -07001377 reg->smax_value = reg->s32_max_value;
1378 else
1379 reg->smax_value = U32_MAX;
John Fastabend3a71dc32020-05-29 10:28:40 -07001380 if (reg->s32_min_value >= 0)
1381 reg->smin_value = reg->s32_min_value;
1382 else
1383 reg->smin_value = 0;
John Fastabend3f50f132020-03-30 14:36:39 -07001384}
1385
1386static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
1387{
1388	/* special case when the 64-bit register has its upper 32 bits zeroed.
1389	 * Typically happens after a zext or <<32, >>32 sequence, allowing us
1390	 * to use the 32-bit bounds directly.
1391 */
1392 if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
1393 __reg_assign_32_into_64(reg);
1394 } else {
1395 /* Otherwise the best we can do is push lower 32bit known and
1396 * unknown bits into register (var_off set from jmp logic)
1397 * then learn as much as possible from the 64-bit tnum
1398 * known and unknown bits. The previous smin/smax bounds are
1399 * invalid here because of jmp32 compare so mark them unknown
1400 * so they do not impact tnum bounds calculation.
1401 */
1402 __mark_reg64_unbounded(reg);
1403 __update_reg_bounds(reg);
1404 }
1405
1406 /* Intersecting with the old var_off might have improved our bounds
1407 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1408 * then new var_off is (0; 0x7f...fc) which improves our umax.
1409 */
1410 __reg_deduce_bounds(reg);
1411 __reg_bound_offset(reg);
1412 __update_reg_bounds(reg);
1413}
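
/* Example for the special case above: after "w1 = w2" (a 32-bit mov) the
 * upper 32 bits of r1 are known zero, tnum_clear_subreg(var_off) equals
 * the constant 0, and the 32-bit bounds transfer verbatim. After a jmp32
 * on a register whose upper half is unknown, only the else branch applies.
 * The zext test itself, spelled out (illustrative helper on the ex_tnum
 * stand-in from above):
 */
static bool ex_upper_half_known_zero(struct ex_tnum t)
{
	/* upper 32 bits must be known (not in mask) and known to be zero */
	return !(t.mask >> 32) && !(t.value >> 32);
}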
1414
1415static bool __reg64_bound_s32(s64 a)
1416{
Alexei Starovoitov388e2c02021-11-01 15:21:52 -07001417 return a >= S32_MIN && a <= S32_MAX;
John Fastabend3f50f132020-03-30 14:36:39 -07001418}
1419
1420static bool __reg64_bound_u32(u64 a)
1421{
Alexei Starovoitovb9979db2021-11-01 15:21:51 -07001422 return a >= U32_MIN && a <= U32_MAX;
John Fastabend3f50f132020-03-30 14:36:39 -07001423}
1424
1425static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
1426{
1427 __mark_reg32_unbounded(reg);
1428
Alexei Starovoitovb02709582020-12-08 19:01:51 +01001429 if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
John Fastabend3f50f132020-03-30 14:36:39 -07001430 reg->s32_min_value = (s32)reg->smin_value;
John Fastabend3f50f132020-03-30 14:36:39 -07001431 reg->s32_max_value = (s32)reg->smax_value;
Alexei Starovoitovb02709582020-12-08 19:01:51 +01001432 }
Daniel Borkmann10bf4e82021-04-23 13:59:55 +00001433 if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
John Fastabend3f50f132020-03-30 14:36:39 -07001434 reg->u32_min_value = (u32)reg->umin_value;
John Fastabend3f50f132020-03-30 14:36:39 -07001435 reg->u32_max_value = (u32)reg->umax_value;
Daniel Borkmann10bf4e82021-04-23 13:59:55 +00001436 }
John Fastabend3f50f132020-03-30 14:36:39 -07001437
1438 /* Intersecting with the old var_off might have improved our bounds
1439 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1440 * then new var_off is (0; 0x7f...fc) which improves our umax.
1441 */
1442 __reg_deduce_bounds(reg);
1443 __reg_bound_offset(reg);
1444 __update_reg_bounds(reg);
Edward Creeb03c9f92017-08-07 15:26:36 +01001445}
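
/* Example for the 64->32 direction above: 64-bit bounds [smin=-5,
 * smax=100] fit in s32, so the s32 bounds become [-5, 100]. Had smax been
 * 0x100000000, truncation to 32 bits would wrap it to 0 and invent a
 * bogus upper bound, so the s32 bounds must stay unbounded and only
 * var_off can refine them. The guard, spelled out (illustrative helper):
 */
static bool ex_s64_range_fits_s32(s64 smin, s64 smax)
{
	/* both endpoints must survive the s64 -> s32 truncation intact */
	return smin >= S32_MIN && smin <= S32_MAX &&
	       smax >= S32_MIN && smax <= S32_MAX;
}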
1446
Edward Creef1174f72017-08-07 15:26:19 +01001447/* Mark a register as having a completely unknown (scalar) value. */
Daniel Borkmannf54c7892019-12-22 23:37:40 +01001448static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1449 struct bpf_reg_state *reg)
Edward Creef1174f72017-08-07 15:26:19 +01001450{
Alexei Starovoitova9c676b2018-09-04 19:13:44 -07001451 /*
1452	 * Clear type, id, off, and union(map_ptr, range), plus the
1453	 * padding between 'type' and the union
1454 */
1455 memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
Edward Creef1174f72017-08-07 15:26:19 +01001456 reg->type = SCALAR_VALUE;
Edward Creef1174f72017-08-07 15:26:19 +01001457 reg->var_off = tnum_unknown;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08001458 reg->frameno = 0;
Alexei Starovoitov2c78ee82020-05-13 16:03:54 -07001459 reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
Edward Creeb03c9f92017-08-07 15:26:36 +01001460 __mark_reg_unbounded(reg);
Edward Creef1174f72017-08-07 15:26:19 +01001461}
1462
Jakub Kicinski61bd5212017-10-09 10:30:11 -07001463static void mark_reg_unknown(struct bpf_verifier_env *env,
1464 struct bpf_reg_state *regs, u32 regno)
Edward Creef1174f72017-08-07 15:26:19 +01001465{
1466 if (WARN_ON(regno >= MAX_BPF_REG)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07001467 verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
Alexei Starovoitov19ceb412017-11-30 21:31:37 -08001468 /* Something bad happened, let's kill all regs except FP */
1469 for (regno = 0; regno < BPF_REG_FP; regno++)
Daniel Borkmannf54c7892019-12-22 23:37:40 +01001470 __mark_reg_not_init(env, regs + regno);
Edward Creef1174f72017-08-07 15:26:19 +01001471 return;
1472 }
Daniel Borkmannf54c7892019-12-22 23:37:40 +01001473 __mark_reg_unknown(env, regs + regno);
Edward Creef1174f72017-08-07 15:26:19 +01001474}
1475
Daniel Borkmannf54c7892019-12-22 23:37:40 +01001476static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1477 struct bpf_reg_state *reg)
Edward Creef1174f72017-08-07 15:26:19 +01001478{
Daniel Borkmannf54c7892019-12-22 23:37:40 +01001479 __mark_reg_unknown(env, reg);
Edward Creef1174f72017-08-07 15:26:19 +01001480 reg->type = NOT_INIT;
1481}
1482
Jakub Kicinski61bd5212017-10-09 10:30:11 -07001483static void mark_reg_not_init(struct bpf_verifier_env *env,
1484 struct bpf_reg_state *regs, u32 regno)
Daniel Borkmanna9789ef2017-05-25 01:05:06 +02001485{
Edward Creef1174f72017-08-07 15:26:19 +01001486 if (WARN_ON(regno >= MAX_BPF_REG)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07001487 verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
Alexei Starovoitov19ceb412017-11-30 21:31:37 -08001488 /* Something bad happened, let's kill all regs except FP */
1489 for (regno = 0; regno < BPF_REG_FP; regno++)
Daniel Borkmannf54c7892019-12-22 23:37:40 +01001490 __mark_reg_not_init(env, regs + regno);
Edward Creef1174f72017-08-07 15:26:19 +01001491 return;
1492 }
Daniel Borkmannf54c7892019-12-22 23:37:40 +01001493 __mark_reg_not_init(env, regs + regno);
Daniel Borkmanna9789ef2017-05-25 01:05:06 +02001494}
1495
Andrey Ignatov41c48f32020-06-19 14:11:43 -07001496static void mark_btf_ld_reg(struct bpf_verifier_env *env,
1497 struct bpf_reg_state *regs, u32 regno,
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08001498 enum bpf_reg_type reg_type,
1499 struct btf *btf, u32 btf_id)
Andrey Ignatov41c48f32020-06-19 14:11:43 -07001500{
1501 if (reg_type == SCALAR_VALUE) {
1502 mark_reg_unknown(env, regs, regno);
1503 return;
1504 }
1505 mark_reg_known_zero(env, regs, regno);
1506 regs[regno].type = PTR_TO_BTF_ID;
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08001507 regs[regno].btf = btf;
Andrey Ignatov41c48f32020-06-19 14:11:43 -07001508 regs[regno].btf_id = btf_id;
1509}
1510
Jiong Wang5327ed32019-05-24 23:25:12 +01001511#define DEF_NOT_SUBREG (0)
Jakub Kicinski61bd5212017-10-09 10:30:11 -07001512static void init_reg_state(struct bpf_verifier_env *env,
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08001513 struct bpf_func_state *state)
Alexei Starovoitov17a52672014-09-26 00:17:06 -07001514{
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08001515 struct bpf_reg_state *regs = state->regs;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07001516 int i;
1517
Edward Creedc503a82017-08-15 20:34:35 +01001518 for (i = 0; i < MAX_BPF_REG; i++) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07001519 mark_reg_not_init(env, regs, i);
Edward Creedc503a82017-08-15 20:34:35 +01001520 regs[i].live = REG_LIVE_NONE;
Edward Cree679c7822018-08-22 20:02:19 +01001521 regs[i].parent = NULL;
Jiong Wang5327ed32019-05-24 23:25:12 +01001522 regs[i].subreg_def = DEF_NOT_SUBREG;
Edward Creedc503a82017-08-15 20:34:35 +01001523 }
Alexei Starovoitov17a52672014-09-26 00:17:06 -07001524
1525 /* frame pointer */
Edward Creef1174f72017-08-07 15:26:19 +01001526 regs[BPF_REG_FP].type = PTR_TO_STACK;
Jakub Kicinski61bd5212017-10-09 10:30:11 -07001527 mark_reg_known_zero(env, regs, BPF_REG_FP);
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08001528 regs[BPF_REG_FP].frameno = state->frameno;
Daniel Borkmann6760bf22016-12-18 01:52:59 +01001529}
1530
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08001531#define BPF_MAIN_FUNC (-1)
1532static void init_func_state(struct bpf_verifier_env *env,
1533 struct bpf_func_state *state,
1534 int callsite, int frameno, int subprogno)
1535{
1536 state->callsite = callsite;
1537 state->frameno = frameno;
1538 state->subprogno = subprogno;
1539 init_reg_state(env, state);
1540}
1541
Alexei Starovoitovbfc6bb72021-07-14 17:54:14 -07001542/* Similar to push_stack(), but for async callbacks */
1543static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
1544 int insn_idx, int prev_insn_idx,
1545 int subprog)
1546{
1547 struct bpf_verifier_stack_elem *elem;
1548 struct bpf_func_state *frame;
1549
1550 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1551 if (!elem)
1552 goto err;
1553
1554 elem->insn_idx = insn_idx;
1555 elem->prev_insn_idx = prev_insn_idx;
1556 elem->next = env->head;
1557 elem->log_pos = env->log.len_used;
1558 env->head = elem;
1559 env->stack_size++;
1560 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1561 verbose(env,
1562 "The sequence of %d jumps is too complex for async cb.\n",
1563 env->stack_size);
1564 goto err;
1565 }
1566	/* Unlike push_stack(), do not copy_verifier_state().
1567	 * The caller state doesn't matter.
1568	 * This is an async callback: it starts with a fresh stack.
1569	 * Initialize it similarly to do_check_common().
1570 */
1571 elem->st.branches = 1;
1572 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1573 if (!frame)
1574 goto err;
1575 init_func_state(env, frame,
1576 BPF_MAIN_FUNC /* callsite */,
1577 0 /* frameno within this callchain */,
1578 subprog /* subprog number within this prog */);
1579 elem->st.frame[0] = frame;
1580 return &elem->st;
1581err:
1582 free_verifier_state(env->cur_state, true);
1583 env->cur_state = NULL;
1584 /* pop all elements and return */
1585 while (!pop_stack(env, NULL, NULL, false));
1586 return NULL;
1587}
1588
1589
Alexei Starovoitov17a52672014-09-26 00:17:06 -07001590enum reg_arg_type {
1591 SRC_OP, /* register is used as source operand */
1592 DST_OP, /* register is used as destination operand */
1593 DST_OP_NO_MARK /* same as above, check only, don't mark */
1594};
1595
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001596static int cmp_subprogs(const void *a, const void *b)
1597{
Jiong Wang9c8105b2018-05-02 16:17:18 -04001598 return ((struct bpf_subprog_info *)a)->start -
1599 ((struct bpf_subprog_info *)b)->start;
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001600}
1601
1602static int find_subprog(struct bpf_verifier_env *env, int off)
1603{
Jiong Wang9c8105b2018-05-02 16:17:18 -04001604 struct bpf_subprog_info *p;
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001605
Jiong Wang9c8105b2018-05-02 16:17:18 -04001606 p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1607 sizeof(env->subprog_info[0]), cmp_subprogs);
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001608 if (!p)
1609 return -ENOENT;
Jiong Wang9c8105b2018-05-02 16:17:18 -04001610 return p - env->subprog_info;
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001611
1612}
1613
1614static int add_subprog(struct bpf_verifier_env *env, int off)
1615{
1616 int insn_cnt = env->prog->len;
1617 int ret;
1618
1619 if (off >= insn_cnt || off < 0) {
1620 verbose(env, "call to invalid destination\n");
1621 return -EINVAL;
1622 }
1623 ret = find_subprog(env, off);
1624 if (ret >= 0)
Yonghong Song282a0f42021-02-26 12:49:24 -08001625 return ret;
Jiong Wang4cb3d992018-05-02 16:17:19 -04001626 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001627 verbose(env, "too many subprograms\n");
1628 return -E2BIG;
1629 }
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001630 /* determine subprog starts. The end is one before the next starts */
Jiong Wang9c8105b2018-05-02 16:17:18 -04001631 env->subprog_info[env->subprog_cnt++].start = off;
1632 sort(env->subprog_info, env->subprog_cnt,
1633 sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
Yonghong Song282a0f42021-02-26 12:49:24 -08001634 return env->subprog_cnt - 1;
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001635}
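
/* Usage sketch for the two helpers above (hypothetical offsets, assuming
 * a program of more than 40 insns): subprog_info is kept sorted by
 * ->start after every insertion, so lookups can use bsearch().
 */
static void ex_subprog_usage(struct bpf_verifier_env *env)
{
	add_subprog(env, 0);	/* entry function */
	add_subprog(env, 40);
	add_subprog(env, 12);
	/* subprog_info starts are now {0, 12, 40} */
	WARN_ON(find_subprog(env, 12) != 1);	   /* exact start offsets match */
	WARN_ON(find_subprog(env, 13) != -ENOENT); /* anything else does not */
}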
1636
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301637#define MAX_KFUNC_DESCS 256
1638#define MAX_KFUNC_BTFS 256
1639
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001640struct bpf_kfunc_desc {
1641 struct btf_func_model func_model;
1642 u32 func_id;
1643 s32 imm;
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301644 u16 offset;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001645};
1646
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301647struct bpf_kfunc_btf {
1648 struct btf *btf;
1649 struct module *module;
1650 u16 offset;
1651};
1652
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001653struct bpf_kfunc_desc_tab {
1654 struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
1655 u32 nr_descs;
1656};
1657
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301658struct bpf_kfunc_btf_tab {
1659 struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
1660 u32 nr_descs;
1661};
1662
1663static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001664{
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001665 const struct bpf_kfunc_desc *d0 = a;
1666 const struct bpf_kfunc_desc *d1 = b;
1667
1668 /* func_id is not greater than BTF_MAX_TYPE */
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301669 return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
1670}
1671
1672static int kfunc_btf_cmp_by_off(const void *a, const void *b)
1673{
1674 const struct bpf_kfunc_btf *d0 = a;
1675 const struct bpf_kfunc_btf *d1 = b;
1676
1677 return d0->offset - d1->offset;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001678}
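
/* kfunc_desc_cmp_by_id_off() above orders descriptors by the composite
 * key (func_id, offset); the GNU "?:" shorthand falls through to the
 * offset comparison only when the func_ids compare equal. An equivalent
 * long form, for illustration:
 */
static int ex_kfunc_desc_cmp_long_form(const struct bpf_kfunc_desc *d0,
				       const struct bpf_kfunc_desc *d1)
{
	if (d0->func_id != d1->func_id)
		/* ids fit within BTF_MAX_TYPE, so the subtraction can't overflow */
		return d0->func_id - d1->func_id;
	return d0->offset - d1->offset;
}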
1679
1680static const struct bpf_kfunc_desc *
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301681find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001682{
1683 struct bpf_kfunc_desc desc = {
1684 .func_id = func_id,
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301685 .offset = offset,
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001686 };
1687 struct bpf_kfunc_desc_tab *tab;
1688
1689 tab = prog->aux->kfunc_tab;
1690 return bsearch(&desc, tab->descs, tab->nr_descs,
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301691 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001692}
1693
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301694static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
1695 s16 offset, struct module **btf_modp)
1696{
1697 struct bpf_kfunc_btf kf_btf = { .offset = offset };
1698 struct bpf_kfunc_btf_tab *tab;
1699 struct bpf_kfunc_btf *b;
1700 struct module *mod;
1701 struct btf *btf;
1702 int btf_fd;
1703
1704 tab = env->prog->aux->kfunc_btf_tab;
1705 b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
1706 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
1707 if (!b) {
1708 if (tab->nr_descs == MAX_KFUNC_BTFS) {
1709 verbose(env, "too many different module BTFs\n");
1710 return ERR_PTR(-E2BIG);
1711 }
1712
1713 if (bpfptr_is_null(env->fd_array)) {
1714 verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
1715 return ERR_PTR(-EPROTO);
1716 }
1717
1718 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
1719 offset * sizeof(btf_fd),
1720 sizeof(btf_fd)))
1721 return ERR_PTR(-EFAULT);
1722
1723 btf = btf_get_by_fd(btf_fd);
Kumar Kartikeya Dwivedi588cd7ef2021-10-09 09:39:00 +05301724 if (IS_ERR(btf)) {
1725 verbose(env, "invalid module BTF fd specified\n");
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301726 return btf;
Kumar Kartikeya Dwivedi588cd7ef2021-10-09 09:39:00 +05301727 }
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301728
1729 if (!btf_is_module(btf)) {
1730 verbose(env, "BTF fd for kfunc is not a module BTF\n");
1731 btf_put(btf);
1732 return ERR_PTR(-EINVAL);
1733 }
1734
1735 mod = btf_try_get_module(btf);
1736 if (!mod) {
1737 btf_put(btf);
1738 return ERR_PTR(-ENXIO);
1739 }
1740
1741 b = &tab->descs[tab->nr_descs++];
1742 b->btf = btf;
1743 b->module = mod;
1744 b->offset = offset;
1745
1746 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1747 kfunc_btf_cmp_by_off, NULL);
1748 }
1749 if (btf_modp)
1750 *btf_modp = b->module;
1751 return b->btf;
1752}
1753
1754void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
1755{
1756 if (!tab)
1757 return;
1758
1759 while (tab->nr_descs--) {
1760 module_put(tab->descs[tab->nr_descs].module);
1761 btf_put(tab->descs[tab->nr_descs].btf);
1762 }
1763 kfree(tab);
1764}
1765
1766static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env,
1767 u32 func_id, s16 offset,
1768 struct module **btf_modp)
1769{
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301770 if (offset) {
1771 if (offset < 0) {
1772	 /* In the future, this could be allowed, increasing the limit
1773	 * of the fd index into fd_array by interpreting it as a u16.
1774 */
1775 verbose(env, "negative offset disallowed for kernel module function call\n");
1776 return ERR_PTR(-EINVAL);
1777 }
1778
Kumar Kartikeya Dwivedi588cd7ef2021-10-09 09:39:00 +05301779 return __find_kfunc_desc_btf(env, offset, btf_modp);
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301780 }
1781 return btf_vmlinux ?: ERR_PTR(-ENOENT);
1782}
1783
1784static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001785{
1786 const struct btf_type *func, *func_proto;
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301787 struct bpf_kfunc_btf_tab *btf_tab;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001788 struct bpf_kfunc_desc_tab *tab;
1789 struct bpf_prog_aux *prog_aux;
1790 struct bpf_kfunc_desc *desc;
1791 const char *func_name;
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301792 struct btf *desc_btf;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001793 unsigned long addr;
1794 int err;
1795
1796 prog_aux = env->prog->aux;
1797 tab = prog_aux->kfunc_tab;
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301798 btf_tab = prog_aux->kfunc_btf_tab;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001799 if (!tab) {
1800 if (!btf_vmlinux) {
1801 verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
1802 return -ENOTSUPP;
1803 }
1804
1805 if (!env->prog->jit_requested) {
1806 verbose(env, "JIT is required for calling kernel function\n");
1807 return -ENOTSUPP;
1808 }
1809
1810 if (!bpf_jit_supports_kfunc_call()) {
1811 verbose(env, "JIT does not support calling kernel function\n");
1812 return -ENOTSUPP;
1813 }
1814
1815 if (!env->prog->gpl_compatible) {
1816 verbose(env, "cannot call kernel function from non-GPL compatible program\n");
1817 return -EINVAL;
1818 }
1819
1820 tab = kzalloc(sizeof(*tab), GFP_KERNEL);
1821 if (!tab)
1822 return -ENOMEM;
1823 prog_aux->kfunc_tab = tab;
1824 }
1825
Kumar Kartikeya Dwivedia5d82722021-10-02 06:47:50 +05301826 /* func_id == 0 is always invalid, but instead of returning an error, be
1827	 * conservative and wait until the code elimination pass before
1828	 * returning an error, so that BPF programs loaded from userspace may
1829	 * contain invalid calls as long as they get pruned out. Such calls are
1830	 * also required to leave offset untouched.
1831 */
1832 if (!func_id && !offset)
1833 return 0;
1834
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301835 if (!btf_tab && offset) {
1836 btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
1837 if (!btf_tab)
1838 return -ENOMEM;
1839 prog_aux->kfunc_btf_tab = btf_tab;
1840 }
1841
1842 desc_btf = find_kfunc_desc_btf(env, func_id, offset, NULL);
1843 if (IS_ERR(desc_btf)) {
1844 verbose(env, "failed to find BTF for kernel function\n");
1845 return PTR_ERR(desc_btf);
1846 }
1847
1848 if (find_kfunc_desc(env->prog, func_id, offset))
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001849 return 0;
1850
1851 if (tab->nr_descs == MAX_KFUNC_DESCS) {
1852 verbose(env, "too many different kernel function calls\n");
1853 return -E2BIG;
1854 }
1855
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301856 func = btf_type_by_id(desc_btf, func_id);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001857 if (!func || !btf_type_is_func(func)) {
1858 verbose(env, "kernel btf_id %u is not a function\n",
1859 func_id);
1860 return -EINVAL;
1861 }
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301862 func_proto = btf_type_by_id(desc_btf, func->type);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001863 if (!func_proto || !btf_type_is_func_proto(func_proto)) {
1864 verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
1865 func_id);
1866 return -EINVAL;
1867 }
1868
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301869 func_name = btf_name_by_offset(desc_btf, func->name_off);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001870 addr = kallsyms_lookup_name(func_name);
1871 if (!addr) {
1872 verbose(env, "cannot find address for kernel function %s\n",
1873 func_name);
1874 return -EINVAL;
1875 }
1876
1877 desc = &tab->descs[tab->nr_descs++];
1878 desc->func_id = func_id;
Kees Cook3d717fa2021-09-28 16:09:45 -07001879 desc->imm = BPF_CALL_IMM(addr);
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301880 desc->offset = offset;
1881 err = btf_distill_func_proto(&env->log, desc_btf,
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001882 func_proto, func_name,
1883 &desc->func_model);
1884 if (!err)
1885 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301886 kfunc_desc_cmp_by_id_off, NULL);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001887 return err;
1888}
1889
1890static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
1891{
1892 const struct bpf_kfunc_desc *d0 = a;
1893 const struct bpf_kfunc_desc *d1 = b;
1894
1895 if (d0->imm > d1->imm)
1896 return 1;
1897 else if (d0->imm < d1->imm)
1898 return -1;
1899 return 0;
1900}
1901
1902static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
1903{
1904 struct bpf_kfunc_desc_tab *tab;
1905
1906 tab = prog->aux->kfunc_tab;
1907 if (!tab)
1908 return;
1909
1910 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1911 kfunc_desc_cmp_by_imm, NULL);
1912}
1913
1914bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
1915{
1916 return !!prog->aux->kfunc_tab;
1917}
1918
1919const struct btf_func_model *
1920bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
1921 const struct bpf_insn *insn)
1922{
1923 const struct bpf_kfunc_desc desc = {
1924 .imm = insn->imm,
1925 };
1926 const struct bpf_kfunc_desc *res;
1927 struct bpf_kfunc_desc_tab *tab;
1928
1929 tab = prog->aux->kfunc_tab;
1930 res = bsearch(&desc, tab->descs, tab->nr_descs,
1931 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm);
1932
1933 return res ? &res->func_model : NULL;
1934}
1935
1936static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
1937{
Jiong Wang9c8105b2018-05-02 16:17:18 -04001938 struct bpf_subprog_info *subprog = env->subprog_info;
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001939 struct bpf_insn *insn = env->prog->insnsi;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001940 int i, ret, insn_cnt = env->prog->len;
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001941
Jiong Wangf910cef2018-05-02 16:17:17 -04001942 /* Add entry function. */
1943 ret = add_subprog(env, 0);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001944 if (ret)
Jiong Wangf910cef2018-05-02 16:17:17 -04001945 return ret;
1946
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001947 for (i = 0; i < insn_cnt; i++, insn++) {
1948 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
1949 !bpf_pseudo_kfunc_call(insn))
Yonghong Song69c087b2021-02-26 12:49:25 -08001950 continue;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001951
Alexei Starovoitov2c78ee82020-05-13 16:03:54 -07001952 if (!env->bpf_capable) {
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001953 verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001954 return -EPERM;
1955 }
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001956
Martin KaFai Lau3990ed42021-11-05 18:40:14 -07001957 if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001958 ret = add_subprog(env, i + insn->imm + 1);
Martin KaFai Lau3990ed42021-11-05 18:40:14 -07001959 else
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05301960 ret = add_kfunc_call(env, insn->imm, insn->off);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001961
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001962 if (ret < 0)
1963 return ret;
1964 }
1965
Jiong Wang4cb3d992018-05-02 16:17:19 -04001966	/* Add a fake 'exit' subprog to simplify the subprog iteration
1967	 * logic. 'subprog_cnt' should not be increased.
1968 */
1969 subprog[env->subprog_cnt].start = insn_cnt;
1970
Alexei Starovoitov06ee7112019-04-01 21:27:40 -07001971 if (env->log.level & BPF_LOG_LEVEL2)
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001972 for (i = 0; i < env->subprog_cnt; i++)
Jiong Wang9c8105b2018-05-02 16:17:18 -04001973 verbose(env, "func#%d @%d\n", i, subprog[i].start);
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001974
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07001975 return 0;
1976}
1977
1978static int check_subprogs(struct bpf_verifier_env *env)
1979{
1980 int i, subprog_start, subprog_end, off, cur_subprog = 0;
1981 struct bpf_subprog_info *subprog = env->subprog_info;
1982 struct bpf_insn *insn = env->prog->insnsi;
1983 int insn_cnt = env->prog->len;
1984
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001985 /* now check that all jumps are within the same subprog */
Jiong Wang4cb3d992018-05-02 16:17:19 -04001986 subprog_start = subprog[cur_subprog].start;
1987 subprog_end = subprog[cur_subprog + 1].start;
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001988 for (i = 0; i < insn_cnt; i++) {
1989 u8 code = insn[i].code;
1990
Maciej Fijalkowski7f6e4312020-09-16 23:10:07 +02001991 if (code == (BPF_JMP | BPF_CALL) &&
1992 insn[i].imm == BPF_FUNC_tail_call &&
1993 insn[i].src_reg != BPF_PSEUDO_CALL)
1994 subprog[cur_subprog].has_tail_call = true;
Alexei Starovoitov09b28d72020-09-17 19:09:18 -07001995 if (BPF_CLASS(code) == BPF_LD &&
1996 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
1997 subprog[cur_subprog].has_ld_abs = true;
Jiong Wang092ed092019-01-26 12:26:01 -05001998 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08001999 goto next;
2000 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2001 goto next;
2002 off = i + insn[i].off + 1;
2003 if (off < subprog_start || off >= subprog_end) {
2004 verbose(env, "jump out of range from insn %d to %d\n", i, off);
2005 return -EINVAL;
2006 }
2007next:
2008 if (i == subprog_end - 1) {
2009 /* to avoid fall-through from one subprog into another
2010 * the last insn of the subprog should be either exit
2011 * or unconditional jump back
2012 */
2013 if (code != (BPF_JMP | BPF_EXIT) &&
2014 code != (BPF_JMP | BPF_JA)) {
2015 verbose(env, "last insn is not an exit or jmp\n");
2016 return -EINVAL;
2017 }
2018 subprog_start = subprog_end;
Jiong Wang4cb3d992018-05-02 16:17:19 -04002019 cur_subprog++;
2020 if (cur_subprog < env->subprog_cnt)
Jiong Wang9c8105b2018-05-02 16:17:18 -04002021 subprog_end = subprog[cur_subprog + 1].start;
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08002022 }
2023 }
2024 return 0;
2025}
2026
Edward Cree679c7822018-08-22 20:02:19 +01002027/* Parentage chain of this register (or stack slot) should take care of all
2028 * issues like callee-saved registers, stack slot allocation time, etc.
2029 */
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08002030static int mark_reg_read(struct bpf_verifier_env *env,
Edward Cree679c7822018-08-22 20:02:19 +01002031 const struct bpf_reg_state *state,
Jiong Wang5327ed32019-05-24 23:25:12 +01002032 struct bpf_reg_state *parent, u8 flag)
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08002033{
2034 bool writes = parent == state->parent; /* Observe write marks */
Alexei Starovoitov06ee7112019-04-01 21:27:40 -07002035 int cnt = 0;
Edward Creedc503a82017-08-15 20:34:35 +01002036
2037 while (parent) {
2038 /* if read wasn't screened by an earlier write ... */
Edward Cree679c7822018-08-22 20:02:19 +01002039 if (writes && state->live & REG_LIVE_WRITTEN)
Edward Creedc503a82017-08-15 20:34:35 +01002040 break;
Alexei Starovoitov9242b5f2018-12-13 11:42:34 -08002041 if (parent->live & REG_LIVE_DONE) {
2042 verbose(env, "verifier BUG type %s var_off %lld off %d\n",
2043 reg_type_str[parent->type],
2044 parent->var_off.value, parent->off);
2045 return -EFAULT;
2046 }
Jiong Wang5327ed32019-05-24 23:25:12 +01002047 /* The first condition is more likely to be true than the
2048	 * second, so check it first.
2049 */
2050 if ((parent->live & REG_LIVE_READ) == flag ||
2051 parent->live & REG_LIVE_READ64)
Alexei Starovoitov25af32d2019-04-01 21:27:42 -07002052 /* The parentage chain never changes and
2053 * this parent was already marked as LIVE_READ.
2054 * There is no need to keep walking the chain again and
2055 * keep re-marking all parents as LIVE_READ.
2056 * This case happens when the same register is read
2057 * multiple times without writes into it in-between.
Jiong Wang5327ed32019-05-24 23:25:12 +01002058 * Also, if parent has the stronger REG_LIVE_READ64 set,
2059 * then no need to set the weak REG_LIVE_READ32.
Alexei Starovoitov25af32d2019-04-01 21:27:42 -07002060 */
2061 break;
Edward Creedc503a82017-08-15 20:34:35 +01002062 /* ... then we depend on parent's value */
Jiong Wang5327ed32019-05-24 23:25:12 +01002063 parent->live |= flag;
2064 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2065 if (flag == REG_LIVE_READ64)
2066 parent->live &= ~REG_LIVE_READ32;
Edward Creedc503a82017-08-15 20:34:35 +01002067 state = parent;
2068 parent = state->parent;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08002069 writes = true;
Alexei Starovoitov06ee7112019-04-01 21:27:40 -07002070 cnt++;
Edward Creedc503a82017-08-15 20:34:35 +01002071 }
Alexei Starovoitov06ee7112019-04-01 21:27:40 -07002072
2073 if (env->longest_mark_read_walk < cnt)
2074 env->longest_mark_read_walk = cnt;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08002075 return 0;
Edward Creedc503a82017-08-15 20:34:35 +01002076}
2077
Jiong Wang5327ed32019-05-24 23:25:12 +01002078/* This function is supposed to be used by the following 32-bit optimization
2079 * code only. It returns TRUE if the source or destination register operates
2080 * on 64-bit, otherwise return FALSE.
2081 */
2082static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2083 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2084{
2085 u8 code, class, op;
2086
2087 code = insn->code;
2088 class = BPF_CLASS(code);
2089 op = BPF_OP(code);
2090 if (class == BPF_JMP) {
2091 /* BPF_EXIT for "main" will reach here. Return TRUE
2092 * conservatively.
2093 */
2094 if (op == BPF_EXIT)
2095 return true;
2096 if (op == BPF_CALL) {
2097	 /* A BPF-to-BPF call will reach here because caller-saved
2098	 * clobbers are marked with DST_OP_NO_MARK; we don't care
2099	 * about the register def because those regs are already
2100	 * marked as NOT_INIT.
2101 */
2102 if (insn->src_reg == BPF_PSEUDO_CALL)
2103 return false;
2104	 /* A helper call will reach here because of the arg type
2105	 * check; conservatively return TRUE.
2106 */
2107 if (t == SRC_OP)
2108 return true;
2109
2110 return false;
2111 }
2112 }
2113
2114 if (class == BPF_ALU64 || class == BPF_JMP ||
2115	 /* BPF_END always uses the BPF_ALU class. */
2116 (class == BPF_ALU && op == BPF_END && insn->imm == 64))
2117 return true;
2118
2119 if (class == BPF_ALU || class == BPF_JMP32)
2120 return false;
2121
2122 if (class == BPF_LDX) {
2123 if (t != SRC_OP)
2124 return BPF_SIZE(code) == BPF_DW;
2125 /* LDX source must be ptr. */
2126 return true;
2127 }
2128
2129 if (class == BPF_STX) {
Ilya Leoshkevich83a28812021-03-01 16:40:19 +01002130 /* BPF_STX (including atomic variants) has multiple source
2131 * operands, one of which is a ptr. Check whether the caller is
2132 * asking about it.
2133 */
2134 if (t == SRC_OP && reg->type != SCALAR_VALUE)
Jiong Wang5327ed32019-05-24 23:25:12 +01002135 return true;
2136 return BPF_SIZE(code) == BPF_DW;
2137 }
2138
2139 if (class == BPF_LD) {
2140 u8 mode = BPF_MODE(code);
2141
2142 /* LD_IMM64 */
2143 if (mode == BPF_IMM)
2144 return true;
2145
2146 /* Both LD_IND and LD_ABS return 32-bit data. */
2147 if (t != SRC_OP)
2148 return false;
2149
2150 /* Implicit ctx ptr. */
2151 if (regno == BPF_REG_6)
2152 return true;
2153
2154 /* Explicit source could be any width. */
2155 return true;
2156 }
2157
2158 if (class == BPF_ST)
2159 /* The only source register for BPF_ST is a ptr. */
2160 return true;
2161
2162	/* Conservatively return true by default. */
2163 return true;
2164}
2165
Ilya Leoshkevich83a28812021-03-01 16:40:19 +01002166/* Return the regno defined by the insn, or -1. */
2167static int insn_def_regno(const struct bpf_insn *insn)
Jiong Wangb325fbc2019-05-24 23:25:13 +01002168{
Ilya Leoshkevich83a28812021-03-01 16:40:19 +01002169 switch (BPF_CLASS(insn->code)) {
2170 case BPF_JMP:
2171 case BPF_JMP32:
2172 case BPF_ST:
2173 return -1;
2174 case BPF_STX:
2175 if (BPF_MODE(insn->code) == BPF_ATOMIC &&
2176 (insn->imm & BPF_FETCH)) {
2177 if (insn->imm == BPF_CMPXCHG)
2178 return BPF_REG_0;
2179 else
2180 return insn->src_reg;
2181 } else {
2182 return -1;
2183 }
2184 default:
2185 return insn->dst_reg;
2186 }
Jiong Wangb325fbc2019-05-24 23:25:13 +01002187}
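
/* Examples for the BPF_STX branch above: BPF_CMPXCHG writes its result
 * into R0 (dst_reg only supplies the memory address), other fetching
 * atomics such as BPF_XCHG or BPF_ADD | BPF_FETCH write the old value
 * back into src_reg, and non-fetching atomics, like plain stores, define
 * no register at all, hence the -1.
 */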
2188
2189/* Return TRUE if INSN has defined any 32-bit value explicitly. */
2190static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
2191{
Ilya Leoshkevich83a28812021-03-01 16:40:19 +01002192 int dst_reg = insn_def_regno(insn);
2193
2194 if (dst_reg == -1)
Jiong Wangb325fbc2019-05-24 23:25:13 +01002195 return false;
2196
Ilya Leoshkevich83a28812021-03-01 16:40:19 +01002197 return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
Jiong Wangb325fbc2019-05-24 23:25:13 +01002198}
2199
Jiong Wang5327ed32019-05-24 23:25:12 +01002200static void mark_insn_zext(struct bpf_verifier_env *env,
2201 struct bpf_reg_state *reg)
2202{
2203 s32 def_idx = reg->subreg_def;
2204
2205 if (def_idx == DEF_NOT_SUBREG)
2206 return;
2207
2208 env->insn_aux_data[def_idx - 1].zext_dst = true;
2209	/* The dst will be zero-extended, so it won't be a sub-register anymore. */
2210 reg->subreg_def = DEF_NOT_SUBREG;
2211}
2212
Edward Creedc503a82017-08-15 20:34:35 +01002213static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002214 enum reg_arg_type t)
2215{
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08002216 struct bpf_verifier_state *vstate = env->cur_state;
2217 struct bpf_func_state *state = vstate->frame[vstate->curframe];
Jiong Wang5327ed32019-05-24 23:25:12 +01002218 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
Jiong Wangc342dc12019-04-12 22:59:37 +01002219 struct bpf_reg_state *reg, *regs = state->regs;
Jiong Wang5327ed32019-05-24 23:25:12 +01002220 bool rw64;
Edward Creedc503a82017-08-15 20:34:35 +01002221
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002222 if (regno >= MAX_BPF_REG) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07002223 verbose(env, "R%d is invalid\n", regno);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002224 return -EINVAL;
2225 }
2226
Jiong Wangc342dc12019-04-12 22:59:37 +01002227 reg = &regs[regno];
Jiong Wang5327ed32019-05-24 23:25:12 +01002228 rw64 = is_reg64(env, insn, regno, reg, t);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002229 if (t == SRC_OP) {
2230 /* check whether register used as source operand can be read */
Jiong Wangc342dc12019-04-12 22:59:37 +01002231 if (reg->type == NOT_INIT) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07002232 verbose(env, "R%d !read_ok\n", regno);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002233 return -EACCES;
2234 }
Edward Cree679c7822018-08-22 20:02:19 +01002235 /* We don't need to worry about FP liveness because it's read-only */
Jiong Wangc342dc12019-04-12 22:59:37 +01002236 if (regno == BPF_REG_FP)
2237 return 0;
2238
Jiong Wang5327ed32019-05-24 23:25:12 +01002239 if (rw64)
2240 mark_insn_zext(env, reg);
2241
2242 return mark_reg_read(env, reg, reg->parent,
2243 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002244 } else {
2245 /* check whether register used as dest operand can be written to */
2246 if (regno == BPF_REG_FP) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07002247 verbose(env, "frame pointer is read only\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002248 return -EACCES;
2249 }
Jiong Wangc342dc12019-04-12 22:59:37 +01002250 reg->live |= REG_LIVE_WRITTEN;
Jiong Wang5327ed32019-05-24 23:25:12 +01002251 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002252 if (t == DST_OP)
Jakub Kicinski61bd5212017-10-09 10:30:11 -07002253 mark_reg_unknown(env, regs, regno);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002254 }
2255 return 0;
2256}
2257
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002258/* for any branch, call, exit record the history of jmps in the given state */
2259static int push_jmp_history(struct bpf_verifier_env *env,
2260 struct bpf_verifier_state *cur)
2261{
2262 u32 cnt = cur->jmp_history_cnt;
2263 struct bpf_idx_pair *p;
2264
2265 cnt++;
2266 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
2267 if (!p)
2268 return -ENOMEM;
2269 p[cnt - 1].idx = env->insn_idx;
2270 p[cnt - 1].prev_idx = env->prev_insn_idx;
2271 cur->jmp_history = p;
2272 cur->jmp_history_cnt = cnt;
2273 return 0;
2274}
2275
2276	/* Backtrack one insn at a time. If idx is not at the top of the recorded
2277	 * history, then the previous instruction came from straight-line execution.
2278 */
2279static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
2280 u32 *history)
2281{
2282 u32 cnt = *history;
2283
2284 if (cnt && st->jmp_history[cnt - 1].idx == i) {
2285 i = st->jmp_history[cnt - 1].prev_idx;
2286 (*history)--;
2287 } else {
2288 i--;
2289 }
2290 return i;
2291}
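
/* Worked example for the two helpers above (hypothetical indices): say
 * the state executed insns 10, 11, 20, 21, where insn 11 jumped to 20 and
 * push_jmp_history() recorded {idx=20, prev_idx=11}. Backtracking then
 * retraces the actual execution path rather than raw instruction order:
 */
static void ex_backtrack_walk(struct bpf_verifier_state *st)
{
	u32 history = st->jmp_history_cnt;	/* 1 in this example */
	int i = 21;				/* last executed insn */

	i = get_prev_insn_idx(st, i, &history);	/* 21 -> 20: straight line */
	i = get_prev_insn_idx(st, i, &history);	/* 20 -> 11: via jmp_history */
	i = get_prev_insn_idx(st, i, &history);	/* 11 -> 10: straight line */
}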
2292
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07002293static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
2294{
2295 const struct btf_type *func;
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05302296 struct btf *desc_btf;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07002297
2298 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
2299 return NULL;
2300
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05302301 desc_btf = find_kfunc_desc_btf(data, insn->imm, insn->off, NULL);
2302 if (IS_ERR(desc_btf))
2303 return "<error>";
2304
2305 func = btf_type_by_id(desc_btf, insn->imm);
2306 return btf_name_by_offset(desc_btf, func->name_off);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07002307}
2308
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002309/* For a given verifier state, backtrack_insn() is called from the last insn to
2310 * the first insn. Its purpose is to compute a bitmask of registers and
2311 * stack slots that need precision in the parent verifier state.
2312 */
2313static int backtrack_insn(struct bpf_verifier_env *env, int idx,
2314 u32 *reg_mask, u64 *stack_mask)
2315{
2316 const struct bpf_insn_cbs cbs = {
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07002317 .cb_call = disasm_kfunc_name,
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002318 .cb_print = verbose,
2319 .private_data = env,
2320 };
2321 struct bpf_insn *insn = env->prog->insnsi + idx;
2322 u8 class = BPF_CLASS(insn->code);
2323 u8 opcode = BPF_OP(insn->code);
2324 u8 mode = BPF_MODE(insn->code);
2325 u32 dreg = 1u << insn->dst_reg;
2326 u32 sreg = 1u << insn->src_reg;
2327 u32 spi;
2328
2329 if (insn->code == 0)
2330 return 0;
2331 if (env->log.level & BPF_LOG_LEVEL) {
2332 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
2333 verbose(env, "%d: ", idx);
2334 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
2335 }
2336
2337 if (class == BPF_ALU || class == BPF_ALU64) {
2338 if (!(*reg_mask & dreg))
2339 return 0;
2340 if (opcode == BPF_MOV) {
2341 if (BPF_SRC(insn->code) == BPF_X) {
2342 /* dreg = sreg
2343 * dreg needs precision after this insn
2344 * sreg needs precision before this insn
2345 */
2346 *reg_mask &= ~dreg;
2347 *reg_mask |= sreg;
2348 } else {
2349 /* dreg = K
2350 * dreg needs precision after this insn.
2351 * Corresponding register is already marked
2352 * as precise=true in this verifier state.
2353 * No further markings in parent are necessary
2354 */
2355 *reg_mask &= ~dreg;
2356 }
2357 } else {
2358 if (BPF_SRC(insn->code) == BPF_X) {
2359 /* dreg += sreg
2360 * both dreg and sreg need precision
2361 * before this insn
2362 */
2363 *reg_mask |= sreg;
2364 } /* else dreg += K
2365 * dreg still needs precision before this insn
2366 */
2367 }
2368 } else if (class == BPF_LDX) {
2369 if (!(*reg_mask & dreg))
2370 return 0;
2371 *reg_mask &= ~dreg;
2372
2373 /* scalars can only be spilled into stack w/o losing precision.
2374 * Load from any other memory can be zero extended.
2375 * The desire to keep that precision is already indicated
2376 * by 'precise' mark in corresponding register of this state.
2377 * No further tracking necessary.
2378 */
2379 if (insn->src_reg != BPF_REG_FP)
2380 return 0;
2381 if (BPF_SIZE(insn->code) != BPF_DW)
2382 return 0;
2383
2384 /* dreg = *(u64 *)[fp - off] was a fill from the stack.
2385	 * That [fp - off] slot contains a scalar that needs to be
2386	 * tracked with precision.
2387 */
2388 spi = (-insn->off - 1) / BPF_REG_SIZE;
2389 if (spi >= 64) {
2390 verbose(env, "BUG spi %d\n", spi);
2391 WARN_ONCE(1, "verifier backtracking bug");
2392 return -EFAULT;
2393 }
2394 *stack_mask |= 1ull << spi;
Andrii Nakryikob3b50f02019-07-08 20:32:44 -07002395 } else if (class == BPF_STX || class == BPF_ST) {
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002396 if (*reg_mask & dreg)
Andrii Nakryikob3b50f02019-07-08 20:32:44 -07002397 /* stx & st shouldn't be using _scalar_ dst_reg
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002398 * to access memory. It means backtracking
2399 * encountered a case of pointer subtraction.
2400 */
2401 return -ENOTSUPP;
2402 /* scalars can only be spilled into stack */
2403 if (insn->dst_reg != BPF_REG_FP)
2404 return 0;
2405 if (BPF_SIZE(insn->code) != BPF_DW)
2406 return 0;
2407 spi = (-insn->off - 1) / BPF_REG_SIZE;
2408 if (spi >= 64) {
2409 verbose(env, "BUG spi %d\n", spi);
2410 WARN_ONCE(1, "verifier backtracking bug");
2411 return -EFAULT;
2412 }
2413 if (!(*stack_mask & (1ull << spi)))
2414 return 0;
2415 *stack_mask &= ~(1ull << spi);
Andrii Nakryikob3b50f02019-07-08 20:32:44 -07002416 if (class == BPF_STX)
2417 *reg_mask |= sreg;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002418 } else if (class == BPF_JMP || class == BPF_JMP32) {
2419 if (opcode == BPF_CALL) {
2420 if (insn->src_reg == BPF_PSEUDO_CALL)
2421 return -ENOTSUPP;
2422 /* regular helper call sets R0 */
2423 *reg_mask &= ~1;
2424 if (*reg_mask & 0x3f) {
2425 /* if backtracing was looking for registers R1-R5
2426 * they should have been found already.
2427 */
2428 verbose(env, "BUG regs %x\n", *reg_mask);
2429 WARN_ONCE(1, "verifier backtracking bug");
2430 return -EFAULT;
2431 }
2432 } else if (opcode == BPF_EXIT) {
2433 return -ENOTSUPP;
2434 }
2435 } else if (class == BPF_LD) {
2436 if (!(*reg_mask & dreg))
2437 return 0;
2438 *reg_mask &= ~dreg;
2439 /* It's ld_imm64 or ld_abs or ld_ind.
2440 * For ld_imm64 no further tracking of precision
2441 * into parent is necessary
2442 */
2443 if (mode == BPF_IND || mode == BPF_ABS)
2444 /* to be analyzed */
2445 return -ENOTSUPP;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002446 }
2447 return 0;
2448}
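
/* Two sample backtrack steps (hypothetical program): for
 *   7: (0f) r2 += r8
 * with *reg_mask = {r2} on entry, the ALU/BPF_X branch above keeps r2 in
 * the mask and adds r8, since both inputs must be precise before the add.
 * For
 *   7: (b7) r2 = 42
 * the same entry mask drops r2 entirely: the constant fully determines
 * r2, so nothing further upstream needs precision for it.
 */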
2449
2450/* the scalar precision tracking algorithm:
2451 * . at the start all registers have precise=false.
2452 * . scalar ranges are tracked as normal through alu and jmp insns.
2453 * . once precise value of the scalar register is used in:
2454 * . ptr + scalar alu
2455 * . if (scalar cond K|scalar)
2456 * . helper_call(.., scalar, ...) where ARG_CONST is expected
2457 * backtrack through the verifier states and mark all registers and
2458 * stack slots with spilled constants that these scalar registers
2459 * should be precise.
2460 * . during state pruning two registers (or spilled stack slots)
2461 * are equivalent if both are not precise.
2462 *
2463 * Note the verifier cannot simply walk register parentage chain,
2464 * since many different registers and stack slots could have been
2465 * used to compute single precise scalar.
2466 *
2467 * The approach of starting with precise=true for all registers and then
2468 * backtrack to mark a register as not precise when the verifier detects
2469 * that the program doesn't care about the specific value (e.g., when a
2470 * helper takes a register as an ARG_ANYTHING parameter) is not safe.
2471 *
2472 * It's ok to walk single parentage chain of the verifier states.
2473 * It's possible that this backtracking will go all the way till 1st insn.
2474 * All other branches will be explored for needing precision later.
2475 *
2476 * The backtracking needs to deal with cases like:
2477 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
2478 * r9 -= r8
2479 * r5 = r9
2480 * if r5 > 0x79f goto pc+7
2481 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
2482 * r5 += 1
2483 * ...
2484 * call bpf_perf_event_output#25
2485 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
2486 *
2487 * and this case:
2488 * r6 = 1
2489 * call foo // uses callee's r6 inside to compute r0
2490 * r0 += r6
2491 * if r0 == 0 goto
2492 *
2493 * to track above reg_mask/stack_mask needs to be independent for each frame.
2494 *
2495 * Also if parent's curframe > frame where backtracking started,
2496 * the verifier needs to mark registers in both frames, otherwise callees
2497 * may incorrectly prune callers. This is similar to
2498 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
2499 *
2500 * For now backtracking falls back into conservative marking.
2501 */
2502static void mark_all_scalars_precise(struct bpf_verifier_env *env,
2503 struct bpf_verifier_state *st)
2504{
2505 struct bpf_func_state *func;
2506 struct bpf_reg_state *reg;
2507 int i, j;
2508
2509 /* big hammer: mark all scalars precise in this path.
2510 * pop_stack may still get !precise scalars.
2511 */
2512 for (; st; st = st->parent)
2513 for (i = 0; i <= st->curframe; i++) {
2514 func = st->frame[i];
2515 for (j = 0; j < BPF_REG_FP; j++) {
2516 reg = &func->regs[j];
2517 if (reg->type != SCALAR_VALUE)
2518 continue;
2519 reg->precise = true;
2520 }
2521 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
Martin KaFai Lau27113c52021-09-21 17:49:34 -07002522 if (!is_spilled_reg(&func->stack[j]))
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002523 continue;
2524 reg = &func->stack[j].spilled_ptr;
2525 if (reg->type != SCALAR_VALUE)
2526 continue;
2527 reg->precise = true;
2528 }
2529 }
2530}
2531
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002532static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
2533 int spi)
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002534{
2535 struct bpf_verifier_state *st = env->cur_state;
2536 int first_idx = st->first_insn_idx;
2537 int last_idx = env->insn_idx;
2538 struct bpf_func_state *func;
2539 struct bpf_reg_state *reg;
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002540 u32 reg_mask = regno >= 0 ? 1u << regno : 0;
2541 u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002542 bool skip_first = true;
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002543 bool new_marks = false;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002544 int i, err;
2545
Alexei Starovoitov2c78ee82020-05-13 16:03:54 -07002546 if (!env->bpf_capable)
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002547 return 0;
2548
2549 func = st->frame[st->curframe];
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002550 if (regno >= 0) {
2551 reg = &func->regs[regno];
2552 if (reg->type != SCALAR_VALUE) {
2553 WARN_ONCE(1, "backtracing misuse");
2554 return -EFAULT;
2555 }
2556 if (!reg->precise)
2557 new_marks = true;
2558 else
2559 reg_mask = 0;
2560 reg->precise = true;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002561 }
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002562
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002563 while (spi >= 0) {
Martin KaFai Lau27113c52021-09-21 17:49:34 -07002564 if (!is_spilled_reg(&func->stack[spi])) {
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002565 stack_mask = 0;
2566 break;
2567 }
2568 reg = &func->stack[spi].spilled_ptr;
2569 if (reg->type != SCALAR_VALUE) {
2570 stack_mask = 0;
2571 break;
2572 }
2573 if (!reg->precise)
2574 new_marks = true;
2575 else
2576 stack_mask = 0;
2577 reg->precise = true;
2578 break;
2579 }
2580
2581 if (!new_marks)
2582 return 0;
2583 if (!reg_mask && !stack_mask)
2584 return 0;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002585 for (;;) {
2586 DECLARE_BITMAP(mask, 64);
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002587 u32 history = st->jmp_history_cnt;
2588
2589 if (env->log.level & BPF_LOG_LEVEL)
2590 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
2591 for (i = last_idx;;) {
2592 if (skip_first) {
2593 err = 0;
2594 skip_first = false;
2595 } else {
2596 err = backtrack_insn(env, i, &reg_mask, &stack_mask);
2597 }
2598 if (err == -ENOTSUPP) {
2599 mark_all_scalars_precise(env, st);
2600 return 0;
2601 } else if (err) {
2602 return err;
2603 }
2604 if (!reg_mask && !stack_mask)
2605 /* Found assignment(s) into tracked register in this state.
2606 * Since this state is already marked, just return.
2607 * Nothing to be tracked further in the parent state.
2608 */
2609 return 0;
2610 if (i == first_idx)
2611 break;
2612 i = get_prev_insn_idx(st, i, &history);
2613 if (i >= env->prog->len) {
2614 /* This can happen if backtracking reached insn 0
2615 * and there are still reg_mask or stack_mask
2616 * to backtrack.
2617 * It means the backtracking missed the spot where
2618	 * a particular register was initialized with a constant.
2619 */
2620 verbose(env, "BUG backtracking idx %d\n", i);
2621 WARN_ONCE(1, "verifier backtracking bug");
2622 return -EFAULT;
2623 }
2624 }
2625 st = st->parent;
2626 if (!st)
2627 break;
2628
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002629 new_marks = false;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002630 func = st->frame[st->curframe];
2631 bitmap_from_u64(mask, reg_mask);
2632 for_each_set_bit(i, mask, 32) {
2633 reg = &func->regs[i];
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002634 if (reg->type != SCALAR_VALUE) {
2635 reg_mask &= ~(1u << i);
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002636 continue;
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002637 }
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002638 if (!reg->precise)
2639 new_marks = true;
2640 reg->precise = true;
2641 }
2642
2643 bitmap_from_u64(mask, stack_mask);
2644 for_each_set_bit(i, mask, 64) {
2645 if (i >= func->allocated_stack / BPF_REG_SIZE) {
Alexei Starovoitov2339cd62019-09-03 15:16:17 -07002646 /* the sequence of instructions:
2647 * 2: (bf) r3 = r10
2648 * 3: (7b) *(u64 *)(r3 -8) = r0
2649 * 4: (79) r4 = *(u64 *)(r10 -8)
2650 * doesn't contain jmps. It's backtracked
2651 * as a single block.
2652 * During backtracking insn 3 is not recognized as
2653 * stack access, so at the end of backtracking
2654 * stack slot fp-8 is still marked in stack_mask.
2655 * However the parent state may not have accessed
2656 * fp-8 and it's "unallocated" stack space.
2657 * In such case fallback to conservative.
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002658 */
Alexei Starovoitov2339cd62019-09-03 15:16:17 -07002659 mark_all_scalars_precise(env, st);
2660 return 0;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002661 }
2662
Martin KaFai Lau27113c52021-09-21 17:49:34 -07002663 if (!is_spilled_reg(&func->stack[i])) {
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002664 stack_mask &= ~(1ull << i);
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002665 continue;
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002666 }
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002667 reg = &func->stack[i].spilled_ptr;
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002668 if (reg->type != SCALAR_VALUE) {
2669 stack_mask &= ~(1ull << i);
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002670 continue;
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002671 }
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002672 if (!reg->precise)
2673 new_marks = true;
2674 reg->precise = true;
2675 }
2676 if (env->log.level & BPF_LOG_LEVEL) {
2677 print_verifier_state(env, func);
2678 verbose(env, "parent %s regs=%x stack=%llx marks\n",
2679 new_marks ? "didn't have" : "already had",
2680 reg_mask, stack_mask);
2681 }
2682
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002683 if (!reg_mask && !stack_mask)
2684 break;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002685 if (!new_marks)
2686 break;
2687
2688 last_idx = st->last_insn_idx;
2689 first_idx = st->first_insn_idx;
2690 }
2691 return 0;
2692}
2693
Alexei Starovoitova3ce6852019-06-28 09:24:09 -07002694static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
2695{
2696 return __mark_chain_precision(env, regno, -1);
2697}
2698
2699static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
2700{
2701 return __mark_chain_precision(env, -1, spi);
2702}
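
/* Illustrative (hypothetical) sequence showing why the precision chain
 * above is walked:
 *   0: (b7) r3 = 10
 *   1: (0f) r2 += r3
 * If r2 is a map value pointer, the bounds check on insn 1 depends on
 * the exact constant in r3, so backtracking marks r3 (and any register
 * or stack slot that contributed to it) as precise. Without that mark,
 * state pruning could treat a state with a different r3 constant as
 * equivalent and wrongly skip re-verification of this path.
 */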
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002703
Alexei Starovoitov1be7f752015-10-07 22:23:21 -07002704static bool is_spillable_regtype(enum bpf_reg_type type)
2705{
2706 switch (type) {
2707 case PTR_TO_MAP_VALUE:
2708 case PTR_TO_MAP_VALUE_OR_NULL:
2709 case PTR_TO_STACK:
2710 case PTR_TO_CTX:
Alexei Starovoitov969bf052016-05-05 19:49:10 -07002711 case PTR_TO_PACKET:
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02002712 case PTR_TO_PACKET_META:
Alexei Starovoitov969bf052016-05-05 19:49:10 -07002713 case PTR_TO_PACKET_END:
Petar Penkovd58e4682018-09-14 07:46:18 -07002714 case PTR_TO_FLOW_KEYS:
Alexei Starovoitov1be7f752015-10-07 22:23:21 -07002715 case CONST_PTR_TO_MAP:
Joe Stringerc64b7982018-10-02 13:35:33 -07002716 case PTR_TO_SOCKET:
2717 case PTR_TO_SOCKET_OR_NULL:
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08002718 case PTR_TO_SOCK_COMMON:
2719 case PTR_TO_SOCK_COMMON_OR_NULL:
Martin KaFai Lau655a51e2019-02-09 23:22:24 -08002720 case PTR_TO_TCP_SOCK:
2721 case PTR_TO_TCP_SOCK_OR_NULL:
Jonathan Lemonfada7fd2019-06-06 13:59:40 -07002722 case PTR_TO_XDP_SOCK:
Martin KaFai Lau65726b52020-01-08 16:34:54 -08002723 case PTR_TO_BTF_ID:
Yonghong Songb121b342020-05-09 10:59:12 -07002724 case PTR_TO_BTF_ID_OR_NULL:
Yonghong Songafbf21d2020-07-23 11:41:11 -07002725 case PTR_TO_RDONLY_BUF:
2726 case PTR_TO_RDONLY_BUF_OR_NULL:
2727 case PTR_TO_RDWR_BUF:
2728 case PTR_TO_RDWR_BUF_OR_NULL:
Hao Luoeaa6bcb2020-09-29 16:50:47 -07002729 case PTR_TO_PERCPU_BTF_ID:
Gilad Reti744ea4e2021-01-13 07:38:07 +02002730 case PTR_TO_MEM:
2731 case PTR_TO_MEM_OR_NULL:
Yonghong Song69c087b2021-02-26 12:49:25 -08002732 case PTR_TO_FUNC:
2733 case PTR_TO_MAP_KEY:
Alexei Starovoitov1be7f752015-10-07 22:23:21 -07002734 return true;
2735 default:
2736 return false;
2737 }
2738}
2739
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08002740/* Does this register contain a constant zero? */
2741static bool register_is_null(struct bpf_reg_state *reg)
2742{
2743 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
2744}
2745
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07002746static bool register_is_const(struct bpf_reg_state *reg)
2747{
2748 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
2749}
2750
Yonghong Song5689d492020-10-08 18:12:38 -07002751static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
2752{
2753 return tnum_is_unknown(reg->var_off) &&
2754 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
2755 reg->umin_value == 0 && reg->umax_value == U64_MAX &&
2756 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
2757 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
2758}
2759
2760static bool register_is_bounded(struct bpf_reg_state *reg)
2761{
2762 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
2763}
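
/* Example with hypothetical bounds: a freshly unknown register has
 * var_off unknown, [smin, smax] = [S64_MIN, S64_MAX], [umin, umax] =
 * [0, U64_MAX] (and the equivalent 32-bit ranges), so it is not
 * "bounded". After the fall-through edge of 'if r1 > 99 goto ...',
 * umax_value becomes 99 and register_is_bounded() returns true.
 */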
2764
Jann Horn6e7e63c2020-04-17 02:00:06 +02002765static bool __is_pointer_value(bool allow_ptr_leaks,
2766 const struct bpf_reg_state *reg)
2767{
2768 if (allow_ptr_leaks)
2769 return false;
2770
2771 return reg->type != SCALAR_VALUE;
2772}
2773
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07002774static void save_register_state(struct bpf_func_state *state,
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07002775 int spi, struct bpf_reg_state *reg,
2776 int size)
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07002777{
2778 int i;
2779
2780 state->stack[spi].spilled_ptr = *reg;
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07002781 if (size == BPF_REG_SIZE)
2782 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07002783
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07002784 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
2785 state->stack[spi].slot_type[i - 1] = STACK_SPILL;
2786
2787 /* size < 8 bytes spill */
2788 for (; i; i--)
2789 scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07002790}
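
/* Example of a hypothetical 4-byte spill at fp-8: the first loop above
 * marks slot_type[7..4] as STACK_SPILL and the second scrubs
 * slot_type[3..0], so only the bytes actually written are treated as
 * part of the spilled register. REG_LIVE_WRITTEN is set only for a
 * full BPF_REG_SIZE spill, keeping read propagation conservative for
 * partial spills.
 */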
2791
Andrei Matei01f810a2021-02-06 20:10:24 -05002792/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002793 * stack boundary and alignment are checked in check_mem_access()
2794 */
Andrei Matei01f810a2021-02-06 20:10:24 -05002795static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
2796 /* stack frame we're writing to */
2797 struct bpf_func_state *state,
2798 int off, int size, int value_regno,
2799 int insn_idx)
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002800{
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08002801 struct bpf_func_state *cur; /* state of the current function */
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07002802 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002803 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07002804 struct bpf_reg_state *reg = NULL;
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07002805
Lorenz Bauerc69431a2021-04-29 14:46:54 +01002806 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07002807 if (err)
2808 return err;
Alexei Starovoitov9c3997602014-10-28 15:11:41 -07002809 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
2810 * so it's aligned access and [off, off + size) are within stack limits
2811 */
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07002812 if (!env->allow_ptr_leaks &&
2813 state->stack[spi].slot_type[0] == STACK_SPILL &&
2814 size != BPF_REG_SIZE) {
2815 verbose(env, "attempt to corrupt spilled pointer on stack\n");
2816 return -EACCES;
2817 }
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002818
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08002819 cur = env->cur_state->frame[env->cur_state->curframe];
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07002820 if (value_regno >= 0)
2821 reg = &cur->regs[value_regno];
Daniel Borkmann2039f262021-07-13 08:18:31 +00002822 if (!env->bypass_spec_v4) {
2823 bool sanitize = reg && is_spillable_regtype(reg->type);
2824
2825 for (i = 0; i < size; i++) {
2826 if (state->stack[spi].slot_type[i] == STACK_INVALID) {
2827 sanitize = true;
2828 break;
2829 }
2830 }
2831
2832 if (sanitize)
2833 env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
2834 }
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002835
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07002836 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
Alexei Starovoitov2c78ee82020-05-13 16:03:54 -07002837 !register_is_null(reg) && env->bpf_capable) {
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002838 if (dst_reg != BPF_REG_FP) {
2839 /* The backtracking logic can only recognize explicit
2840			 * stack slot address like [fp - 8]. Any other spill of a
Zhen Lei8fb33b62021-05-25 10:56:59 +08002841			 * scalar via a different register has to be conservative.
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002842 * Backtrack from here and mark all registers as precise
2843 * that contributed into 'reg' being a constant.
2844 */
2845 err = mark_chain_precision(env, value_regno);
2846 if (err)
2847 return err;
2848 }
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07002849 save_register_state(state, spi, reg, size);
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07002850 } else if (reg && is_spillable_regtype(reg->type)) {
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002851 /* register containing pointer is being spilled into stack */
Alexei Starovoitov9c3997602014-10-28 15:11:41 -07002852 if (size != BPF_REG_SIZE) {
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07002853 verbose_linfo(env, insn_idx, "; ");
Jakub Kicinski61bd5212017-10-09 10:30:11 -07002854 verbose(env, "invalid size of register spill\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002855 return -EACCES;
2856 }
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07002857 if (state != cur && reg->type == PTR_TO_STACK) {
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08002858 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
2859 return -EINVAL;
2860 }
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07002861 save_register_state(state, spi, reg, size);
Alexei Starovoitov9c3997602014-10-28 15:11:41 -07002862 } else {
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08002863 u8 type = STACK_MISC;
2864
Edward Cree679c7822018-08-22 20:02:19 +01002865 /* regular write of data into stack destroys any spilled ptr */
2866 state->stack[spi].spilled_ptr.type = NOT_INIT;
Jiong Wang0bae2d42018-12-15 03:34:40 -05002867 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
Martin KaFai Lau27113c52021-09-21 17:49:34 -07002868 if (is_spilled_reg(&state->stack[spi]))
Jiong Wang0bae2d42018-12-15 03:34:40 -05002869 for (i = 0; i < BPF_REG_SIZE; i++)
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07002870 scrub_spilled_slot(&state->stack[spi].slot_type[i]);
Alexei Starovoitov9c3997602014-10-28 15:11:41 -07002871
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08002872 /* only mark the slot as written if all 8 bytes were written
2873 * otherwise read propagation may incorrectly stop too soon
2874 * when stack slots are partially written.
2875 * This heuristic means that read propagation will be
2876 * conservative, since it will add reg_live_read marks
2877		 * to stack slots all the way to the first state when a program
2878 * writes+reads less than 8 bytes
2879 */
2880 if (size == BPF_REG_SIZE)
2881 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2882
2883 /* when we zero initialize stack slots mark them as such */
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002884 if (reg && register_is_null(reg)) {
2885 /* backtracking doesn't work for STACK_ZERO yet. */
2886 err = mark_chain_precision(env, value_regno);
2887 if (err)
2888 return err;
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08002889 type = STACK_ZERO;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07002890 }
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08002891
Jiong Wang0bae2d42018-12-15 03:34:40 -05002892 /* Mark slots affected by this stack write. */
Alexei Starovoitov9c3997602014-10-28 15:11:41 -07002893 for (i = 0; i < size; i++)
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07002894 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08002895 type;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07002896 }
2897 return 0;
2898}
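
/* Hypothetical walk-through of the zero-write case above:
 *   1: (b7) r0 = 0
 *   2: (7b) *(u64 *)(r10 -8) = r0
 * Insn 2 does not take the bounded-scalar spill branch (the register is
 * null), so the slot falls through to the STACK_ZERO handling: precision
 * is propagated via mark_chain_precision() and, because all 8 bytes are
 * written, the slot is also marked REG_LIVE_WRITTEN.
 */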
2899
Andrei Matei01f810a2021-02-06 20:10:24 -05002900/* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
2901 * known to contain a variable offset.
2902 * This function checks whether the write is permitted and conservatively
2903 * tracks the effects of the write, considering that each stack slot in the
2904 * dynamic range is potentially written to.
2905 *
2906 * 'off' includes 'regno->off'.
2907 * 'value_regno' can be -1, meaning that an unknown value is being written to
2908 * the stack.
2909 *
2910 * Spilled pointers in range are not marked as written because we don't know
2911 * what's going to be actually written. This means that read propagation for
2912 * future reads cannot be terminated by this write.
2913 *
2914 * For privileged programs, uninitialized stack slots are considered
2915 * initialized by this write (even though we don't know exactly what offsets
2916 * are going to be written to). The idea is that we don't want the verifier to
2917 * reject future reads that access slots written to through variable offsets.
2918 */
2919static int check_stack_write_var_off(struct bpf_verifier_env *env,
2920 /* func where register points to */
2921 struct bpf_func_state *state,
2922 int ptr_regno, int off, int size,
2923 int value_regno, int insn_idx)
2924{
2925 struct bpf_func_state *cur; /* state of the current function */
2926 int min_off, max_off;
2927 int i, err;
2928 struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
2929 bool writing_zero = false;
2930 /* set if the fact that we're writing a zero is used to let any
2931 * stack slots remain STACK_ZERO
2932 */
2933 bool zero_used = false;
2934
2935 cur = env->cur_state->frame[env->cur_state->curframe];
2936 ptr_reg = &cur->regs[ptr_regno];
2937 min_off = ptr_reg->smin_value + off;
2938 max_off = ptr_reg->smax_value + off + size;
2939 if (value_regno >= 0)
2940 value_reg = &cur->regs[value_regno];
2941 if (value_reg && register_is_null(value_reg))
2942 writing_zero = true;
2943
Lorenz Bauerc69431a2021-04-29 14:46:54 +01002944 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
Andrei Matei01f810a2021-02-06 20:10:24 -05002945 if (err)
2946 return err;
2947
2948
2949 /* Variable offset writes destroy any spilled pointers in range. */
2950 for (i = min_off; i < max_off; i++) {
2951 u8 new_type, *stype;
2952 int slot, spi;
2953
2954 slot = -i - 1;
2955 spi = slot / BPF_REG_SIZE;
2956 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
2957
2958 if (!env->allow_ptr_leaks
2959 && *stype != NOT_INIT
2960 && *stype != SCALAR_VALUE) {
2961 /* Reject the write if there's are spilled pointers in
2962 * range. If we didn't reject here, the ptr status
2963 * would be erased below (even though not all slots are
2964 * actually overwritten), possibly opening the door to
2965 * leaks.
2966 */
2967 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
2968 insn_idx, i);
2969 return -EINVAL;
2970 }
2971
2972 /* Erase all spilled pointers. */
2973 state->stack[spi].spilled_ptr.type = NOT_INIT;
2974
2975 /* Update the slot type. */
2976 new_type = STACK_MISC;
2977 if (writing_zero && *stype == STACK_ZERO) {
2978 new_type = STACK_ZERO;
2979 zero_used = true;
2980 }
2981 /* If the slot is STACK_INVALID, we check whether it's OK to
2982 * pretend that it will be initialized by this write. The slot
2983 * might not actually be written to, and so if we mark it as
2984 * initialized future reads might leak uninitialized memory.
2985 * For privileged programs, we will accept such reads to slots
2986		 * that may or may not be written because, if we rejected
2987 * them, the error would be too confusing.
2988 */
2989 if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
2990 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
2991 insn_idx, i);
2992 return -EINVAL;
2993 }
2994 *stype = new_type;
2995 }
2996 if (zero_used) {
2997 /* backtracking doesn't work for STACK_ZERO yet. */
2998 err = mark_chain_precision(env, value_regno);
2999 if (err)
3000 return err;
3001 }
3002 return 0;
3003}
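
/* Hypothetical example: an 8-byte write through fp-16 + r1 with r1 in
 * [0, 8] yields min_off = -16 and max_off = 0, so every byte in
 * [-16, 0) is conservatively retyped: STACK_ZERO survives only when a
 * zero is written over an already-zero slot, everything else becomes
 * STACK_MISC, since any slot in the window may be the one actually
 * written at run time.
 */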
3004
3005/* When register 'dst_regno' is assigned some values from stack[min_off,
3006 * max_off), we set the register's type according to the types of the
3007 * respective stack slots. If all the stack values are known to be zeros, then
3008 * so is the destination reg. Otherwise, the register is considered to be
3009 * SCALAR. This function does not deal with register filling; the caller must
3010 * ensure that all spilled registers in the stack range have been marked as
3011 * read.
3012 */
3013static void mark_reg_stack_read(struct bpf_verifier_env *env,
3014 /* func where src register points to */
3015 struct bpf_func_state *ptr_state,
3016 int min_off, int max_off, int dst_regno)
3017{
3018 struct bpf_verifier_state *vstate = env->cur_state;
3019 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3020 int i, slot, spi;
3021 u8 *stype;
3022 int zeros = 0;
3023
3024 for (i = min_off; i < max_off; i++) {
3025 slot = -i - 1;
3026 spi = slot / BPF_REG_SIZE;
3027 stype = ptr_state->stack[spi].slot_type;
3028 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
3029 break;
3030 zeros++;
3031 }
3032 if (zeros == max_off - min_off) {
3033		/* any access_size read into a register is zero-extended,
3034 * so the whole register == const_zero
3035 */
3036 __mark_reg_const_zero(&state->regs[dst_regno]);
3037 /* backtracking doesn't support STACK_ZERO yet,
3038 * so mark it precise here, so that later
3039 * backtracking can stop here.
3040 * Backtracking may not need this if this register
3041 * doesn't participate in pointer adjustment.
3042 * Forward propagation of precise flag is not
3043 * necessary either. This mark is only to stop
3044 * backtracking. Any register that contributed
3045 * to const 0 was marked precise before spill.
3046 */
3047 state->regs[dst_regno].precise = true;
3048 } else {
3049 /* have read misc data from the stack */
3050 mark_reg_unknown(env, state->regs, dst_regno);
3051 }
3052 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3053}
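
/* Example with hypothetical slots: a 4-byte read from a window whose
 * bytes are all STACK_ZERO yields a known-zero destination (loads are
 * zero-extended, so the full 64 bits are zero); if even one byte is
 * STACK_MISC, the destination is conservatively an unknown scalar.
 */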
3054
3055/* Read the stack at 'off' and put the results into the register indicated by
3056 * 'dst_regno'. It handles reg filling if the addressed stack slot is a
3057 * spilled reg.
3058 *
3059 * 'dst_regno' can be -1, meaning that the read value is not going to a
3060 * register.
3061 *
3062 * The access is assumed to be within the current stack bounds.
3063 */
3064static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
3065 /* func where src register points to */
3066 struct bpf_func_state *reg_state,
3067 int off, int size, int dst_regno)
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003068{
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08003069 struct bpf_verifier_state *vstate = env->cur_state;
3070 struct bpf_func_state *state = vstate->frame[vstate->curframe];
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07003071 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07003072 struct bpf_reg_state *reg;
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07003073 u8 *stype, type;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003074
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08003075 stype = reg_state->stack[spi].slot_type;
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07003076 reg = &reg_state->stack[spi].spilled_ptr;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003077
Martin KaFai Lau27113c52021-09-21 17:49:34 -07003078 if (is_spilled_reg(&reg_state->stack[spi])) {
Martin KaFai Lauf30d49682021-11-01 23:45:35 -07003079 u8 spill_size = 1;
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07003080
Martin KaFai Lauf30d49682021-11-01 23:45:35 -07003081 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
3082 spill_size++;
3083
3084 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07003085 if (reg->type != SCALAR_VALUE) {
3086 verbose_linfo(env, env->insn_idx, "; ");
3087 verbose(env, "invalid size of register fill\n");
3088 return -EACCES;
3089 }
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07003090
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07003091 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07003092 if (dst_regno < 0)
3093 return 0;
3094
Martin KaFai Lauf30d49682021-11-01 23:45:35 -07003095 if (!(off % BPF_REG_SIZE) && size == spill_size) {
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07003096 /* The earlier check_reg_arg() has decided the
3097 * subreg_def for this insn. Save it first.
3098 */
3099 s32 subreg_def = state->regs[dst_regno].subreg_def;
3100
3101 state->regs[dst_regno] = *reg;
3102 state->regs[dst_regno].subreg_def = subreg_def;
3103 } else {
3104 for (i = 0; i < size; i++) {
3105 type = stype[(slot - i) % BPF_REG_SIZE];
3106 if (type == STACK_SPILL)
3107 continue;
3108 if (type == STACK_MISC)
3109 continue;
3110 verbose(env, "invalid read from stack off %d+%d size %d\n",
3111 off, i, size);
3112 return -EACCES;
3113 }
3114 mark_reg_unknown(env, state->regs, dst_regno);
3115 }
3116 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07003117 return 0;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003118 }
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003119
Andrei Matei01f810a2021-02-06 20:10:24 -05003120 if (dst_regno >= 0) {
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003121 /* restore register state from stack */
Andrei Matei01f810a2021-02-06 20:10:24 -05003122 state->regs[dst_regno] = *reg;
Alexei Starovoitov2f18f622017-11-30 21:31:38 -08003123 /* mark reg as written since spilled pointer state likely
3124 * has its liveness marks cleared by is_state_visited()
3125 * which resets stack/reg liveness for state transitions
3126 */
Andrei Matei01f810a2021-02-06 20:10:24 -05003127 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
Jann Horn6e7e63c2020-04-17 02:00:06 +02003128 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
Andrei Matei01f810a2021-02-06 20:10:24 -05003129 /* If dst_regno==-1, the caller is asking us whether
Jann Horn6e7e63c2020-04-17 02:00:06 +02003130 * it is acceptable to use this value as a SCALAR_VALUE
3131 * (e.g. for XADD).
3132 * We must not allow unprivileged callers to do that
3133 * with spilled pointers.
3134 */
3135 verbose(env, "leaking pointer from stack off %d\n",
3136 off);
3137 return -EACCES;
Edward Creedc503a82017-08-15 20:34:35 +01003138 }
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07003139 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003140 } else {
3141 for (i = 0; i < size; i++) {
Andrei Matei01f810a2021-02-06 20:10:24 -05003142 type = stype[(slot - i) % BPF_REG_SIZE];
3143 if (type == STACK_MISC)
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08003144 continue;
Andrei Matei01f810a2021-02-06 20:10:24 -05003145 if (type == STACK_ZERO)
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08003146 continue;
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08003147 verbose(env, "invalid read from stack off %d+%d size %d\n",
3148 off, i, size);
3149 return -EACCES;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003150 }
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07003151 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
Andrei Matei01f810a2021-02-06 20:10:24 -05003152 if (dst_regno >= 0)
3153 mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003154 }
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07003155 return 0;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003156}
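
/* Illustrative fills of a hypothetical 8-byte spill at fp-8:
 *   3: (79) r4 = *(u64 *)(r10 -8)
 * restores the saved register state verbatim (including pointer types),
 * while the narrower
 *   3: (61) r4 = *(u32 *)(r10 -8)
 * is only accepted when the spilled register is a scalar and leaves r4
 * as an unknown scalar.
 */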
3157
Andrei Matei01f810a2021-02-06 20:10:24 -05003158enum stack_access_src {
3159 ACCESS_DIRECT = 1, /* the access is performed by an instruction */
3160 ACCESS_HELPER = 2, /* the access is performed by a helper */
3161};
3162
3163static int check_stack_range_initialized(struct bpf_verifier_env *env,
3164 int regno, int off, int access_size,
3165 bool zero_size_allowed,
3166 enum stack_access_src type,
3167 struct bpf_call_arg_meta *meta);
3168
3169static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
Daniel Borkmanne4298d22019-01-03 00:58:31 +01003170{
Andrei Matei01f810a2021-02-06 20:10:24 -05003171 return cur_regs(env) + regno;
3172}
3173
3174/* Read the stack at 'ptr_regno + off' and put the result into the register
3175 * 'dst_regno'.
3176	 * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
3177 * but not its variable offset.
3178 * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
3179 *
3180 * As opposed to check_stack_read_fixed_off, this function doesn't deal with
3181 * filling registers (i.e. reads of spilled register cannot be detected when
3182 * the offset is not fixed). We conservatively mark 'dst_regno' as containing
3183 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
3184 * offset; for a fixed offset check_stack_read_fixed_off should be used
3185 * instead.
3186 */
3187static int check_stack_read_var_off(struct bpf_verifier_env *env,
3188 int ptr_regno, int off, int size, int dst_regno)
3189{
3190 /* The state of the source register. */
3191 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3192 struct bpf_func_state *ptr_state = func(env, reg);
3193 int err;
3194 int min_off, max_off;
3195
3196 /* Note that we pass a NULL meta, so raw access will not be permitted.
Daniel Borkmanne4298d22019-01-03 00:58:31 +01003197 */
Andrei Matei01f810a2021-02-06 20:10:24 -05003198 err = check_stack_range_initialized(env, ptr_regno, off, size,
3199 false, ACCESS_DIRECT, NULL);
3200 if (err)
3201 return err;
3202
3203 min_off = reg->smin_value + off;
3204 max_off = reg->smax_value + off;
3205 mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
3206 return 0;
3207}
3208
3209/* check_stack_read dispatches to check_stack_read_fixed_off or
3210 * check_stack_read_var_off.
3211 *
3212 * The caller must ensure that the offset falls within the allocated stack
3213 * bounds.
3214 *
3215 * 'dst_regno' is a register which will receive the value from the stack. It
3216 * can be -1, meaning that the read value is not going to a register.
3217 */
3218static int check_stack_read(struct bpf_verifier_env *env,
3219 int ptr_regno, int off, int size,
3220 int dst_regno)
3221{
3222 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3223 struct bpf_func_state *state = func(env, reg);
3224 int err;
3225 /* Some accesses are only permitted with a static offset. */
3226 bool var_off = !tnum_is_const(reg->var_off);
3227
3228 /* The offset is required to be static when reads don't go to a
3229 * register, in order to not leak pointers (see
3230 * check_stack_read_fixed_off).
3231 */
3232 if (dst_regno < 0 && var_off) {
Daniel Borkmanne4298d22019-01-03 00:58:31 +01003233 char tn_buf[48];
3234
3235 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
Andrei Matei01f810a2021-02-06 20:10:24 -05003236 verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
Daniel Borkmanne4298d22019-01-03 00:58:31 +01003237 tn_buf, off, size);
3238 return -EACCES;
3239 }
Andrei Matei01f810a2021-02-06 20:10:24 -05003240 /* Variable offset is prohibited for unprivileged mode for simplicity
3241 * since it requires corresponding support in Spectre masking for stack
3242 * ALU. See also retrieve_ptr_limit().
3243 */
3244 if (!env->bypass_spec_v1 && var_off) {
3245 char tn_buf[48];
Daniel Borkmanne4298d22019-01-03 00:58:31 +01003246
Andrei Matei01f810a2021-02-06 20:10:24 -05003247 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3248 verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
3249 ptr_regno, tn_buf);
Daniel Borkmanne4298d22019-01-03 00:58:31 +01003250 return -EACCES;
3251 }
3252
Andrei Matei01f810a2021-02-06 20:10:24 -05003253 if (!var_off) {
3254 off += reg->var_off.value;
3255 err = check_stack_read_fixed_off(env, state, off, size,
3256 dst_regno);
3257 } else {
3258 /* Variable offset stack reads need more conservative handling
3259 * than fixed offset ones. Note that dst_regno >= 0 on this
3260 * branch.
3261 */
3262 err = check_stack_read_var_off(env, ptr_regno, off, size,
3263 dst_regno);
3264 }
3265 return err;
3266}
3267
3268
3269/* check_stack_write dispatches to check_stack_write_fixed_off or
3270 * check_stack_write_var_off.
3271 *
3272 * 'ptr_regno' is the register used as a pointer into the stack.
3273 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
3274 * 'value_regno' is the register whose value we're writing to the stack. It can
3275 * be -1, meaning that we're not writing from a register.
3276 *
3277 * The caller must ensure that the offset falls within the maximum stack size.
3278 */
3279static int check_stack_write(struct bpf_verifier_env *env,
3280 int ptr_regno, int off, int size,
3281 int value_regno, int insn_idx)
3282{
3283 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3284 struct bpf_func_state *state = func(env, reg);
3285 int err;
3286
3287 if (tnum_is_const(reg->var_off)) {
3288 off += reg->var_off.value;
3289 err = check_stack_write_fixed_off(env, state, off, size,
3290 value_regno, insn_idx);
3291 } else {
3292		/* Variable offset stack writes need more conservative handling
3293 * than fixed offset ones.
3294 */
3295 err = check_stack_write_var_off(env, state,
3296 ptr_regno, off, size,
3297 value_regno, insn_idx);
3298 }
3299 return err;
Daniel Borkmanne4298d22019-01-03 00:58:31 +01003300}
3301
Daniel Borkmann591fe982019-04-09 23:20:05 +02003302static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
3303 int off, int size, enum bpf_access_type type)
3304{
3305 struct bpf_reg_state *regs = cur_regs(env);
3306 struct bpf_map *map = regs[regno].map_ptr;
3307 u32 cap = bpf_map_flags_to_cap(map);
3308
3309 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
3310 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
3311 map->value_size, off, size);
3312 return -EACCES;
3313 }
3314
3315 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
3316 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
3317 map->value_size, off, size);
3318 return -EACCES;
3319 }
3320
3321 return 0;
3322}
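
/* Example: a map created with the BPF_F_RDONLY_PROG flag has
 * BPF_MAP_CAN_WRITE cleared by bpf_map_flags_to_cap(), so a BPF_WRITE
 * into its value is rejected here with "write into map forbidden"
 * while reads continue to pass.
 */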
3323
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003324/* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
3325static int __check_mem_access(struct bpf_verifier_env *env, int regno,
3326 int off, int size, u32 mem_size,
3327 bool zero_size_allowed)
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003328{
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003329 bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
3330 struct bpf_reg_state *reg;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003331
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003332 if (off >= 0 && size_ok && (u64)off + size <= mem_size)
3333 return 0;
3334
3335 reg = &cur_regs(env)[regno];
3336 switch (reg->type) {
Yonghong Song69c087b2021-02-26 12:49:25 -08003337 case PTR_TO_MAP_KEY:
3338 verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
3339 mem_size, off, size);
3340 break;
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003341 case PTR_TO_MAP_VALUE:
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003342 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003343 mem_size, off, size);
3344 break;
3345 case PTR_TO_PACKET:
3346 case PTR_TO_PACKET_META:
3347 case PTR_TO_PACKET_END:
3348 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
3349 off, size, regno, reg->id, off, mem_size);
3350 break;
3351 case PTR_TO_MEM:
3352 default:
3353 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
3354 mem_size, off, size);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003355 }
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003356
3357 return -EACCES;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003358}
3359
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003360/* check read/write into a memory region with possible variable offset */
3361static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
3362 int off, int size, u32 mem_size,
3363 bool zero_size_allowed)
Gianluca Borellodbcfe5f2017-01-09 10:19:46 -08003364{
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08003365 struct bpf_verifier_state *vstate = env->cur_state;
3366 struct bpf_func_state *state = vstate->frame[vstate->curframe];
Gianluca Borellodbcfe5f2017-01-09 10:19:46 -08003367 struct bpf_reg_state *reg = &state->regs[regno];
3368 int err;
3369
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003370	/* We may have adjusted the register pointing to the memory region, so we
Edward Creef1174f72017-08-07 15:26:19 +01003371 * need to try adding each of min_value and max_value to off
3372 * to make sure our theoretical access will be safe.
Gianluca Borellodbcfe5f2017-01-09 10:19:46 -08003373 */
Alexei Starovoitov06ee7112019-04-01 21:27:40 -07003374 if (env->log.level & BPF_LOG_LEVEL)
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003375 print_verifier_state(env, state);
Daniel Borkmannb7137c42019-01-03 00:58:33 +01003376
Gianluca Borellodbcfe5f2017-01-09 10:19:46 -08003377 /* The minimum value is only important with signed
3378 * comparisons where we can't assume the floor of a
3379 * value is 0. If we are using signed variables for our
3380	 * indexes, we need to make sure that whatever we use
3381 * will have a set floor within our range.
3382 */
Daniel Borkmannb7137c42019-01-03 00:58:33 +01003383 if (reg->smin_value < 0 &&
3384 (reg->smin_value == S64_MIN ||
3385 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
3386 reg->smin_value + off < 0)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003387 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
Gianluca Borellodbcfe5f2017-01-09 10:19:46 -08003388 regno);
3389 return -EACCES;
3390 }
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003391 err = __check_mem_access(env, regno, reg->smin_value + off, size,
3392 mem_size, zero_size_allowed);
Gianluca Borellodbcfe5f2017-01-09 10:19:46 -08003393 if (err) {
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003394 verbose(env, "R%d min value is outside of the allowed memory range\n",
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003395 regno);
Gianluca Borellodbcfe5f2017-01-09 10:19:46 -08003396 return err;
3397 }
3398
Edward Creeb03c9f92017-08-07 15:26:36 +01003399 /* If we haven't set a max value then we need to bail since we can't be
3400 * sure we won't do bad things.
3401 * If reg->umax_value + off could overflow, treat that as unbounded too.
Gianluca Borellodbcfe5f2017-01-09 10:19:46 -08003402 */
Edward Creeb03c9f92017-08-07 15:26:36 +01003403 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003404 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
Gianluca Borellodbcfe5f2017-01-09 10:19:46 -08003405 regno);
3406 return -EACCES;
3407 }
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003408 err = __check_mem_access(env, regno, reg->umax_value + off, size,
3409 mem_size, zero_size_allowed);
3410 if (err) {
3411 verbose(env, "R%d max value is outside of the allowed memory range\n",
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003412 regno);
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003413 return err;
3414 }
Alexei Starovoitovd83525c2019-01-31 15:40:04 -08003415
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003416 return 0;
3417}
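
/* Worked example with hypothetical numbers: for mem_size = 24, off = 8,
 * size = 4 and a register with smin_value = 0 and umax_value = 12, both
 * probes pass ([8, 12) for the minimum, [20, 24) for the maximum). With
 * umax_value = 16 the second probe is [24, 28), which is out of bounds,
 * so the access is rejected as outside the allowed memory range.
 */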
3418
3419/* check read/write into a map element with possible variable offset */
3420static int check_map_access(struct bpf_verifier_env *env, u32 regno,
3421 int off, int size, bool zero_size_allowed)
3422{
3423 struct bpf_verifier_state *vstate = env->cur_state;
3424 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3425 struct bpf_reg_state *reg = &state->regs[regno];
3426 struct bpf_map *map = reg->map_ptr;
3427 int err;
3428
3429 err = check_mem_region_access(env, regno, off, size, map->value_size,
3430 zero_size_allowed);
3431 if (err)
3432 return err;
3433
3434 if (map_value_has_spin_lock(map)) {
3435 u32 lock = map->spin_lock_off;
Alexei Starovoitovd83525c2019-01-31 15:40:04 -08003436
3437 /* if any part of struct bpf_spin_lock can be touched by
3438 * load/store reject this program.
3439 * To check that [x1, x2) overlaps with [y1, y2)
3440 * it is sufficient to check x1 < y2 && y1 < x2.
3441 */
3442 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
3443 lock < reg->umax_value + off + size) {
3444 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
3445 return -EACCES;
3446 }
3447 }
Alexei Starovoitov68134662021-07-14 17:54:10 -07003448 if (map_value_has_timer(map)) {
3449 u32 t = map->timer_off;
3450
3451 if (reg->smin_value + off < t + sizeof(struct bpf_timer) &&
3452 t < reg->umax_value + off + size) {
3453 verbose(env, "bpf_timer cannot be accessed directly by load/store\n");
3454 return -EACCES;
3455 }
3456 }
Edward Creef1174f72017-08-07 15:26:19 +01003457 return err;
Gianluca Borellodbcfe5f2017-01-09 10:19:46 -08003458}
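
/* Worked example of the overlap test, with a hypothetical layout: a
 * bpf_spin_lock at value offset 16 protects bytes [16, 20). An 8-byte
 * access spanning [12, 20) overlaps since 12 < 20 && 16 < 20, so it is
 * rejected; the same x1 < y2 && y1 < x2 test guards bpf_timer above.
 */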
3459
Alexei Starovoitov969bf052016-05-05 19:49:10 -07003460#define MAX_PACKET_OFF 0xffff
3461
Udip Pant7e407812020-08-25 16:20:00 -07003462static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
3463{
Toke Høiland-Jørgensen3aac1ea2020-09-29 14:45:50 +02003464 return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type;
Udip Pant7e407812020-08-25 16:20:00 -07003465}
3466
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +01003467static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
Thomas Graf3a0af8f2016-11-30 17:10:10 +01003468 const struct bpf_call_arg_meta *meta,
3469 enum bpf_access_type t)
Brenden Blanco4acf6c02016-07-19 12:16:56 -07003470{
Udip Pant7e407812020-08-25 16:20:00 -07003471 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
3472
3473 switch (prog_type) {
Daniel Borkmann5d66fa72018-10-24 22:05:45 +02003474 /* Program types only with direct read access go here! */
Thomas Graf3a0af8f2016-11-30 17:10:10 +01003475 case BPF_PROG_TYPE_LWT_IN:
3476 case BPF_PROG_TYPE_LWT_OUT:
Mathieu Xhonneux004d4b22018-05-20 14:58:16 +01003477 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
Martin KaFai Lau2dbb9b92018-08-08 01:01:25 -07003478 case BPF_PROG_TYPE_SK_REUSEPORT:
Daniel Borkmann5d66fa72018-10-24 22:05:45 +02003479 case BPF_PROG_TYPE_FLOW_DISSECTOR:
Daniel Borkmannd5563d32018-10-24 22:05:46 +02003480 case BPF_PROG_TYPE_CGROUP_SKB:
Thomas Graf3a0af8f2016-11-30 17:10:10 +01003481 if (t == BPF_WRITE)
3482 return false;
Gustavo A. R. Silva87317452020-10-02 18:42:17 -05003483 fallthrough;
Daniel Borkmann5d66fa72018-10-24 22:05:45 +02003484
3485 /* Program types with direct read + write access go here! */
Daniel Borkmann36bbef52016-09-20 00:26:13 +02003486 case BPF_PROG_TYPE_SCHED_CLS:
3487 case BPF_PROG_TYPE_SCHED_ACT:
Brenden Blanco4acf6c02016-07-19 12:16:56 -07003488 case BPF_PROG_TYPE_XDP:
Thomas Graf3a0af8f2016-11-30 17:10:10 +01003489 case BPF_PROG_TYPE_LWT_XMIT:
John Fastabend8a31db52017-08-15 22:33:09 -07003490 case BPF_PROG_TYPE_SK_SKB:
John Fastabend4f738ad2018-03-18 12:57:10 -07003491 case BPF_PROG_TYPE_SK_MSG:
Daniel Borkmann36bbef52016-09-20 00:26:13 +02003492 if (meta)
3493 return meta->pkt_access;
3494
3495 env->seen_direct_write = true;
Brenden Blanco4acf6c02016-07-19 12:16:56 -07003496 return true;
Stanislav Fomichev0d01da62019-06-27 13:38:47 -07003497
3498 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3499 if (t == BPF_WRITE)
3500 env->seen_direct_write = true;
3501
3502 return true;
3503
Brenden Blanco4acf6c02016-07-19 12:16:56 -07003504 default:
3505 return false;
3506 }
3507}
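
/* Example: BPF_PROG_TYPE_CGROUP_SKB returns false for BPF_WRITE (the
 * type only has direct read access) but falls through for reads, so it
 * shares the handling of the read + write group below it.
 */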
3508
Edward Creef1174f72017-08-07 15:26:19 +01003509static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
Yonghong Song9fd29c02017-11-12 14:49:09 -08003510 int size, bool zero_size_allowed)
Edward Creef1174f72017-08-07 15:26:19 +01003511{
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07003512 struct bpf_reg_state *regs = cur_regs(env);
Edward Creef1174f72017-08-07 15:26:19 +01003513 struct bpf_reg_state *reg = &regs[regno];
3514 int err;
3515
3516 /* We may have added a variable offset to the packet pointer; but any
3517 * reg->range we have comes after that. We are only checking the fixed
3518 * offset.
3519 */
3520
3521 /* We don't allow negative numbers, because we aren't tracking enough
3522 * detail to prove they're safe.
3523 */
Edward Creeb03c9f92017-08-07 15:26:36 +01003524 if (reg->smin_value < 0) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003525 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
Edward Creef1174f72017-08-07 15:26:19 +01003526 regno);
3527 return -EACCES;
3528 }
Alexei Starovoitov6d94e742020-11-10 19:12:11 -08003529
3530 err = reg->range < 0 ? -EINVAL :
3531 __check_mem_access(env, regno, off, size, reg->range,
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003532 zero_size_allowed);
Edward Creef1174f72017-08-07 15:26:19 +01003533 if (err) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003534 verbose(env, "R%d offset is outside of the packet\n", regno);
Edward Creef1174f72017-08-07 15:26:19 +01003535 return err;
3536 }
Jiong Wange6478152018-11-08 04:08:42 -05003537
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003538 /* __check_mem_access has made sure "off + size - 1" is within u16.
Jiong Wange6478152018-11-08 04:08:42 -05003539 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
3540 * otherwise find_good_pkt_pointers would have refused to set range info
Andrii Nakryiko457f4432020-05-29 00:54:20 -07003541 * that __check_mem_access would have rejected this pkt access.
Jiong Wange6478152018-11-08 04:08:42 -05003542 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
3543 */
3544 env->prog->aux->max_pkt_offset =
3545 max_t(u32, env->prog->aux->max_pkt_offset,
3546 off + reg->umax_value + size - 1);
3547
Edward Creef1174f72017-08-07 15:26:19 +01003548 return err;
3549}
3550
3551/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
Yonghong Song31fd8582017-06-13 15:52:13 -07003552static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07003553 enum bpf_access_type t, enum bpf_reg_type *reg_type,
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08003554 struct btf **btf, u32 *btf_id)
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003555{
Daniel Borkmannf96da092017-07-02 02:13:27 +02003556 struct bpf_insn_access_aux info = {
3557 .reg_type = *reg_type,
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07003558 .log = &env->log,
Daniel Borkmannf96da092017-07-02 02:13:27 +02003559 };
Yonghong Song31fd8582017-06-13 15:52:13 -07003560
Jakub Kicinski4f9218a2017-10-16 16:40:55 -07003561 if (env->ops->is_valid_access &&
Andrey Ignatov5e43f892018-03-30 15:08:00 -07003562 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
Daniel Borkmannf96da092017-07-02 02:13:27 +02003563 /* A non zero info.ctx_field_size indicates that this field is a
3564 * candidate for later verifier transformation to load the whole
3565 * field and then apply a mask when accessed with a narrower
3566 * access than actual ctx access size. A zero info.ctx_field_size
3567 * will only allow for whole field access and rejects any other
3568 * type of narrower access.
Yonghong Song31fd8582017-06-13 15:52:13 -07003569 */
Yonghong Song23994632017-06-22 15:07:39 -07003570 *reg_type = info.reg_type;
Yonghong Song31fd8582017-06-13 15:52:13 -07003571
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08003572 if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) {
3573 *btf = info.btf;
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07003574 *btf_id = info.btf_id;
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08003575 } else {
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07003576 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08003577 }
Alexei Starovoitov32bbe002016-04-06 18:43:28 -07003578 /* remember the offset of last byte accessed in ctx */
3579 if (env->prog->aux->max_ctx_offset < off + size)
3580 env->prog->aux->max_ctx_offset = off + size;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003581 return 0;
Alexei Starovoitov32bbe002016-04-06 18:43:28 -07003582 }
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003583
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003584 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07003585 return -EACCES;
3586}
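
/* Example with a hypothetical 4-byte context field: if is_valid_access()
 * reports info.ctx_field_size = 4, a narrower 1- or 2-byte read of that
 * field is still accepted here; a later verifier pass rewrites it into
 * a load of the whole field plus a mask rather than rejecting the
 * narrow access.
 */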
3587
Petar Penkovd58e4682018-09-14 07:46:18 -07003588static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
3589 int size)
3590{
3591 if (size < 0 || off < 0 ||
3592 (u64)off + size > sizeof(struct bpf_flow_keys)) {
3593 verbose(env, "invalid access to flow keys off=%d size=%d\n",
3594 off, size);
3595 return -EACCES;
3596 }
3597 return 0;
3598}
3599
Martin KaFai Lau5f456642019-02-08 22:25:54 -08003600static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
3601 u32 regno, int off, int size,
3602 enum bpf_access_type t)
Joe Stringerc64b7982018-10-02 13:35:33 -07003603{
3604 struct bpf_reg_state *regs = cur_regs(env);
3605 struct bpf_reg_state *reg = &regs[regno];
Martin KaFai Lau5f456642019-02-08 22:25:54 -08003606 struct bpf_insn_access_aux info = {};
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08003607 bool valid;
Joe Stringerc64b7982018-10-02 13:35:33 -07003608
3609 if (reg->smin_value < 0) {
3610 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3611 regno);
3612 return -EACCES;
3613 }
3614
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08003615 switch (reg->type) {
3616 case PTR_TO_SOCK_COMMON:
3617 valid = bpf_sock_common_is_valid_access(off, size, t, &info);
3618 break;
3619 case PTR_TO_SOCKET:
3620 valid = bpf_sock_is_valid_access(off, size, t, &info);
3621 break;
Martin KaFai Lau655a51e2019-02-09 23:22:24 -08003622 case PTR_TO_TCP_SOCK:
3623 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
3624 break;
Jonathan Lemonfada7fd2019-06-06 13:59:40 -07003625 case PTR_TO_XDP_SOCK:
3626 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
3627 break;
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08003628 default:
3629 valid = false;
Joe Stringerc64b7982018-10-02 13:35:33 -07003630 }
3631
Martin KaFai Lau5f456642019-02-08 22:25:54 -08003632
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08003633 if (valid) {
3634 env->insn_aux_data[insn_idx].ctx_field_size =
3635 info.ctx_field_size;
3636 return 0;
3637 }
3638
3639 verbose(env, "R%d invalid %s access off=%d size=%d\n",
3640 regno, reg_type_str[reg->type], off, size);
3641
3642 return -EACCES;
Joe Stringerc64b7982018-10-02 13:35:33 -07003643}
3644
Daniel Borkmann4cabc5b2017-07-21 00:00:21 +02003645static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
3646{
Daniel Borkmann2a159c62018-10-21 02:09:24 +02003647 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
Daniel Borkmann4cabc5b2017-07-21 00:00:21 +02003648}
3649
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01003650static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
3651{
Daniel Borkmann2a159c62018-10-21 02:09:24 +02003652 const struct bpf_reg_state *reg = reg_state(env, regno);
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01003653
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08003654 return reg->type == PTR_TO_CTX;
3655}
3656
3657static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
3658{
3659 const struct bpf_reg_state *reg = reg_state(env, regno);
3660
3661 return type_is_sk_pointer(reg->type);
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01003662}
3663
Daniel Borkmannca369602018-02-23 22:29:05 +01003664static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
3665{
Daniel Borkmann2a159c62018-10-21 02:09:24 +02003666 const struct bpf_reg_state *reg = reg_state(env, regno);
Daniel Borkmannca369602018-02-23 22:29:05 +01003667
3668 return type_is_pkt_pointer(reg->type);
3669}
3670
Daniel Borkmann4b5defd2018-10-21 02:09:25 +02003671static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
3672{
3673 const struct bpf_reg_state *reg = reg_state(env, regno);
3674
3675 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
3676 return reg->type == PTR_TO_FLOW_KEYS;
3677}
3678
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003679static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
3680 const struct bpf_reg_state *reg,
David S. Millerd1174412017-05-10 11:22:52 -07003681 int off, int size, bool strict)
Alexei Starovoitov969bf052016-05-05 19:49:10 -07003682{
Edward Creef1174f72017-08-07 15:26:19 +01003683 struct tnum reg_off;
David S. Millere07b98d2017-05-10 11:38:07 -07003684 int ip_align;
David S. Millerd1174412017-05-10 11:22:52 -07003685
3686 /* Byte size accesses are always allowed. */
3687 if (!strict || size == 1)
3688 return 0;
3689
David S. Millere4eda882017-05-22 12:27:07 -04003690 /* For platforms that do not have a Kconfig enabling
3691 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
3692 * NET_IP_ALIGN is universally set to '2'. And on platforms
3693 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
3694 * to this code only in strict mode where we want to emulate
3695 * the NET_IP_ALIGN==2 checking. Therefore use an
3696 * unconditional IP align value of '2'.
David S. Millere07b98d2017-05-10 11:38:07 -07003697 */
David S. Millere4eda882017-05-22 12:27:07 -04003698 ip_align = 2;
Edward Creef1174f72017-08-07 15:26:19 +01003699
3700 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
3701 if (!tnum_is_aligned(reg_off, size)) {
3702 char tn_buf[48];
3703
3704 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003705 verbose(env,
3706 "misaligned packet access off %d+%s+%d+%d size %d\n",
Edward Creef1174f72017-08-07 15:26:19 +01003707 ip_align, tn_buf, reg->off, off, size);
Alexei Starovoitov969bf052016-05-05 19:49:10 -07003708 return -EACCES;
3709 }
Daniel Borkmann79adffc2017-03-31 02:24:03 +02003710
Alexei Starovoitov969bf052016-05-05 19:49:10 -07003711 return 0;
3712}
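
/* Worked example with hypothetical offsets: with the emulated
 * NET_IP_ALIGN of 2, a 2-byte load of the Ethernet protocol field at
 * packet offset 12 checks 2 + 12 = 14, which is 2-byte aligned and
 * passes; a 4-byte load at the same offset fails since 14 % 4 != 0,
 * while 4-byte loads within the IP header starting at offset 14 land
 * on 16 and are fine.
 */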
3713
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003714static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
3715 const struct bpf_reg_state *reg,
Edward Creef1174f72017-08-07 15:26:19 +01003716 const char *pointer_desc,
3717 int off, int size, bool strict)
Daniel Borkmann79adffc2017-03-31 02:24:03 +02003718{
Edward Creef1174f72017-08-07 15:26:19 +01003719 struct tnum reg_off;
3720
3721 /* Byte size accesses are always allowed. */
3722 if (!strict || size == 1)
3723 return 0;
3724
3725 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
3726 if (!tnum_is_aligned(reg_off, size)) {
3727 char tn_buf[48];
3728
3729 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003730 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
Edward Creef1174f72017-08-07 15:26:19 +01003731 pointer_desc, tn_buf, reg->off, off, size);
Daniel Borkmann79adffc2017-03-31 02:24:03 +02003732 return -EACCES;
3733 }
3734
3735 return 0;
3736}
3737
David S. Millere07b98d2017-05-10 11:38:07 -07003738static int check_ptr_alignment(struct bpf_verifier_env *env,
Daniel Borkmannca369602018-02-23 22:29:05 +01003739 const struct bpf_reg_state *reg, int off,
3740 int size, bool strict_alignment_once)
Daniel Borkmann79adffc2017-03-31 02:24:03 +02003741{
Daniel Borkmannca369602018-02-23 22:29:05 +01003742 bool strict = env->strict_alignment || strict_alignment_once;
Edward Creef1174f72017-08-07 15:26:19 +01003743 const char *pointer_desc = "";
David S. Millerd1174412017-05-10 11:22:52 -07003744
Daniel Borkmann79adffc2017-03-31 02:24:03 +02003745 switch (reg->type) {
3746 case PTR_TO_PACKET:
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02003747 case PTR_TO_PACKET_META:
3748 /* Special case, because of NET_IP_ALIGN. Given metadata sits
3749 * right in front, treat it the very same way.
3750 */
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003751 return check_pkt_ptr_alignment(env, reg, off, size, strict);
Petar Penkovd58e4682018-09-14 07:46:18 -07003752 case PTR_TO_FLOW_KEYS:
3753 pointer_desc = "flow keys ";
3754 break;
Yonghong Song69c087b2021-02-26 12:49:25 -08003755 case PTR_TO_MAP_KEY:
3756 pointer_desc = "key ";
3757 break;
Edward Creef1174f72017-08-07 15:26:19 +01003758 case PTR_TO_MAP_VALUE:
3759 pointer_desc = "value ";
3760 break;
3761 case PTR_TO_CTX:
3762 pointer_desc = "context ";
3763 break;
3764 case PTR_TO_STACK:
3765 pointer_desc = "stack ";
Andrei Matei01f810a2021-02-06 20:10:24 -05003766 /* The stack spill tracking logic in check_stack_write_fixed_off()
3767 * and check_stack_read_fixed_off() relies on stack accesses being
Jann Horna5ec6ae2017-12-18 20:11:58 -08003768 * aligned.
3769 */
3770 strict = true;
Edward Creef1174f72017-08-07 15:26:19 +01003771 break;
Joe Stringerc64b7982018-10-02 13:35:33 -07003772 case PTR_TO_SOCKET:
3773 pointer_desc = "sock ";
3774 break;
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08003775 case PTR_TO_SOCK_COMMON:
3776 pointer_desc = "sock_common ";
3777 break;
Martin KaFai Lau655a51e2019-02-09 23:22:24 -08003778 case PTR_TO_TCP_SOCK:
3779 pointer_desc = "tcp_sock ";
3780 break;
Jonathan Lemonfada7fd2019-06-06 13:59:40 -07003781 case PTR_TO_XDP_SOCK:
3782 pointer_desc = "xdp_sock ";
3783 break;
Daniel Borkmann79adffc2017-03-31 02:24:03 +02003784 default:
Edward Creef1174f72017-08-07 15:26:19 +01003785 break;
Daniel Borkmann79adffc2017-03-31 02:24:03 +02003786 }
Jakub Kicinski61bd5212017-10-09 10:30:11 -07003787 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
3788 strict);
Daniel Borkmann79adffc2017-03-31 02:24:03 +02003789}
3790
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08003791static int update_stack_depth(struct bpf_verifier_env *env,
3792 const struct bpf_func_state *func,
3793 int off)
3794{
Jiong Wang9c8105b2018-05-02 16:17:18 -04003795 u16 stack = env->subprog_info[func->subprogno].stack_depth;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08003796
3797 if (stack >= -off)
3798 return 0;
3799
3800 /* update known max for given subprogram */
Jiong Wang9c8105b2018-05-02 16:17:18 -04003801 env->subprog_info[func->subprogno].stack_depth = -off;
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003802 return 0;
3803}
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08003804
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003805/* starting from the main bpf function, walk all instructions of the function
3806 * and recursively walk all callees that the given function can call.
3807 * Ignore jump and exit insns.
3808 * Since recursion is prevented by check_cfg() this algorithm
3809 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
3810 */
3811static int check_max_stack_depth(struct bpf_verifier_env *env)
3812{
Jiong Wang9c8105b2018-05-02 16:17:18 -04003813 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
3814 struct bpf_subprog_info *subprog = env->subprog_info;
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003815 struct bpf_insn *insn = env->prog->insnsi;
Maciej Fijalkowskiebf7d1f2020-09-16 23:10:08 +02003816 bool tail_call_reachable = false;
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003817 int ret_insn[MAX_CALL_FRAMES];
3818 int ret_prog[MAX_CALL_FRAMES];
Maciej Fijalkowskiebf7d1f2020-09-16 23:10:08 +02003819 int j;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08003820
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003821process_func:
Maciej Fijalkowski7f6e4312020-09-16 23:10:07 +02003822 /* protect against potential stack overflow that might happen when
3823 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
3824 * depth for such case down to 256 so that the worst case scenario
3825 * would result in 8k stack size (32 which is tailcall limit * 256 =
3826 * 8k).
3827 *
3828	 * To get an idea of what might happen, see an example:
3829 * func1 -> sub rsp, 128
3830 * subfunc1 -> sub rsp, 256
3831 * tailcall1 -> add rsp, 256
3832 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
3833 * subfunc2 -> sub rsp, 64
3834 * subfunc22 -> sub rsp, 128
3835 * tailcall2 -> add rsp, 128
3836 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
3837 *
3838 * tailcall will unwind the current stack frame but it will not get rid
3839 * of caller's stack as shown on the example above.
3840 */
3841 if (idx && subprog[idx].has_tail_call && depth >= 256) {
3842 verbose(env,
3843 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
3844 depth);
3845 return -EACCES;
3846 }
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003847	/* round up to 32 bytes, since this is the granularity
3848	 * of the interpreter stack size
3849	 */
Jiong Wang9c8105b2018-05-02 16:17:18 -04003850 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003851 if (depth > MAX_BPF_STACK) {
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08003852 verbose(env, "combined stack size of %d calls is %d. Too large\n",
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003853 frame + 1, depth);
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08003854 return -EACCES;
3855 }
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003856continue_func:
Jiong Wang4cb3d992018-05-02 16:17:19 -04003857 subprog_end = subprog[idx + 1].start;
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003858 for (; i < subprog_end; i++) {
Alexei Starovoitov7ddc80a2021-07-14 17:54:15 -07003859 int next_insn;
3860
Yonghong Song69c087b2021-02-26 12:49:25 -08003861 if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003862 continue;
3863 /* remember insn and function to return to */
3864 ret_insn[frame] = i + 1;
Jiong Wang9c8105b2018-05-02 16:17:18 -04003865 ret_prog[frame] = idx;
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003866
3867 /* find the callee */
Alexei Starovoitov7ddc80a2021-07-14 17:54:15 -07003868 next_insn = i + insn[i].imm + 1;
3869 idx = find_subprog(env, next_insn);
Jiong Wang9c8105b2018-05-02 16:17:18 -04003870 if (idx < 0) {
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003871 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
Alexei Starovoitov7ddc80a2021-07-14 17:54:15 -07003872 next_insn);
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003873 return -EFAULT;
3874 }
Alexei Starovoitov7ddc80a2021-07-14 17:54:15 -07003875 if (subprog[idx].is_async_cb) {
3876 if (subprog[idx].has_tail_call) {
3877 verbose(env, "verifier bug. subprog has tail_call and async cb\n");
3878 return -EFAULT;
3879 }
3880 /* async callbacks don't increase bpf prog stack size */
3881 continue;
3882 }
3883 i = next_insn;
Maciej Fijalkowskiebf7d1f2020-09-16 23:10:08 +02003884
3885 if (subprog[idx].has_tail_call)
3886 tail_call_reachable = true;
3887
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003888 frame++;
3889 if (frame >= MAX_CALL_FRAMES) {
Paul Chaignon927cb782019-03-20 13:58:27 +01003890			verbose(env, "the call stack of %d frames is too deep!\n",
3891 frame);
3892 return -E2BIG;
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003893 }
3894 goto process_func;
3895 }
Maciej Fijalkowskiebf7d1f2020-09-16 23:10:08 +02003896	/* if a tail call was detected across bpf2bpf calls then mark each of
3897	 * the currently present subprog frames as tail call reachable;
3898	 * this info will be utilized by the JIT so that the tail call counter
3899	 * is preserved throughout bpf2bpf calls combined with tailcalls
3900	 */
3901 if (tail_call_reachable)
3902 for (j = 0; j < frame; j++)
3903 subprog[ret_prog[j]].tail_call_reachable = true;
Daniel Borkmann5dd0a6b2021-07-12 22:57:35 +02003904 if (subprog[0].tail_call_reachable)
3905 env->prog->aux->tail_call_reachable = true;
Maciej Fijalkowskiebf7d1f2020-09-16 23:10:08 +02003906
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003907 /* end of for() loop means the last insn of the 'subprog'
3908 * was reached. Doesn't matter whether it was JA or EXIT
3909 */
3910 if (frame == 0)
3911 return 0;
Jiong Wang9c8105b2018-05-02 16:17:18 -04003912 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003913 frame--;
3914 i = ret_insn[frame];
Jiong Wang9c8105b2018-05-02 16:17:18 -04003915 idx = ret_prog[frame];
Alexei Starovoitov70a87ff2017-12-25 13:15:40 -08003916 goto continue_func;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08003917}
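
/* Illustrative example of the accounting above, assuming
 * MAX_BPF_STACK == 512: a chain main (200 bytes of stack) ->
 * subprog A (200 bytes) -> subprog B (200 bytes) accumulates
 * depth = 3 * round_up(200, 32) = 672 and is rejected with the
 * "combined stack size" error, even though each frame individually
 * fits within the limit.
 */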
3918
David S. Miller19d28fb2018-01-11 21:27:54 -05003919#ifndef CONFIG_BPF_JIT_ALWAYS_ON
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -08003920static int get_callee_stack_depth(struct bpf_verifier_env *env,
3921 const struct bpf_insn *insn, int idx)
3922{
3923 int start = idx + insn->imm + 1, subprog;
3924
3925 subprog = find_subprog(env, start);
3926 if (subprog < 0) {
3927 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
3928 start);
3929 return -EFAULT;
3930 }
Jiong Wang9c8105b2018-05-02 16:17:18 -04003931 return env->subprog_info[subprog].stack_depth;
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -08003932}
David S. Miller19d28fb2018-01-11 21:27:54 -05003933#endif
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -08003934
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -08003935int check_ctx_reg(struct bpf_verifier_env *env,
3936 const struct bpf_reg_state *reg, int regno)
Daniel Borkmann58990d12018-06-07 17:40:03 +02003937{
3938 /* Access to ctx or passing it to a helper is only allowed in
3939 * its original, unmodified form.
3940 */
3941
3942 if (reg->off) {
3943 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
3944 regno, reg->off);
3945 return -EACCES;
3946 }
3947
3948 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3949 char tn_buf[48];
3950
3951 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3952 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
3953 return -EACCES;
3954 }
3955
3956 return 0;
3957}
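
/* Illustrative asm-level sketch of what check_ctx_reg() rejects:
 *
 *	r1 += 8;		// ctx pointer now carries reg->off == 8
 *	r0 = *(u32 *)(r1 + 0);	// rejected: modified ctx ptr
 *
 * whereas encoding the constant offset in the load itself is fine:
 *
 *	r0 = *(u32 *)(r1 + 8);	// accepted
 */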
3958
Yonghong Songafbf21d2020-07-23 11:41:11 -07003959static int __check_buffer_access(struct bpf_verifier_env *env,
3960 const char *buf_info,
3961 const struct bpf_reg_state *reg,
3962 int regno, int off, int size)
Matt Mullins9df1c282019-04-26 11:49:47 -07003963{
3964 if (off < 0) {
3965 verbose(env,
Yonghong Song4fc00b72020-07-28 15:18:01 -07003966 "R%d invalid %s buffer access: off=%d, size=%d\n",
Yonghong Songafbf21d2020-07-23 11:41:11 -07003967 regno, buf_info, off, size);
Matt Mullins9df1c282019-04-26 11:49:47 -07003968 return -EACCES;
3969 }
3970 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3971 char tn_buf[48];
3972
3973 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3974 verbose(env,
Yonghong Song4fc00b72020-07-28 15:18:01 -07003975 "R%d invalid variable buffer offset: off=%d, var_off=%s\n",
Matt Mullins9df1c282019-04-26 11:49:47 -07003976 regno, off, tn_buf);
3977 return -EACCES;
3978 }
Yonghong Songafbf21d2020-07-23 11:41:11 -07003979
3980 return 0;
3981}
3982
3983static int check_tp_buffer_access(struct bpf_verifier_env *env,
3984 const struct bpf_reg_state *reg,
3985 int regno, int off, int size)
3986{
3987 int err;
3988
3989 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
3990 if (err)
3991 return err;
3992
Matt Mullins9df1c282019-04-26 11:49:47 -07003993 if (off + size > env->prog->aux->max_tp_access)
3994 env->prog->aux->max_tp_access = off + size;
3995
3996 return 0;
3997}
3998
Yonghong Songafbf21d2020-07-23 11:41:11 -07003999static int check_buffer_access(struct bpf_verifier_env *env,
4000 const struct bpf_reg_state *reg,
4001 int regno, int off, int size,
4002 bool zero_size_allowed,
4003 const char *buf_info,
4004 u32 *max_access)
4005{
4006 int err;
4007
4008 err = __check_buffer_access(env, buf_info, reg, regno, off, size);
4009 if (err)
4010 return err;
4011
4012 if (off + size > *max_access)
4013 *max_access = off + size;
4014
4015 return 0;
4016}
4017
John Fastabend3f50f132020-03-30 14:36:39 -07004018/* BPF architecture zero-extends alu32 ops into 64-bit registers */
4019static void zext_32_to_64(struct bpf_reg_state *reg)
4020{
4021 reg->var_off = tnum_subreg(reg->var_off);
4022 __reg_assign_32_into_64(reg);
4023}
Matt Mullins9df1c282019-04-26 11:49:47 -07004024
Jann Horn0c17d1d2017-12-18 20:11:55 -08004025/* truncate register to smaller size (in bytes)
4026 * must be called with size < BPF_REG_SIZE
4027 */
4028static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
4029{
4030 u64 mask;
4031
4032 /* clear high bits in bit representation */
4033 reg->var_off = tnum_cast(reg->var_off, size);
4034
4035 /* fix arithmetic bounds */
4036 mask = ((u64)1 << (size * 8)) - 1;
4037 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
4038 reg->umin_value &= mask;
4039 reg->umax_value &= mask;
4040 } else {
4041 reg->umin_value = 0;
4042 reg->umax_value = mask;
4043 }
4044 reg->smin_value = reg->umin_value;
4045 reg->smax_value = reg->umax_value;
John Fastabend3f50f132020-03-30 14:36:39 -07004046
4047	/* If the size is smaller than the 32-bit register, the 32-bit
4048	 * register values are also truncated, so we push the 64-bit bounds
4049	 * into the 32-bit bounds. Sizes < 32 bits were already truncated above.
4050	 */
4051 if (size >= 4)
4052 return;
4053 __reg_combine_64_into_32(reg);
Jann Horn0c17d1d2017-12-18 20:11:55 -08004054}
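
/* Worked example (illustrative) for coerce_reg_to_size(): truncating
 * to size == 1 uses mask == 0xff. A register known to be in
 * [0x105, 0x1a0] has identical bits above the mask, so the masked
 * bounds [0x05, 0xa0] stay tight; a register in [0xff, 0x105]
 * straddles the mask and its bounds collapse to the conservative
 * [0, 0xff].
 */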
4055
Andrii Nakryikoa23740e2019-10-09 13:14:57 -07004056static bool bpf_map_is_rdonly(const struct bpf_map *map)
4057{
4058 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
4059}
4060
4061static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
4062{
4063 void *ptr;
4064 u64 addr;
4065 int err;
4066
4067 err = map->ops->map_direct_value_addr(map, &addr, off);
4068 if (err)
4069 return err;
Andrii Nakryiko2dedd7d2019-10-11 10:20:53 -07004070 ptr = (void *)(long)addr + off;
Andrii Nakryikoa23740e2019-10-09 13:14:57 -07004071
4072 switch (size) {
4073 case sizeof(u8):
4074 *val = (u64)*(u8 *)ptr;
4075 break;
4076 case sizeof(u16):
4077 *val = (u64)*(u16 *)ptr;
4078 break;
4079 case sizeof(u32):
4080 *val = (u64)*(u32 *)ptr;
4081 break;
4082 case sizeof(u64):
4083 *val = *(u64 *)ptr;
4084 break;
4085 default:
4086 return -EINVAL;
4087 }
4088 return 0;
4089}
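
/* Illustrative user-space sketch (fd/key names assumed) of what makes
 * bpf_map_is_rdonly() true: the map is created with BPF_F_RDONLY_PROG
 * and frozen before the program is loaded:
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *	bpf_map_freeze(map_fd);		// sets map->frozen
 *
 * Loads from such a map at a constant offset are then folded into
 * known scalars in check_mem_access() below, which lets the verifier
 * prune branches that depend on the loaded value.
 */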
4090
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07004091static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
4092 struct bpf_reg_state *regs,
4093 int regno, int off, int size,
4094 enum bpf_access_type atype,
4095 int value_regno)
4096{
4097 struct bpf_reg_state *reg = regs + regno;
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08004098 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
4099 const char *tname = btf_name_by_offset(reg->btf, t->name_off);
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07004100 u32 btf_id;
4101 int ret;
4102
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07004103 if (off < 0) {
4104 verbose(env,
4105 "R%d is ptr_%s invalid negative access: off=%d\n",
4106 regno, tname, off);
4107 return -EACCES;
4108 }
4109 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4110 char tn_buf[48];
4111
4112 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4113 verbose(env,
4114 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
4115 regno, tname, off, tn_buf);
4116 return -EACCES;
4117 }
4118
Martin KaFai Lau27ae79972020-01-08 16:35:03 -08004119 if (env->ops->btf_struct_access) {
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08004120 ret = env->ops->btf_struct_access(&env->log, reg->btf, t,
4121 off, size, atype, &btf_id);
Martin KaFai Lau27ae79972020-01-08 16:35:03 -08004122 } else {
4123 if (atype != BPF_READ) {
4124 verbose(env, "only read is supported\n");
4125 return -EACCES;
4126 }
4127
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08004128 ret = btf_struct_access(&env->log, reg->btf, t, off, size,
4129 atype, &btf_id);
Martin KaFai Lau27ae79972020-01-08 16:35:03 -08004130 }
4131
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07004132 if (ret < 0)
4133 return ret;
4134
Andrey Ignatov41c48f32020-06-19 14:11:43 -07004135 if (atype == BPF_READ && value_regno >= 0)
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08004136 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id);
Martin KaFai Lau27ae79972020-01-08 16:35:03 -08004137
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07004138 return 0;
4139}
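
/* Illustrative program-side sketch (section and names assumed) of an
 * access checked by check_ptr_to_btf_access():
 *
 *	SEC("fentry/tcp_v4_connect")
 *	int BPF_PROG(trace, struct sock *sk)
 *	{
 *		// direct load through a PTR_TO_BTF_ID register;
 *		// off/size are validated against the BTF layout
 *		return sk->__sk_common.skc_family;
 *	}
 */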
4140
Andrey Ignatov41c48f32020-06-19 14:11:43 -07004141static int check_ptr_to_map_access(struct bpf_verifier_env *env,
4142 struct bpf_reg_state *regs,
4143 int regno, int off, int size,
4144 enum bpf_access_type atype,
4145 int value_regno)
4146{
4147 struct bpf_reg_state *reg = regs + regno;
4148 struct bpf_map *map = reg->map_ptr;
4149 const struct btf_type *t;
4150 const char *tname;
4151 u32 btf_id;
4152 int ret;
4153
4154 if (!btf_vmlinux) {
4155 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
4156 return -ENOTSUPP;
4157 }
4158
4159 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
4160 verbose(env, "map_ptr access not supported for map type %d\n",
4161 map->map_type);
4162 return -ENOTSUPP;
4163 }
4164
4165 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
4166 tname = btf_name_by_offset(btf_vmlinux, t->name_off);
4167
4168 if (!env->allow_ptr_to_map_access) {
4169 verbose(env,
4170 "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
4171 tname);
4172 return -EPERM;
4173 }
4174
4175 if (off < 0) {
4176 verbose(env, "R%d is %s invalid negative access: off=%d\n",
4177 regno, tname, off);
4178 return -EACCES;
4179 }
4180
4181 if (atype != BPF_READ) {
4182 verbose(env, "only read from %s is supported\n", tname);
4183 return -EACCES;
4184 }
4185
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08004186 ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id);
Andrey Ignatov41c48f32020-06-19 14:11:43 -07004187 if (ret < 0)
4188 return ret;
4189
4190 if (value_regno >= 0)
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08004191 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id);
Andrey Ignatov41c48f32020-06-19 14:11:43 -07004192
4193 return 0;
4194}
4195
Andrei Matei01f810a2021-02-06 20:10:24 -05004196/* Check that the stack access at the given offset is within bounds. The
4197 * maximum valid offset is -1.
4198 *
4199 * The minimum valid offset is -MAX_BPF_STACK for writes, and
4200 * -state->allocated_stack for reads.
4201 */
4202static int check_stack_slot_within_bounds(int off,
4203 struct bpf_func_state *state,
4204 enum bpf_access_type t)
4205{
4206 int min_valid_off;
4207
4208 if (t == BPF_WRITE)
4209 min_valid_off = -MAX_BPF_STACK;
4210 else
4211 min_valid_off = -state->allocated_stack;
4212
4213 if (off < min_valid_off || off > -1)
4214 return -EACCES;
4215 return 0;
4216}
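
/* Illustrative example: with state->allocated_stack == 64, a read at
 * off == -72 is rejected (below -allocated_stack), while a write at
 * the same offset is accepted since writes may extend the stack
 * anywhere down to -MAX_BPF_STACK.
 */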
4217
4218/* Check that the stack access at 'regno + off' falls within the maximum stack
4219 * bounds.
4220 *
4221 * 'off' includes 'regno->off', but not its dynamic part (if any).
4222 */
4223static int check_stack_access_within_bounds(
4224 struct bpf_verifier_env *env,
4225 int regno, int off, int access_size,
4226 enum stack_access_src src, enum bpf_access_type type)
4227{
4228 struct bpf_reg_state *regs = cur_regs(env);
4229 struct bpf_reg_state *reg = regs + regno;
4230 struct bpf_func_state *state = func(env, reg);
4231 int min_off, max_off;
4232 int err;
4233 char *err_extra;
4234
4235 if (src == ACCESS_HELPER)
4236 /* We don't know if helpers are reading or writing (or both). */
4237 err_extra = " indirect access to";
4238 else if (type == BPF_READ)
4239 err_extra = " read from";
4240 else
4241 err_extra = " write to";
4242
4243 if (tnum_is_const(reg->var_off)) {
4244 min_off = reg->var_off.value + off;
4245 if (access_size > 0)
4246 max_off = min_off + access_size - 1;
4247 else
4248 max_off = min_off;
4249 } else {
4250 if (reg->smax_value >= BPF_MAX_VAR_OFF ||
4251 reg->smin_value <= -BPF_MAX_VAR_OFF) {
4252 verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
4253 err_extra, regno);
4254 return -EACCES;
4255 }
4256 min_off = reg->smin_value + off;
4257 if (access_size > 0)
4258 max_off = reg->smax_value + off + access_size - 1;
4259 else
4260 max_off = min_off;
4261 }
4262
4263 err = check_stack_slot_within_bounds(min_off, state, type);
4264 if (!err)
4265 err = check_stack_slot_within_bounds(max_off, state, type);
4266
4267 if (err) {
4268 if (tnum_is_const(reg->var_off)) {
4269 verbose(env, "invalid%s stack R%d off=%d size=%d\n",
4270 err_extra, regno, off, access_size);
4271 } else {
4272 char tn_buf[48];
4273
4274 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4275 verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
4276 err_extra, regno, tn_buf, access_size);
4277 }
4278 }
4279 return err;
4280}
Andrey Ignatov41c48f32020-06-19 14:11:43 -07004281
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004282/* check whether memory at (regno + off) is accessible for t = (read | write)
4283 * if t==write, value_regno is a register whose value is stored into memory
4284 * if t==read, value_regno is a register which will receive the value from memory
4285 * if t==write && value_regno==-1, some unknown value is stored into memory
4286 * if t==read && value_regno==-1, don't care what we read from memory
4287 */
Daniel Borkmannca369602018-02-23 22:29:05 +01004288static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
4289 int off, int bpf_size, enum bpf_access_type t,
4290 int value_regno, bool strict_alignment_once)
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004291{
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07004292 struct bpf_reg_state *regs = cur_regs(env);
4293 struct bpf_reg_state *reg = regs + regno;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08004294 struct bpf_func_state *state;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004295 int size, err = 0;
4296
4297 size = bpf_size_to_bytes(bpf_size);
4298 if (size < 0)
4299 return size;
4300
Edward Creef1174f72017-08-07 15:26:19 +01004301 /* alignment checks will add in reg->off themselves */
Daniel Borkmannca369602018-02-23 22:29:05 +01004302 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
Alexei Starovoitov969bf052016-05-05 19:49:10 -07004303 if (err)
4304 return err;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004305
Edward Creef1174f72017-08-07 15:26:19 +01004306 /* for access checks, reg->off is just part of off */
4307 off += reg->off;
4308
Yonghong Song69c087b2021-02-26 12:49:25 -08004309 if (reg->type == PTR_TO_MAP_KEY) {
4310 if (t == BPF_WRITE) {
4311 verbose(env, "write to change key R%d not allowed\n", regno);
4312 return -EACCES;
4313 }
4314
4315 err = check_mem_region_access(env, regno, off, size,
4316 reg->map_ptr->key_size, false);
4317 if (err)
4318 return err;
4319 if (value_regno >= 0)
4320 mark_reg_unknown(env, regs, value_regno);
4321 } else if (reg->type == PTR_TO_MAP_VALUE) {
Alexei Starovoitov1be7f752015-10-07 22:23:21 -07004322 if (t == BPF_WRITE && value_regno >= 0 &&
4323 is_pointer_value(env, value_regno)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07004324 verbose(env, "R%d leaks addr into map\n", value_regno);
Alexei Starovoitov1be7f752015-10-07 22:23:21 -07004325 return -EACCES;
4326 }
Daniel Borkmann591fe982019-04-09 23:20:05 +02004327 err = check_map_access_type(env, regno, off, size, t);
4328 if (err)
4329 return err;
Yonghong Song9fd29c02017-11-12 14:49:09 -08004330 err = check_map_access(env, regno, off, size, false);
Andrii Nakryikoa23740e2019-10-09 13:14:57 -07004331 if (!err && t == BPF_READ && value_regno >= 0) {
4332 struct bpf_map *map = reg->map_ptr;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004333
Andrii Nakryikoa23740e2019-10-09 13:14:57 -07004334 /* if map is read-only, track its contents as scalars */
4335 if (tnum_is_const(reg->var_off) &&
4336 bpf_map_is_rdonly(map) &&
4337 map->ops->map_direct_value_addr) {
4338 int map_off = off + reg->var_off.value;
4339 u64 val = 0;
4340
4341 err = bpf_map_direct_read(map, map_off, size,
4342 &val);
4343 if (err)
4344 return err;
4345
4346 regs[value_regno].type = SCALAR_VALUE;
4347 __mark_reg_known(&regs[value_regno], val);
4348 } else {
4349 mark_reg_unknown(env, regs, value_regno);
4350 }
4351 }
Andrii Nakryiko457f4432020-05-29 00:54:20 -07004352 } else if (reg->type == PTR_TO_MEM) {
4353 if (t == BPF_WRITE && value_regno >= 0 &&
4354 is_pointer_value(env, value_regno)) {
4355 verbose(env, "R%d leaks addr into mem\n", value_regno);
4356 return -EACCES;
4357 }
4358 err = check_mem_region_access(env, regno, off, size,
4359 reg->mem_size, false);
4360 if (!err && t == BPF_READ && value_regno >= 0)
4361 mark_reg_unknown(env, regs, value_regno);
Alexei Starovoitov1a0dc1a2016-05-05 19:49:09 -07004362 } else if (reg->type == PTR_TO_CTX) {
Edward Creef1174f72017-08-07 15:26:19 +01004363 enum bpf_reg_type reg_type = SCALAR_VALUE;
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08004364 struct btf *btf = NULL;
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07004365 u32 btf_id = 0;
Alexei Starovoitov19de99f2016-06-15 18:25:38 -07004366
Alexei Starovoitov1be7f752015-10-07 22:23:21 -07004367 if (t == BPF_WRITE && value_regno >= 0 &&
4368 is_pointer_value(env, value_regno)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07004369 verbose(env, "R%d leaks addr into ctx\n", value_regno);
Alexei Starovoitov1be7f752015-10-07 22:23:21 -07004370 return -EACCES;
4371 }
Edward Creef1174f72017-08-07 15:26:19 +01004372
Daniel Borkmann58990d12018-06-07 17:40:03 +02004373 err = check_ctx_reg(env, reg, regno);
4374 if (err < 0)
4375 return err;
4376
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08004377 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf, &btf_id);
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07004378 if (err)
4379 verbose_linfo(env, insn_idx, "; ");
Alexei Starovoitov969bf052016-05-05 19:49:10 -07004380 if (!err && t == BPF_READ && value_regno >= 0) {
Edward Creef1174f72017-08-07 15:26:19 +01004381 /* ctx access returns either a scalar, or a
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004382 * PTR_TO_PACKET[_META,_END]. In the latter
4383 * case, we know the offset is zero.
Edward Creef1174f72017-08-07 15:26:19 +01004384 */
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08004385 if (reg_type == SCALAR_VALUE) {
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07004386 mark_reg_unknown(env, regs, value_regno);
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08004387 } else {
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07004388 mark_reg_known_zero(env, regs,
Jakub Kicinski61bd5212017-10-09 10:30:11 -07004389 value_regno);
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08004390 if (reg_type_may_be_null(reg_type))
4391 regs[value_regno].id = ++env->id_gen;
Jiong Wang5327ed32019-05-24 23:25:12 +01004392				/* A load of a ctx field could have an
4393				 * actual load size different from the one
4394				 * encoded in the insn. When the dst is a PTR,
4395				 * it is definitely not a sub-register.
4396				 */
4397 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
Yonghong Songb121b342020-05-09 10:59:12 -07004398 if (reg_type == PTR_TO_BTF_ID ||
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08004399 reg_type == PTR_TO_BTF_ID_OR_NULL) {
4400 regs[value_regno].btf = btf;
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07004401 regs[value_regno].btf_id = btf_id;
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08004402 }
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08004403 }
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07004404 regs[value_regno].type = reg_type;
Alexei Starovoitov969bf052016-05-05 19:49:10 -07004405 }
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004406
Edward Creef1174f72017-08-07 15:26:19 +01004407 } else if (reg->type == PTR_TO_STACK) {
Andrei Matei01f810a2021-02-06 20:10:24 -05004408 /* Basic bounds checks. */
4409 err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
Daniel Borkmanne4298d22019-01-03 00:58:31 +01004410 if (err)
4411 return err;
Alexei Starovoitov87266792017-05-30 13:31:29 -07004412
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08004413 state = func(env, reg);
4414 err = update_stack_depth(env, state, off);
4415 if (err)
4416 return err;
Alexei Starovoitov87266792017-05-30 13:31:29 -07004417
Andrei Matei01f810a2021-02-06 20:10:24 -05004418 if (t == BPF_READ)
4419 err = check_stack_read(env, regno, off, size,
Jakub Kicinski61bd5212017-10-09 10:30:11 -07004420 value_regno);
Andrei Matei01f810a2021-02-06 20:10:24 -05004421 else
4422 err = check_stack_write(env, regno, off, size,
4423 value_regno, insn_idx);
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004424 } else if (reg_is_pkt_pointer(reg)) {
Thomas Graf3a0af8f2016-11-30 17:10:10 +01004425 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07004426 verbose(env, "cannot write into packet\n");
Alexei Starovoitov969bf052016-05-05 19:49:10 -07004427 return -EACCES;
4428 }
Brenden Blanco4acf6c02016-07-19 12:16:56 -07004429 if (t == BPF_WRITE && value_regno >= 0 &&
4430 is_pointer_value(env, value_regno)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07004431 verbose(env, "R%d leaks addr into packet\n",
4432 value_regno);
Brenden Blanco4acf6c02016-07-19 12:16:56 -07004433 return -EACCES;
4434 }
Yonghong Song9fd29c02017-11-12 14:49:09 -08004435 err = check_packet_access(env, regno, off, size, false);
Alexei Starovoitov969bf052016-05-05 19:49:10 -07004436 if (!err && t == BPF_READ && value_regno >= 0)
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07004437 mark_reg_unknown(env, regs, value_regno);
Petar Penkovd58e4682018-09-14 07:46:18 -07004438 } else if (reg->type == PTR_TO_FLOW_KEYS) {
4439 if (t == BPF_WRITE && value_regno >= 0 &&
4440 is_pointer_value(env, value_regno)) {
4441 verbose(env, "R%d leaks addr into flow keys\n",
4442 value_regno);
4443 return -EACCES;
4444 }
4445
4446 err = check_flow_keys_access(env, off, size);
4447 if (!err && t == BPF_READ && value_regno >= 0)
4448 mark_reg_unknown(env, regs, value_regno);
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08004449 } else if (type_is_sk_pointer(reg->type)) {
Joe Stringerc64b7982018-10-02 13:35:33 -07004450 if (t == BPF_WRITE) {
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08004451 verbose(env, "R%d cannot write into %s\n",
4452 regno, reg_type_str[reg->type]);
Joe Stringerc64b7982018-10-02 13:35:33 -07004453 return -EACCES;
4454 }
Martin KaFai Lau5f456642019-02-08 22:25:54 -08004455 err = check_sock_access(env, insn_idx, regno, off, size, t);
Joe Stringerc64b7982018-10-02 13:35:33 -07004456 if (!err && value_regno >= 0)
4457 mark_reg_unknown(env, regs, value_regno);
Matt Mullins9df1c282019-04-26 11:49:47 -07004458 } else if (reg->type == PTR_TO_TP_BUFFER) {
4459 err = check_tp_buffer_access(env, reg, regno, off, size);
4460 if (!err && t == BPF_READ && value_regno >= 0)
4461 mark_reg_unknown(env, regs, value_regno);
Alexei Starovoitov9e15db62019-10-15 20:25:00 -07004462 } else if (reg->type == PTR_TO_BTF_ID) {
4463 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
4464 value_regno);
Andrey Ignatov41c48f32020-06-19 14:11:43 -07004465 } else if (reg->type == CONST_PTR_TO_MAP) {
4466 err = check_ptr_to_map_access(env, regs, regno, off, size, t,
4467 value_regno);
Yonghong Songafbf21d2020-07-23 11:41:11 -07004468 } else if (reg->type == PTR_TO_RDONLY_BUF) {
4469 if (t == BPF_WRITE) {
4470 verbose(env, "R%d cannot write into %s\n",
4471 regno, reg_type_str[reg->type]);
4472 return -EACCES;
4473 }
Colin Ian Kingf6dfbe312020-07-27 18:54:11 +01004474 err = check_buffer_access(env, reg, regno, off, size, false,
4475 "rdonly",
Yonghong Songafbf21d2020-07-23 11:41:11 -07004476 &env->prog->aux->max_rdonly_access);
4477 if (!err && value_regno >= 0)
4478 mark_reg_unknown(env, regs, value_regno);
4479 } else if (reg->type == PTR_TO_RDWR_BUF) {
Colin Ian Kingf6dfbe312020-07-27 18:54:11 +01004480 err = check_buffer_access(env, reg, regno, off, size, false,
4481 "rdwr",
Yonghong Songafbf21d2020-07-23 11:41:11 -07004482 &env->prog->aux->max_rdwr_access);
4483 if (!err && t == BPF_READ && value_regno >= 0)
4484 mark_reg_unknown(env, regs, value_regno);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004485 } else {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07004486 verbose(env, "R%d invalid mem access '%s'\n", regno,
4487 reg_type_str[reg->type]);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004488 return -EACCES;
4489 }
Alexei Starovoitov969bf052016-05-05 19:49:10 -07004490
Edward Creef1174f72017-08-07 15:26:19 +01004491 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07004492 regs[value_regno].type == SCALAR_VALUE) {
Edward Creef1174f72017-08-07 15:26:19 +01004493 /* b/h/w load zero-extends, mark upper bits as known 0 */
Jann Horn0c17d1d2017-12-18 20:11:55 -08004494 coerce_reg_to_size(&regs[value_regno], size);
Alexei Starovoitov969bf052016-05-05 19:49:10 -07004495 }
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004496 return err;
4497}
4498
Brendan Jackman91c960b2021-01-14 18:17:44 +00004499static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004500{
Brendan Jackman5ffa2552021-01-14 18:17:47 +00004501 int load_reg;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004502 int err;
4503
Brendan Jackman5ca419f2021-01-14 18:17:46 +00004504 switch (insn->imm) {
4505 case BPF_ADD:
4506 case BPF_ADD | BPF_FETCH:
Brendan Jackman981f94c2021-01-14 18:17:49 +00004507 case BPF_AND:
4508 case BPF_AND | BPF_FETCH:
4509 case BPF_OR:
4510 case BPF_OR | BPF_FETCH:
4511 case BPF_XOR:
4512 case BPF_XOR | BPF_FETCH:
Brendan Jackman5ffa2552021-01-14 18:17:47 +00004513 case BPF_XCHG:
4514 case BPF_CMPXCHG:
Brendan Jackman5ca419f2021-01-14 18:17:46 +00004515 break;
4516 default:
Brendan Jackman91c960b2021-01-14 18:17:44 +00004517 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
4518 return -EINVAL;
4519 }
4520
4521 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
4522 verbose(env, "invalid atomic operand size\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004523 return -EINVAL;
4524 }
4525
4526 /* check src1 operand */
Edward Creedc503a82017-08-15 20:34:35 +01004527 err = check_reg_arg(env, insn->src_reg, SRC_OP);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004528 if (err)
4529 return err;
4530
4531 /* check src2 operand */
Edward Creedc503a82017-08-15 20:34:35 +01004532 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004533 if (err)
4534 return err;
4535
Brendan Jackman5ffa2552021-01-14 18:17:47 +00004536 if (insn->imm == BPF_CMPXCHG) {
4537 /* Check comparison of R0 with memory location */
4538 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
4539 if (err)
4540 return err;
4541 }
4542
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004543 if (is_pointer_value(env, insn->src_reg)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07004544 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004545 return -EACCES;
4546 }
4547
Daniel Borkmannca369602018-02-23 22:29:05 +01004548 if (is_ctx_reg(env, insn->dst_reg) ||
Daniel Borkmann4b5defd2018-10-21 02:09:25 +02004549 is_pkt_reg(env, insn->dst_reg) ||
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08004550 is_flow_key_reg(env, insn->dst_reg) ||
4551 is_sk_reg(env, insn->dst_reg)) {
Brendan Jackman91c960b2021-01-14 18:17:44 +00004552 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
Daniel Borkmann2a159c62018-10-21 02:09:24 +02004553 insn->dst_reg,
4554 reg_type_str[reg_state(env, insn->dst_reg)->type]);
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01004555 return -EACCES;
4556 }
4557
Brendan Jackman37086bf2021-02-02 13:50:02 +00004558 if (insn->imm & BPF_FETCH) {
4559 if (insn->imm == BPF_CMPXCHG)
4560 load_reg = BPF_REG_0;
4561 else
4562 load_reg = insn->src_reg;
4563
4564 /* check and record load of old value */
4565 err = check_reg_arg(env, load_reg, DST_OP);
4566 if (err)
4567 return err;
4568 } else {
4569 /* This instruction accesses a memory location but doesn't
4570 * actually load it into a register.
4571 */
4572 load_reg = -1;
4573 }
4574
Brendan Jackman91c960b2021-01-14 18:17:44 +00004575 /* check whether we can read the memory */
Yonghong Song31fd8582017-06-13 15:52:13 -07004576 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
Brendan Jackman37086bf2021-02-02 13:50:02 +00004577 BPF_SIZE(insn->code), BPF_READ, load_reg, true);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004578 if (err)
4579 return err;
4580
Brendan Jackman91c960b2021-01-14 18:17:44 +00004581 /* check whether we can write into the same memory */
Brendan Jackman5ca419f2021-01-14 18:17:46 +00004582 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4583 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
4584 if (err)
4585 return err;
4586
Brendan Jackman5ca419f2021-01-14 18:17:46 +00004587 return 0;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004588}
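
/* Illustrative asm-level sketch of insns accepted by check_atomic():
 *
 *	lock *(u64 *)(r1 + 0) += r2	// BPF_ATOMIC | BPF_DW, imm=BPF_ADD
 *
 * With BPF_FETCH set, the old value is additionally loaded back into
 * src_reg; BPF_CMPXCHG is special in that both the value compared
 * against and the returned old value live in R0.
 */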
4589
Andrei Matei01f810a2021-02-06 20:10:24 -05004590/* When register 'regno' is used to read the stack (either directly or through
4591 * a helper function) make sure that it's within the stack bounds and, depending
4592 * on the access type, that all elements of the stack are initialized.
4593 *
4594 * 'off' includes 'regno->off', but not its dynamic part (if any).
4595 *
4596 * All registers that have been spilled on the stack in the slots within the
4597 * read offsets are marked as read.
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004598 */
Andrei Matei01f810a2021-02-06 20:10:24 -05004599static int check_stack_range_initialized(
4600 struct bpf_verifier_env *env, int regno, int off,
4601 int access_size, bool zero_size_allowed,
4602 enum stack_access_src type, struct bpf_call_arg_meta *meta)
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004603{
Daniel Borkmann2a159c62018-10-21 02:09:24 +02004604 struct bpf_reg_state *reg = reg_state(env, regno);
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08004605 struct bpf_func_state *state = func(env, reg);
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07004606 int err, min_off, max_off, i, j, slot, spi;
Andrei Matei01f810a2021-02-06 20:10:24 -05004607 char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
4608 enum bpf_access_type bounds_check_type;
4609 /* Some accesses can write anything into the stack, others are
4610 * read-only.
4611 */
4612 bool clobber = false;
4613
4614 if (access_size == 0 && !zero_size_allowed) {
4615 verbose(env, "invalid zero-sized read\n");
4616 return -EACCES;
4617 }
4618
4619 if (type == ACCESS_HELPER) {
4620 /* The bounds checks for writes are more permissive than for
4621 * reads. However, if raw_mode is not set, we'll do extra
4622 * checks below.
4623 */
4624 bounds_check_type = BPF_WRITE;
4625 clobber = true;
4626 } else {
4627 bounds_check_type = BPF_READ;
4628 }
4629 err = check_stack_access_within_bounds(env, regno, off, access_size,
4630 type, bounds_check_type);
4631 if (err)
4632 return err;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004634
Andrey Ignatov2011fcc2019-03-28 18:01:57 -07004635 if (tnum_is_const(reg->var_off)) {
Andrei Matei01f810a2021-02-06 20:10:24 -05004636 min_off = max_off = reg->var_off.value + off;
Andrey Ignatov2011fcc2019-03-28 18:01:57 -07004637 } else {
Andrey Ignatov088ec262019-04-03 23:22:39 -07004638		/* Variable offset is prohibited in unprivileged mode for
4639		 * simplicity, since it requires corresponding support in
4640		 * Spectre masking for stack ALU.
4641		 * See also retrieve_ptr_limit().
4642		 */
Alexei Starovoitov2c78ee82020-05-13 16:03:54 -07004643 if (!env->bypass_spec_v1) {
Andrey Ignatov088ec262019-04-03 23:22:39 -07004644 char tn_buf[48];
Edward Creef1174f72017-08-07 15:26:19 +01004645
Andrey Ignatov088ec262019-04-03 23:22:39 -07004646 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
Andrei Matei01f810a2021-02-06 20:10:24 -05004647 verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
4648 regno, err_extra, tn_buf);
Andrey Ignatov088ec262019-04-03 23:22:39 -07004649 return -EACCES;
4650 }
Andrey Ignatovf2bcd052019-04-03 23:22:37 -07004651		/* Only an initialized buffer on the stack may be accessed
4652		 * with a variable offset. With an uninitialized buffer it's
4653		 * hard to guarantee that the whole memory is marked as
4654		 * initialized on helper return, since the specific bounds
4655		 * are unknown, which may cause uninitialized stack leaking.
4656		 */
4657 if (meta && meta->raw_mode)
4658 meta = NULL;
4659
Andrei Matei01f810a2021-02-06 20:10:24 -05004660 min_off = reg->smin_value + off;
4661 max_off = reg->smax_value + off;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004662 }
4663
Daniel Borkmann435faee12016-04-13 00:10:51 +02004664 if (meta && meta->raw_mode) {
4665 meta->access_size = access_size;
4666 meta->regno = regno;
4667 return 0;
4668 }
4669
Andrey Ignatov2011fcc2019-03-28 18:01:57 -07004670 for (i = min_off; i < max_off + access_size; i++) {
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08004671 u8 *stype;
4672
Andrey Ignatov2011fcc2019-03-28 18:01:57 -07004673 slot = -i - 1;
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07004674 spi = slot / BPF_REG_SIZE;
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08004675 if (state->allocated_stack <= slot)
4676 goto err;
4677 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
4678 if (*stype == STACK_MISC)
4679 goto mark;
4680 if (*stype == STACK_ZERO) {
Andrei Matei01f810a2021-02-06 20:10:24 -05004681 if (clobber) {
4682 /* helper can write anything into the stack */
4683 *stype = STACK_MISC;
4684 }
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08004685 goto mark;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004686 }
Yonghong Song1d68f222020-05-09 10:59:15 -07004687
Martin KaFai Lau27113c52021-09-21 17:49:34 -07004688 if (is_spilled_reg(&state->stack[spi]) &&
Yonghong Song1d68f222020-05-09 10:59:15 -07004689 state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
4690 goto mark;
4691
Martin KaFai Lau27113c52021-09-21 17:49:34 -07004692 if (is_spilled_reg(&state->stack[spi]) &&
Yonghong Songcd17d382020-12-09 17:33:49 -08004693 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
4694 env->allow_ptr_leaks)) {
Andrei Matei01f810a2021-02-06 20:10:24 -05004695 if (clobber) {
4696 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
4697 for (j = 0; j < BPF_REG_SIZE; j++)
Martin KaFai Lau354e8f12021-09-21 17:49:41 -07004698 scrub_spilled_slot(&state->stack[spi].slot_type[j]);
Andrei Matei01f810a2021-02-06 20:10:24 -05004699 }
Alexei Starovoitovf7cf25b2019-06-15 12:12:17 -07004700 goto mark;
4701 }
4702
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08004703err:
Andrey Ignatov2011fcc2019-03-28 18:01:57 -07004704 if (tnum_is_const(reg->var_off)) {
Andrei Matei01f810a2021-02-06 20:10:24 -05004705 verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
4706 err_extra, regno, min_off, i - min_off, access_size);
Andrey Ignatov2011fcc2019-03-28 18:01:57 -07004707 } else {
4708 char tn_buf[48];
4709
4710 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
Andrei Matei01f810a2021-02-06 20:10:24 -05004711 verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
4712 err_extra, regno, tn_buf, i - min_off, access_size);
Andrey Ignatov2011fcc2019-03-28 18:01:57 -07004713 }
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -08004714 return -EACCES;
4715mark:
4716 /* reading any byte out of 8-byte 'spill_slot' will cause
4717 * the whole slot to be marked as 'read'
4718 */
Edward Cree679c7822018-08-22 20:02:19 +01004719 mark_reg_read(env, &state->stack[spi].spilled_ptr,
Jiong Wang5327ed32019-05-24 23:25:12 +01004720 state->stack[spi].spilled_ptr.parent,
4721 REG_LIVE_READ64);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004722 }
Andrey Ignatov2011fcc2019-03-28 18:01:57 -07004723 return update_stack_depth(env, state, min_off);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07004724}
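
/* Illustrative C-level sketch (map name assumed): a helper that reads
 * stack memory requires every byte in range to be initialized:
 *
 *	u32 key;				// uninitialized stack
 *	bpf_map_lookup_elem(&my_map, &key);	// rejected here
 *
 *	u32 key2 = 0;				// STACK_ZERO/STACK_MISC
 *	bpf_map_lookup_elem(&my_map, &key2);	// accepted
 */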
4725
Gianluca Borello06c1c042017-01-09 10:19:49 -08004726static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
4727 int access_size, bool zero_size_allowed,
4728 struct bpf_call_arg_meta *meta)
4729{
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07004730 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
Gianluca Borello06c1c042017-01-09 10:19:49 -08004731
Edward Creef1174f72017-08-07 15:26:19 +01004732 switch (reg->type) {
Gianluca Borello06c1c042017-01-09 10:19:49 -08004733 case PTR_TO_PACKET:
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004734 case PTR_TO_PACKET_META:
Yonghong Song9fd29c02017-11-12 14:49:09 -08004735 return check_packet_access(env, regno, reg->off, access_size,
4736 zero_size_allowed);
Yonghong Song69c087b2021-02-26 12:49:25 -08004737 case PTR_TO_MAP_KEY:
4738 return check_mem_region_access(env, regno, reg->off, access_size,
4739 reg->map_ptr->key_size, false);
Gianluca Borello06c1c042017-01-09 10:19:49 -08004740 case PTR_TO_MAP_VALUE:
Daniel Borkmann591fe982019-04-09 23:20:05 +02004741 if (check_map_access_type(env, regno, reg->off, access_size,
4742 meta && meta->raw_mode ? BPF_WRITE :
4743 BPF_READ))
4744 return -EACCES;
Yonghong Song9fd29c02017-11-12 14:49:09 -08004745 return check_map_access(env, regno, reg->off, access_size,
4746 zero_size_allowed);
Andrii Nakryiko457f4432020-05-29 00:54:20 -07004747 case PTR_TO_MEM:
4748 return check_mem_region_access(env, regno, reg->off,
4749 access_size, reg->mem_size,
4750 zero_size_allowed);
Yonghong Songafbf21d2020-07-23 11:41:11 -07004751 case PTR_TO_RDONLY_BUF:
4752 if (meta && meta->raw_mode)
4753 return -EACCES;
4754 return check_buffer_access(env, reg, regno, reg->off,
4755 access_size, zero_size_allowed,
4756 "rdonly",
4757 &env->prog->aux->max_rdonly_access);
4758 case PTR_TO_RDWR_BUF:
4759 return check_buffer_access(env, reg, regno, reg->off,
4760 access_size, zero_size_allowed,
4761 "rdwr",
4762 &env->prog->aux->max_rdwr_access);
Lorenz Bauer0d004c022020-09-21 13:12:18 +01004763 case PTR_TO_STACK:
Andrei Matei01f810a2021-02-06 20:10:24 -05004764 return check_stack_range_initialized(
4765 env,
4766 regno, reg->off, access_size,
4767 zero_size_allowed, ACCESS_HELPER, meta);
Lorenz Bauer0d004c022020-09-21 13:12:18 +01004768 default: /* scalar_value or invalid ptr */
4769 /* Allow zero-byte read from NULL, regardless of pointer type */
4770 if (zero_size_allowed && access_size == 0 &&
4771 register_is_null(reg))
4772 return 0;
4773
4774 verbose(env, "R%d type=%s expected=%s\n", regno,
4775 reg_type_str[reg->type],
4776 reg_type_str[PTR_TO_STACK]);
4777 return -EACCES;
Gianluca Borello06c1c042017-01-09 10:19:49 -08004778 }
4779}
4780
Dmitrii Banshchikove5069b9c2021-02-13 00:56:41 +04004781int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
4782 u32 regno, u32 mem_size)
4783{
4784 if (register_is_null(reg))
4785 return 0;
4786
4787 if (reg_type_may_be_null(reg->type)) {
4788		/* Assuming that the register contains a value, check if the memory
4789		 * access is safe. Temporarily save and restore the register's state,
4790		 * as the conversion shouldn't be visible to a caller.
4791 */
4792 const struct bpf_reg_state saved_reg = *reg;
4793 int rv;
4794
4795 mark_ptr_not_null_reg(reg);
4796 rv = check_helper_mem_access(env, regno, mem_size, true, NULL);
4797 *reg = saved_reg;
4798 return rv;
4799 }
4800
4801 return check_helper_mem_access(env, regno, mem_size, true, NULL);
4802}
4803
Alexei Starovoitovd83525c2019-01-31 15:40:04 -08004804/* Implementation details:
4805 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
4806 * Two bpf_map_lookups (even with the same key) will have different reg->id.
4807 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
4808 * the value_or_null->value transition, since the verifier only cares
4809 * about the range of access to a valid map value pointer and doesn't
4810 * care about the actual address of the map element.
4811 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
4812 * reg->id > 0 after value_or_null->value transition. By doing so
4813 * two bpf_map_lookups will be considered two different pointers that
4814 * point to different bpf_spin_locks.
4815 * The verifier allows taking only one bpf_spin_lock at a time to avoid
4816 * deadlocks.
4817 * Since only one bpf_spin_lock is allowed the checks are simpler than
4818 * reg_is_refcounted() logic. The verifier needs to remember only
4819 * one spin_lock instead of array of acquired_refs.
4820 * cur_state->active_spin_lock remembers which map value element got locked
4821 * and clears it after bpf_spin_unlock.
4822 */
4823static int process_spin_lock(struct bpf_verifier_env *env, int regno,
4824 bool is_lock)
4825{
4826 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4827 struct bpf_verifier_state *cur = env->cur_state;
4828 bool is_const = tnum_is_const(reg->var_off);
4829 struct bpf_map *map = reg->map_ptr;
4830 u64 val = reg->var_off.value;
4831
Alexei Starovoitovd83525c2019-01-31 15:40:04 -08004832 if (!is_const) {
4833 verbose(env,
4834 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
4835 regno);
4836 return -EINVAL;
4837 }
4838 if (!map->btf) {
4839 verbose(env,
4840 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
4841 map->name);
4842 return -EINVAL;
4843 }
4844 if (!map_value_has_spin_lock(map)) {
4845 if (map->spin_lock_off == -E2BIG)
4846 verbose(env,
4847 "map '%s' has more than one 'struct bpf_spin_lock'\n",
4848 map->name);
4849 else if (map->spin_lock_off == -ENOENT)
4850 verbose(env,
4851 "map '%s' doesn't have 'struct bpf_spin_lock'\n",
4852 map->name);
4853 else
4854 verbose(env,
4855 "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
4856 map->name);
4857 return -EINVAL;
4858 }
4859 if (map->spin_lock_off != val + reg->off) {
4860 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
4861 val + reg->off);
4862 return -EINVAL;
4863 }
4864 if (is_lock) {
4865 if (cur->active_spin_lock) {
4866 verbose(env,
4867 "Locking two bpf_spin_locks are not allowed\n");
4868 return -EINVAL;
4869 }
4870 cur->active_spin_lock = reg->id;
4871 } else {
4872 if (!cur->active_spin_lock) {
4873 verbose(env, "bpf_spin_unlock without taking a lock\n");
4874 return -EINVAL;
4875 }
4876 if (cur->active_spin_lock != reg->id) {
4877 verbose(env, "bpf_spin_unlock of different lock\n");
4878 return -EINVAL;
4879 }
4880 cur->active_spin_lock = 0;
4881 }
4882 return 0;
4883}
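
/* Illustrative program-side sketch (struct/map names assumed) of the
 * contract enforced by process_spin_lock():
 *
 *	struct elem {
 *		int cnt;
 *		struct bpf_spin_lock lock;	// map->spin_lock_off
 *	};
 *
 *	struct elem *v = bpf_map_lookup_elem(&my_map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);	// active_spin_lock = reg->id
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);	// active_spin_lock = 0
 *	}
 */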
4884
Alexei Starovoitovb00628b2021-07-14 17:54:09 -07004885static int process_timer_func(struct bpf_verifier_env *env, int regno,
4886 struct bpf_call_arg_meta *meta)
4887{
4888 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4889 bool is_const = tnum_is_const(reg->var_off);
4890 struct bpf_map *map = reg->map_ptr;
4891 u64 val = reg->var_off.value;
4892
4893 if (!is_const) {
4894 verbose(env,
4895 "R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
4896 regno);
4897 return -EINVAL;
4898 }
4899 if (!map->btf) {
4900 verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
4901 map->name);
4902 return -EINVAL;
4903 }
Alexei Starovoitov68134662021-07-14 17:54:10 -07004904 if (!map_value_has_timer(map)) {
4905 if (map->timer_off == -E2BIG)
4906 verbose(env,
4907 "map '%s' has more than one 'struct bpf_timer'\n",
4908 map->name);
4909 else if (map->timer_off == -ENOENT)
4910 verbose(env,
4911 "map '%s' doesn't have 'struct bpf_timer'\n",
4912 map->name);
4913 else
4914 verbose(env,
4915 "map '%s' is not a struct type or bpf_timer is mangled\n",
4916 map->name);
4917 return -EINVAL;
4918 }
4919 if (map->timer_off != val + reg->off) {
4920 verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
4921 val + reg->off, map->timer_off);
Alexei Starovoitovb00628b2021-07-14 17:54:09 -07004922 return -EINVAL;
4923 }
4924 if (meta->map_ptr) {
4925 verbose(env, "verifier bug. Two map pointers in a timer helper\n");
4926 return -EFAULT;
4927 }
Alexei Starovoitov3e8ce292021-07-14 17:54:11 -07004928 meta->map_uid = reg->map_uid;
Alexei Starovoitovb00628b2021-07-14 17:54:09 -07004929 meta->map_ptr = map;
4930 return 0;
4931}
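
/* Illustrative program-side sketch (names assumed): the bpf_timer must
 * sit at the constant, BTF-described offset recorded in map->timer_off:
 *
 *	struct elem {
 *		int pad;
 *		struct bpf_timer t;
 *	};
 *
 *	struct elem *e = bpf_map_lookup_elem(&my_map, &key);
 *	if (e)
 *		bpf_timer_init(&e->t, &my_map, CLOCK_MONOTONIC);
 */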
4932
Daniel Borkmann90133412018-01-20 01:24:29 +01004933static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
4934{
4935 return type == ARG_PTR_TO_MEM ||
4936 type == ARG_PTR_TO_MEM_OR_NULL ||
4937 type == ARG_PTR_TO_UNINIT_MEM;
4938}
4939
4940static bool arg_type_is_mem_size(enum bpf_arg_type type)
4941{
4942 return type == ARG_CONST_SIZE ||
4943 type == ARG_CONST_SIZE_OR_ZERO;
4944}
4945
Andrii Nakryiko457f4432020-05-29 00:54:20 -07004946static bool arg_type_is_alloc_size(enum bpf_arg_type type)
4947{
4948 return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
4949}
4950
Andrey Ignatov57c3bb72019-03-18 16:57:10 -07004951static bool arg_type_is_int_ptr(enum bpf_arg_type type)
4952{
4953 return type == ARG_PTR_TO_INT ||
4954 type == ARG_PTR_TO_LONG;
4955}
4956
4957static int int_ptr_type_to_size(enum bpf_arg_type type)
4958{
4959 if (type == ARG_PTR_TO_INT)
4960 return sizeof(u32);
4961 else if (type == ARG_PTR_TO_LONG)
4962 return sizeof(u64);
4963
4964 return -EINVAL;
4965}
4966
Lorenz Bauer912f4422020-08-21 11:29:46 +01004967static int resolve_map_arg_type(struct bpf_verifier_env *env,
4968 const struct bpf_call_arg_meta *meta,
4969 enum bpf_arg_type *arg_type)
4970{
4971 if (!meta->map_ptr) {
4972 /* kernel subsystem misconfigured verifier */
4973 verbose(env, "invalid map_ptr to access map->type\n");
4974 return -EACCES;
4975 }
4976
4977 switch (meta->map_ptr->map_type) {
4978 case BPF_MAP_TYPE_SOCKMAP:
4979 case BPF_MAP_TYPE_SOCKHASH:
4980 if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
Lorenz Bauer6550f2d2020-09-28 10:08:02 +01004981 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
Lorenz Bauer912f4422020-08-21 11:29:46 +01004982 } else {
4983 verbose(env, "invalid arg_type for sockmap/sockhash\n");
4984 return -EINVAL;
4985 }
4986 break;
Joanne Koong93309862021-10-27 16:45:00 -07004987 case BPF_MAP_TYPE_BLOOM_FILTER:
4988 if (meta->func_id == BPF_FUNC_map_peek_elem)
4989 *arg_type = ARG_PTR_TO_MAP_VALUE;
4990 break;
Lorenz Bauer912f4422020-08-21 11:29:46 +01004991 default:
4992 break;
4993 }
4994 return 0;
4995}
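
/* Illustrative consequence of the remapping above (names assumed): for
 * a sockmap, the in-program update helper takes a socket rather than a
 * plain map value,
 *
 *	bpf_map_update_elem(&sock_map, &key, sk, BPF_NOEXIST);
 *
 * where sk must satisfy ARG_PTR_TO_BTF_ID_SOCK_COMMON instead of
 * ARG_PTR_TO_MAP_VALUE.
 */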
4996
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01004997struct bpf_reg_types {
4998 const enum bpf_reg_type types[10];
Martin KaFai Lau1df8f552020-09-24 17:03:50 -07004999 u32 *btf_id;
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005000};
5001
5002static const struct bpf_reg_types map_key_value_types = {
5003 .types = {
5004 PTR_TO_STACK,
5005 PTR_TO_PACKET,
5006 PTR_TO_PACKET_META,
Yonghong Song69c087b2021-02-26 12:49:25 -08005007 PTR_TO_MAP_KEY,
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005008 PTR_TO_MAP_VALUE,
5009 },
5010};
5011
5012static const struct bpf_reg_types sock_types = {
5013 .types = {
5014 PTR_TO_SOCK_COMMON,
5015 PTR_TO_SOCKET,
5016 PTR_TO_TCP_SOCK,
5017 PTR_TO_XDP_SOCK,
5018 },
5019};
5020
Randy Dunlap49a2a4d2020-10-06 19:16:13 -07005021#ifdef CONFIG_NET
Martin KaFai Lau1df8f552020-09-24 17:03:50 -07005022static const struct bpf_reg_types btf_id_sock_common_types = {
5023 .types = {
5024 PTR_TO_SOCK_COMMON,
5025 PTR_TO_SOCKET,
5026 PTR_TO_TCP_SOCK,
5027 PTR_TO_XDP_SOCK,
5028 PTR_TO_BTF_ID,
5029 },
5030 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
5031};
Randy Dunlap49a2a4d2020-10-06 19:16:13 -07005032#endif
Martin KaFai Lau1df8f552020-09-24 17:03:50 -07005033
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005034static const struct bpf_reg_types mem_types = {
5035 .types = {
5036 PTR_TO_STACK,
5037 PTR_TO_PACKET,
5038 PTR_TO_PACKET_META,
Yonghong Song69c087b2021-02-26 12:49:25 -08005039 PTR_TO_MAP_KEY,
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005040 PTR_TO_MAP_VALUE,
5041 PTR_TO_MEM,
5042 PTR_TO_RDONLY_BUF,
5043 PTR_TO_RDWR_BUF,
5044 },
5045};
5046
5047static const struct bpf_reg_types int_ptr_types = {
5048 .types = {
5049 PTR_TO_STACK,
5050 PTR_TO_PACKET,
5051 PTR_TO_PACKET_META,
Yonghong Song69c087b2021-02-26 12:49:25 -08005052 PTR_TO_MAP_KEY,
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005053 PTR_TO_MAP_VALUE,
5054 },
5055};
5056
5057static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
5058static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
5059static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
5060static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } };
5061static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
5062static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
5063static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
Hao Luoeaa6bcb2020-09-29 16:50:47 -07005064static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_PERCPU_BTF_ID } };
Yonghong Song69c087b2021-02-26 12:49:25 -08005065static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
5066static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
Florent Revestfff13c42021-04-19 17:52:39 +02005067static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
Alexei Starovoitovb00628b2021-07-14 17:54:09 -07005068static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005069
Lorenz Bauer0789e13b2020-09-23 17:01:55 +01005070static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005071 [ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
5072 [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types,
5073 [ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types,
5074 [ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types,
5075 [ARG_CONST_SIZE] = &scalar_types,
5076 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
5077 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
5078 [ARG_CONST_MAP_PTR] = &const_map_ptr_types,
5079 [ARG_PTR_TO_CTX] = &context_types,
5080 [ARG_PTR_TO_CTX_OR_NULL] = &context_types,
5081 [ARG_PTR_TO_SOCK_COMMON] = &sock_types,
Randy Dunlap49a2a4d2020-10-06 19:16:13 -07005082#ifdef CONFIG_NET
Martin KaFai Lau1df8f552020-09-24 17:03:50 -07005083 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types,
Randy Dunlap49a2a4d2020-10-06 19:16:13 -07005084#endif
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005085 [ARG_PTR_TO_SOCKET] = &fullsock_types,
5086 [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types,
5087 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
5088 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
5089 [ARG_PTR_TO_MEM] = &mem_types,
5090 [ARG_PTR_TO_MEM_OR_NULL] = &mem_types,
5091 [ARG_PTR_TO_UNINIT_MEM] = &mem_types,
5092 [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types,
5093 [ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types,
5094 [ARG_PTR_TO_INT] = &int_ptr_types,
5095 [ARG_PTR_TO_LONG] = &int_ptr_types,
Hao Luoeaa6bcb2020-09-29 16:50:47 -07005096 [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
Yonghong Song69c087b2021-02-26 12:49:25 -08005097 [ARG_PTR_TO_FUNC] = &func_ptr_types,
5098 [ARG_PTR_TO_STACK_OR_NULL] = &stack_ptr_types,
Florent Revestfff13c42021-04-19 17:52:39 +02005099 [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types,
Alexei Starovoitovb00628b2021-07-14 17:54:09 -07005100 [ARG_PTR_TO_TIMER] = &timer_types,
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005101};
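
/* Illustrative read of the table above: a helper declaring an argument
 * as ARG_PTR_TO_MAP_KEY accepts, among others, a PTR_TO_STACK register
 * (key built on the stack) or a PTR_TO_PACKET register (key taken
 * straight from packet data), because those types are listed in
 * map_key_value_types.
 */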
5102
5103static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
Martin KaFai Laua968d5e2020-09-24 17:03:44 -07005104 enum bpf_arg_type arg_type,
5105 const u32 *arg_btf_id)
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005106{
5107 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5108 enum bpf_reg_type expected, type = reg->type;
Martin KaFai Laua968d5e2020-09-24 17:03:44 -07005109 const struct bpf_reg_types *compatible;
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005110 int i, j;
5111
Martin KaFai Laua968d5e2020-09-24 17:03:44 -07005112 compatible = compatible_reg_types[arg_type];
5113 if (!compatible) {
5114 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
5115 return -EFAULT;
5116 }
5117
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005118 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
5119 expected = compatible->types[i];
5120 if (expected == NOT_INIT)
5121 break;
5122
5123 if (type == expected)
Martin KaFai Laua968d5e2020-09-24 17:03:44 -07005124 goto found;
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005125 }
5126
5127 verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
5128 for (j = 0; j + 1 < i; j++)
5129 verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
5130 verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
5131 return -EACCES;
Martin KaFai Laua968d5e2020-09-24 17:03:44 -07005132
5133found:
5134 if (type == PTR_TO_BTF_ID) {
Martin KaFai Lau1df8f552020-09-24 17:03:50 -07005135 if (!arg_btf_id) {
5136 if (!compatible->btf_id) {
5137 verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
5138 return -EFAULT;
5139 }
5140 arg_btf_id = compatible->btf_id;
5141 }
5142
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08005143 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
5144 btf_vmlinux, *arg_btf_id)) {
Martin KaFai Laua968d5e2020-09-24 17:03:44 -07005145 verbose(env, "R%d is of type %s but %s is expected\n",
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08005146 regno, kernel_type_name(reg->btf, reg->btf_id),
5147 kernel_type_name(btf_vmlinux, *arg_btf_id));
Martin KaFai Laua968d5e2020-09-24 17:03:44 -07005148 return -EACCES;
5149 }
5150
5151 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
5152 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
5153 regno);
5154 return -EACCES;
5155 }
5156 }
5157
5158 return 0;
Lorenz Bauerf79e7ea2020-09-21 13:12:27 +01005159}
5160
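/* For illustration, the candidate scan above is a sentinel-terminated
 * linear search: each arg type maps to a small list of acceptable reg
 * types and NOT_INIT marks its end. A minimal userspace model of that
 * shape follows; the enum values and sizes are hypothetical, not the
 * kernel definitions, and the block is kept under #if 0 so it is not
 * compiled as part of this file.
 */
#if 0	/* example only */
#include <stdio.h>

enum reg_type { NOT_INIT, SCALAR_VALUE, PTR_TO_CTX, PTR_TO_SOCKET };

struct reg_types { enum reg_type types[4]; };

static const struct reg_types context_types = {
	.types = { PTR_TO_CTX, NOT_INIT },
};

static int check_type(const struct reg_types *compatible, enum reg_type type)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (compatible->types[i] == NOT_INIT)
			break;			/* end of candidate list */
		if (type == compatible->types[i])
			return 0;		/* found */
	}
	return -1;				/* -EACCES in the verifier */
}

int main(void)
{
	printf("%d\n", check_type(&context_types, PTR_TO_CTX));	  /* 0 */
	printf("%d\n", check_type(&context_types, SCALAR_VALUE)); /* -1 */
	return 0;
}
#endif
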
static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
			  struct bpf_call_arg_meta *meta,
			  const struct bpf_func_proto *fn)
{
	u32 regno = BPF_REG_1 + arg;
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	enum bpf_arg_type arg_type = fn->arg_type[arg];
	enum bpf_reg_type type = reg->type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	err = check_reg_arg(env, regno, SRC_OP);
	if (err)
		return err;

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose(env, "R%d leaks addr into helper function\n",
				regno);
			return -EACCES;
		}
		return 0;
	}

	if (type_is_pkt_pointer(type) &&
	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
		verbose(env, "helper access to the packet is not allowed\n");
		return -EACCES;
	}

	if (arg_type == ARG_PTR_TO_MAP_VALUE ||
	    arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
	    arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
		err = resolve_map_arg_type(env, meta, &arg_type);
		if (err)
			return err;
	}

	if (register_is_null(reg) && arg_type_may_be_null(arg_type))
		/* A NULL register has a SCALAR_VALUE type, so skip
		 * type checking.
		 */
		goto skip_type_check;

	err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]);
	if (err)
		return err;

	if (type == PTR_TO_CTX) {
		err = check_ctx_reg(env, reg, regno);
		if (err < 0)
			return err;
	}

skip_type_check:
	if (reg->ref_obj_id) {
		if (meta->ref_obj_id) {
			verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
				regno, reg->ref_obj_id,
				meta->ref_obj_id);
			return -EFAULT;
		}
		meta->ref_obj_id = reg->ref_obj_id;
	}

	if (arg_type == ARG_CONST_MAP_PTR) {
		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
		if (meta->map_ptr) {
			/* Use map_uid (which is unique id of inner map) to reject:
			 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
			 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
			 * if (inner_map1 && inner_map2) {
			 *     timer = bpf_map_lookup_elem(inner_map1);
			 *     if (timer)
			 *         // mismatch would have been allowed
			 *         bpf_timer_init(timer, inner_map2);
			 * }
			 *
			 * Comparing map_ptr is enough to distinguish normal and outer maps.
			 */
			if (meta->map_ptr != reg->map_ptr ||
			    meta->map_uid != reg->map_uid) {
				verbose(env,
					"timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
					meta->map_uid, reg->map_uid);
				return -EINVAL;
			}
		}
		meta->map_ptr = reg->map_ptr;
		meta->map_uid = reg->map_uid;
	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
		/* bpf_map_xxx(..., map_ptr, ..., key) call:
		 * check that [key, key + map->key_size) are within
		 * stack limits and initialized
		 */
		if (!meta->map_ptr) {
			/* in function declaration map_ptr must come before
			 * map_key, so that it's verified and known before
			 * we have to check map_key here. Otherwise it means
			 * that kernel subsystem misconfigured verifier
			 */
			verbose(env, "invalid map_ptr to access map->key\n");
			return -EACCES;
		}
		err = check_helper_mem_access(env, regno,
					      meta->map_ptr->key_size, false,
					      NULL);
	} else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
		   (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
		    !register_is_null(reg)) ||
		   arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
		/* bpf_map_xxx(..., map_ptr, ..., value) call:
		 * check [value, value + map->value_size) validity
		 */
		if (!meta->map_ptr) {
			/* kernel subsystem misconfigured verifier */
			verbose(env, "invalid map_ptr to access map->value\n");
			return -EACCES;
		}
		meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
		err = check_helper_mem_access(env, regno,
					      meta->map_ptr->value_size, false,
					      meta);
	} else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) {
		if (!reg->btf_id) {
			verbose(env, "Helper has invalid btf_id in R%d\n", regno);
			return -EACCES;
		}
		meta->ret_btf = reg->btf;
		meta->ret_btf_id = reg->btf_id;
	} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
		if (meta->func_id == BPF_FUNC_spin_lock) {
			if (process_spin_lock(env, regno, true))
				return -EACCES;
		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
			if (process_spin_lock(env, regno, false))
				return -EACCES;
		} else {
			verbose(env, "verifier internal error\n");
			return -EFAULT;
		}
	} else if (arg_type == ARG_PTR_TO_TIMER) {
		if (process_timer_func(env, regno, meta))
			return -EACCES;
	} else if (arg_type == ARG_PTR_TO_FUNC) {
		meta->subprogno = reg->subprogno;
	} else if (arg_type_is_mem_ptr(arg_type)) {
		/* The access to this pointer is only checked when we hit the
		 * next is_mem_size argument below.
		 */
		meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM);
	} else if (arg_type_is_mem_size(arg_type)) {
		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);

		/* This is used to refine r0 return value bounds for helpers
		 * that enforce this value as an upper bound on return values.
		 * See do_refine_retval_range() for helpers that can refine
		 * the return value. C type of helper is u32 so we pull register
		 * bound from umax_value however, if negative verifier errors
		 * out. Only upper bounds can be learned because retval is an
		 * int type and negative retvals are allowed.
		 */
		meta->msize_max_value = reg->umax_value;

		/* The register is SCALAR_VALUE; the access check
		 * happens using its boundaries.
		 */
		if (!tnum_is_const(reg->var_off))
			/* For unprivileged variable accesses, disable raw
			 * mode so that the program is required to
			 * initialize all the memory that the helper could
			 * just partially fill up.
			 */
			meta = NULL;

		if (reg->smin_value < 0) {
			verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
				regno);
			return -EACCES;
		}

		if (reg->umin_value == 0) {
			err = check_helper_mem_access(env, regno - 1, 0,
						      zero_size_allowed,
						      meta);
			if (err)
				return err;
		}

		if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
			verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
				regno);
			return -EACCES;
		}
		err = check_helper_mem_access(env, regno - 1,
					      reg->umax_value,
					      zero_size_allowed, meta);
		if (!err)
			err = mark_chain_precision(env, regno);
	} else if (arg_type_is_alloc_size(arg_type)) {
		if (!tnum_is_const(reg->var_off)) {
			verbose(env, "R%d is not a known constant\n",
				regno);
			return -EACCES;
		}
		meta->mem_size = reg->var_off.value;
	} else if (arg_type_is_int_ptr(arg_type)) {
		int size = int_ptr_type_to_size(arg_type);

		err = check_helper_mem_access(env, regno, size, false, meta);
		if (err)
			return err;
		err = check_ptr_alignment(env, reg, 0, size, true);
	} else if (arg_type == ARG_PTR_TO_CONST_STR) {
		struct bpf_map *map = reg->map_ptr;
		int map_off;
		u64 map_addr;
		char *str_ptr;

		if (!bpf_map_is_rdonly(map)) {
			verbose(env, "R%d does not point to a readonly map\n", regno);
			return -EACCES;
		}

		if (!tnum_is_const(reg->var_off)) {
			verbose(env, "R%d is not a constant address\n", regno);
			return -EACCES;
		}

		if (!map->ops->map_direct_value_addr) {
			verbose(env, "no direct value access support for this map type\n");
			return -EACCES;
		}

		err = check_map_access(env, regno, reg->off,
				       map->value_size - reg->off, false);
		if (err)
			return err;

		map_off = reg->off + reg->var_off.value;
		err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
		if (err) {
			verbose(env, "direct value access on string failed\n");
			return err;
		}

		str_ptr = (char *)(long)(map_addr);
		if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
			verbose(env, "string is not zero-terminated\n");
			return -EINVAL;
		}
	}

	return err;
}

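/* From the program side, the mem/size checks above are what make the
 * familiar "bound the length before the call" pattern mandatory. A
 * sketch of a tracing program (a separate BPF object, not part of this
 * file; section and helper choices are illustrative) that satisfies an
 * ARG_PTR_TO_UNINIT_MEM + ARG_CONST_SIZE_OR_ZERO pair:
 */
#if 0	/* example only; includes <linux/bpf.h> and <bpf/bpf_helpers.h> */
SEC("kprobe/do_unlinkat")
int bounded_read(struct pt_regs *ctx)
{
	char buf[64] = {};	/* a variable size sets meta = NULL above,
				 * so the stack area must be initialized */
	__u32 len = bpf_get_prandom_u32();	/* stand-in variable length */

	len &= sizeof(buf) - 1;	/* proves 0 <= len <= 63: smin_value >= 0
				 * and umax_value < BPF_MAX_VAR_SIZ */
	bpf_probe_read_kernel(buf, len, (void *)bpf_get_current_task());
	return 0;
}

char _license[] SEC("license") = "GPL";
#endif
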
static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
{
	enum bpf_attach_type eatype = env->prog->expected_attach_type;
	enum bpf_prog_type type = resolve_prog_type(env->prog);

	if (func_id != BPF_FUNC_map_update_elem)
		return false;

	/* It's not possible to get access to a locked struct sock in these
	 * contexts, so updating is safe.
	 */
	switch (type) {
	case BPF_PROG_TYPE_TRACING:
		if (eatype == BPF_TRACE_ITER)
			return true;
		break;
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_SK_LOOKUP:
		return true;
	default:
		break;
	}

	verbose(env, "cannot update sockmap in this context\n");
	return false;
}

static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
{
	return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
}

static int check_map_func_compatibility(struct bpf_verifier_env *env,
					struct bpf_map *map, int func_id)
{
	if (!map)
		return 0;

	/* We need a two way check, first is from map perspective ... */
	switch (map->map_type) {
	case BPF_MAP_TYPE_PROG_ARRAY:
		if (func_id != BPF_FUNC_tail_call)
			goto error;
		break;
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		if (func_id != BPF_FUNC_perf_event_read &&
		    func_id != BPF_FUNC_perf_event_output &&
		    func_id != BPF_FUNC_skb_output &&
		    func_id != BPF_FUNC_perf_event_read_value &&
		    func_id != BPF_FUNC_xdp_output)
			goto error;
		break;
	case BPF_MAP_TYPE_RINGBUF:
		if (func_id != BPF_FUNC_ringbuf_output &&
		    func_id != BPF_FUNC_ringbuf_reserve &&
		    func_id != BPF_FUNC_ringbuf_query)
			goto error;
		break;
	case BPF_MAP_TYPE_STACK_TRACE:
		if (func_id != BPF_FUNC_get_stackid)
			goto error;
		break;
	case BPF_MAP_TYPE_CGROUP_ARRAY:
		if (func_id != BPF_FUNC_skb_under_cgroup &&
		    func_id != BPF_FUNC_current_task_under_cgroup)
			goto error;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		if (func_id != BPF_FUNC_get_local_storage)
			goto error;
		break;
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
		if (func_id != BPF_FUNC_redirect_map &&
		    func_id != BPF_FUNC_map_lookup_elem)
			goto error;
		break;
	/* Restrict bpf side of cpumap and xskmap, open when use-cases
	 * appear.
	 */
	case BPF_MAP_TYPE_CPUMAP:
		if (func_id != BPF_FUNC_redirect_map)
			goto error;
		break;
	case BPF_MAP_TYPE_XSKMAP:
		if (func_id != BPF_FUNC_redirect_map &&
		    func_id != BPF_FUNC_map_lookup_elem)
			goto error;
		break;
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		if (func_id != BPF_FUNC_map_lookup_elem)
			goto error;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
		if (func_id != BPF_FUNC_sk_redirect_map &&
		    func_id != BPF_FUNC_sock_map_update &&
		    func_id != BPF_FUNC_map_delete_elem &&
		    func_id != BPF_FUNC_msg_redirect_map &&
		    func_id != BPF_FUNC_sk_select_reuseport &&
		    func_id != BPF_FUNC_map_lookup_elem &&
		    !may_update_sockmap(env, func_id))
			goto error;
		break;
	case BPF_MAP_TYPE_SOCKHASH:
		if (func_id != BPF_FUNC_sk_redirect_hash &&
		    func_id != BPF_FUNC_sock_hash_update &&
		    func_id != BPF_FUNC_map_delete_elem &&
		    func_id != BPF_FUNC_msg_redirect_hash &&
		    func_id != BPF_FUNC_sk_select_reuseport &&
		    func_id != BPF_FUNC_map_lookup_elem &&
		    !may_update_sockmap(env, func_id))
			goto error;
		break;
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
		if (func_id != BPF_FUNC_sk_select_reuseport)
			goto error;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		if (func_id != BPF_FUNC_map_peek_elem &&
		    func_id != BPF_FUNC_map_pop_elem &&
		    func_id != BPF_FUNC_map_push_elem)
			goto error;
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
		if (func_id != BPF_FUNC_sk_storage_get &&
		    func_id != BPF_FUNC_sk_storage_delete)
			goto error;
		break;
	case BPF_MAP_TYPE_INODE_STORAGE:
		if (func_id != BPF_FUNC_inode_storage_get &&
		    func_id != BPF_FUNC_inode_storage_delete)
			goto error;
		break;
	case BPF_MAP_TYPE_TASK_STORAGE:
		if (func_id != BPF_FUNC_task_storage_get &&
		    func_id != BPF_FUNC_task_storage_delete)
			goto error;
		break;
	case BPF_MAP_TYPE_BLOOM_FILTER:
		if (func_id != BPF_FUNC_map_peek_elem &&
		    func_id != BPF_FUNC_map_push_elem)
			goto error;
		break;
	default:
		break;
	}

	/* ... and second from the function itself. */
	switch (func_id) {
	case BPF_FUNC_tail_call:
		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			goto error;
		if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
			verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
			return -EINVAL;
		}
		break;
	case BPF_FUNC_perf_event_read:
	case BPF_FUNC_perf_event_output:
	case BPF_FUNC_perf_event_read_value:
	case BPF_FUNC_skb_output:
	case BPF_FUNC_xdp_output:
		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
			goto error;
		break;
	case BPF_FUNC_ringbuf_output:
	case BPF_FUNC_ringbuf_reserve:
	case BPF_FUNC_ringbuf_query:
		if (map->map_type != BPF_MAP_TYPE_RINGBUF)
			goto error;
		break;
	case BPF_FUNC_get_stackid:
		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
			goto error;
		break;
	case BPF_FUNC_current_task_under_cgroup:
	case BPF_FUNC_skb_under_cgroup:
		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
			goto error;
		break;
	case BPF_FUNC_redirect_map:
		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
		    map->map_type != BPF_MAP_TYPE_XSKMAP)
			goto error;
		break;
	case BPF_FUNC_sk_redirect_map:
	case BPF_FUNC_msg_redirect_map:
	case BPF_FUNC_sock_map_update:
		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
			goto error;
		break;
	case BPF_FUNC_sk_redirect_hash:
	case BPF_FUNC_msg_redirect_hash:
	case BPF_FUNC_sock_hash_update:
		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
			goto error;
		break;
	case BPF_FUNC_get_local_storage:
		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
			goto error;
		break;
	case BPF_FUNC_sk_select_reuseport:
		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
		    map->map_type != BPF_MAP_TYPE_SOCKMAP &&
		    map->map_type != BPF_MAP_TYPE_SOCKHASH)
			goto error;
		break;
	case BPF_FUNC_map_pop_elem:
		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
		    map->map_type != BPF_MAP_TYPE_STACK)
			goto error;
		break;
	case BPF_FUNC_map_peek_elem:
	case BPF_FUNC_map_push_elem:
		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
		    map->map_type != BPF_MAP_TYPE_STACK &&
		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
			goto error;
		break;
	case BPF_FUNC_sk_storage_get:
	case BPF_FUNC_sk_storage_delete:
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
			goto error;
		break;
	case BPF_FUNC_inode_storage_get:
	case BPF_FUNC_inode_storage_delete:
		if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
			goto error;
		break;
	case BPF_FUNC_task_storage_get:
	case BPF_FUNC_task_storage_delete:
		if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
			goto error;
		break;
	default:
		break;
	}

	return 0;
error:
	verbose(env, "cannot pass map_type %d into func %s#%d\n",
		map->map_type, func_id_name(func_id), func_id);
	return -EINVAL;
}

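/* For illustration, the two-way check above is what a program trips over
 * when it hands the wrong map type to a helper. A sketch (separate BPF
 * object; map and section names are illustrative):
 */
#if 0	/* example only */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 8);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int dispatch(struct xdp_md *ctx)
{
	bpf_tail_call(ctx, &jmp_table, 0);	/* PROG_ARRAY + tail_call: ok */
	/* Passing e.g. a BPF_MAP_TYPE_HASH map here instead would hit the
	 * error label above with:
	 *   cannot pass map_type 1 into func bpf_tail_call#12
	 */
	return XDP_PASS;
}
#endif
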
static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{
	int count = 0;

	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
		count++;

	/* We only support one arg being in raw mode at the moment,
	 * which is sufficient for the helper functions we have
	 * right now.
	 */
	return count <= 1;
}

static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
				    enum bpf_arg_type arg_next)
{
	return (arg_type_is_mem_ptr(arg_curr) &&
		!arg_type_is_mem_size(arg_next)) ||
	       (!arg_type_is_mem_ptr(arg_curr) &&
		arg_type_is_mem_size(arg_next));
}

static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
{
	/* bpf_xxx(..., buf, len) call will access 'len'
	 * bytes from memory 'buf'. Both arg types need
	 * to be paired, so make sure there's no buggy
	 * helper function specification.
	 */
	if (arg_type_is_mem_size(fn->arg1_type) ||
	    arg_type_is_mem_ptr(fn->arg5_type) ||
	    check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
	    check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
	    check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
	    check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
		return false;

	return true;
}

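/* For illustration, a proto that passes check_arg_pair_ok() keeps every
 * mem pointer immediately followed by its size argument. A sketch for a
 * hypothetical helper (bpf_foo_copy and its proto are invented names,
 * not an existing helper):
 */
#if 0	/* example only */
static const struct bpf_func_proto bpf_foo_copy_proto = {
	.func		= bpf_foo_copy,			/* hypothetical */
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,	/* mem ptr ...      */
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,	/* ... paired size  */
	.arg3_type	= ARG_ANYTHING,
};
#endif
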
static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
{
	int count = 0;

	if (arg_type_may_be_refcounted(fn->arg1_type))
		count++;
	if (arg_type_may_be_refcounted(fn->arg2_type))
		count++;
	if (arg_type_may_be_refcounted(fn->arg3_type))
		count++;
	if (arg_type_may_be_refcounted(fn->arg4_type))
		count++;
	if (arg_type_may_be_refcounted(fn->arg5_type))
		count++;

	/* A reference acquiring function cannot acquire
	 * another refcounted ptr.
	 */
	if (may_be_acquire_function(func_id) && count)
		return false;

	/* We only support one arg being unreferenced at the moment,
	 * which is sufficient for the helper functions we have right now.
	 */
	return count <= 1;
}

static bool check_btf_id_ok(const struct bpf_func_proto *fn)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
		if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
			return false;

		if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
			return false;
	}

	return true;
}

static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{
	return check_raw_mode_ok(fn) &&
	       check_arg_pair_ok(fn) &&
	       check_btf_id_ok(fn) &&
	       check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
}

/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
 * are now invalid, so turn them into unknown SCALAR_VALUE.
 */
static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
				     struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs, *reg;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++)
		if (reg_is_pkt_pointer_any(&regs[i]))
			mark_reg_unknown(env, regs, i);

	bpf_for_each_spilled_reg(i, state, reg) {
		if (!reg)
			continue;
		if (reg_is_pkt_pointer_any(reg))
			__mark_reg_unknown(env, reg);
	}
}

static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	int i;

	for (i = 0; i <= vstate->curframe; i++)
		__clear_all_pkt_pointers(env, vstate->frame[i]);
}

enum {
	AT_PKT_END = -1,
	BEYOND_PKT_END = -2,
};

static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
{
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regn];

	if (reg->type != PTR_TO_PACKET)
		/* PTR_TO_PACKET_META is not supported yet */
		return;

	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
	 * How far beyond pkt_end it goes is unknown.
	 * if (!range_open) it's the case of pkt >= pkt_end
	 * if (range_open) it's the case of pkt > pkt_end
	 * hence this pointer is at least 1 byte bigger than pkt_end
	 */
	if (range_open)
		reg->range = BEYOND_PKT_END;
	else
		reg->range = AT_PKT_END;
}

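/* For illustration, this is the branch structure mark_pkt_end() sees in
 * the standard packet-bounds idiom. A sketch of an XDP program (separate
 * BPF object; includes omitted for brevity):
 */
#if 0	/* example only */
SEC("xdp")
int parse_eth(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	if (data + sizeof(*eth) > data_end)
		/* true branch: the compared pkt pointer is beyond pkt_end,
		 * so mark_pkt_end() records BEYOND_PKT_END for it */
		return XDP_DROP;
	/* false branch: eth has a verified range of sizeof(*eth) bytes */
	return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
}
#endif
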
static void release_reg_references(struct bpf_verifier_env *env,
				   struct bpf_func_state *state,
				   int ref_obj_id)
{
	struct bpf_reg_state *regs = state->regs, *reg;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++)
		if (regs[i].ref_obj_id == ref_obj_id)
			mark_reg_unknown(env, regs, i);

	bpf_for_each_spilled_reg(i, state, reg) {
		if (!reg)
			continue;
		if (reg->ref_obj_id == ref_obj_id)
			__mark_reg_unknown(env, reg);
	}
}

/* The pointer with the specified id has released its reference to kernel
 * resources. Identify all copies of the same pointer and clear the reference.
 */
static int release_reference(struct bpf_verifier_env *env,
			     int ref_obj_id)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	int err;
	int i;

	err = release_reference_state(cur_func(env), ref_obj_id);
	if (err)
		return err;

	for (i = 0; i <= vstate->curframe; i++)
		release_reg_references(env, vstate->frame[i], ref_obj_id);

	return 0;
}

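/* For illustration, the acquire/release pairing that drives the code
 * above. A minimal tc sketch (separate BPF object; the zeroed tuple is
 * a stand-in for real header parsing):
 */
#if 0	/* example only */
SEC("tc")
int sk_track(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};	/* would be filled from headers */
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return TC_ACT_OK;
	/* sk and every copy of it share one ref_obj_id. bpf_sk_release()
	 * triggers release_reference(), which scrubs all copies; omitting
	 * it produces "Unreleased reference id=..." at program exit.
	 */
	bpf_sk_release(sk);
	return TC_ACT_OK;
}
#endif
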
static void clear_caller_saved_regs(struct bpf_verifier_env *env,
				    struct bpf_reg_state *regs)
{
	int i;

	/* after the call registers r0 - r5 were scratched */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, regs, caller_saved[i]);
		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
	}
}

typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee,
				   int insn_idx);

static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			     int *insn_idx, int subprog,
			     set_callee_state_fn set_callee_state_cb)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_func_info_aux *func_info_aux;
	struct bpf_func_state *caller, *callee;
	int err;
	bool is_global = false;

	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
		verbose(env, "the call stack of %d frames is too deep\n",
			state->curframe + 2);
		return -E2BIG;
	}

	caller = state->frame[state->curframe];
	if (state->frame[state->curframe + 1]) {
		verbose(env, "verifier bug. Frame %d already allocated\n",
			state->curframe + 1);
		return -EFAULT;
	}

	func_info_aux = env->prog->aux->func_info_aux;
	if (func_info_aux)
		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
	err = btf_check_subprog_arg_match(env, subprog, caller->regs);
	if (err == -EFAULT)
		return err;
	if (is_global) {
		if (err) {
			verbose(env, "Caller passes invalid args into func#%d\n",
				subprog);
			return err;
		} else {
			if (env->log.level & BPF_LOG_LEVEL)
				verbose(env,
					"Func#%d is global and valid. Skipping.\n",
					subprog);
			clear_caller_saved_regs(env, caller->regs);

			/* All global functions return a 64-bit SCALAR_VALUE */
			mark_reg_unknown(env, caller->regs, BPF_REG_0);
			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;

			/* continue with next insn after call */
			return 0;
		}
	}

	if (insn->code == (BPF_JMP | BPF_CALL) &&
	    insn->imm == BPF_FUNC_timer_set_callback) {
		struct bpf_verifier_state *async_cb;

		/* there is no real recursion here. timer callbacks are async */
		env->subprog_info[subprog].is_async_cb = true;
		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
					 *insn_idx, subprog);
		if (!async_cb)
			return -EFAULT;
		callee = async_cb->frame[0];
		callee->async_entry_cnt = caller->async_entry_cnt + 1;

		/* Convert bpf_timer_set_callback() args into timer callback args */
		err = set_callee_state_cb(env, caller, callee, *insn_idx);
		if (err)
			return err;

		clear_caller_saved_regs(env, caller->regs);
		mark_reg_unknown(env, caller->regs, BPF_REG_0);
		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
		/* continue with next insn after call */
		return 0;
	}

	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
	if (!callee)
		return -ENOMEM;
	state->frame[state->curframe + 1] = callee;

	/* callee cannot access r0, r6 - r9 for reading and has to write
	 * into its own stack before reading from it.
	 * callee can read/write into caller's stack
	 */
	init_func_state(env, callee,
			/* remember the callsite, it will be used by bpf_exit */
			*insn_idx /* callsite */,
			state->curframe + 1 /* frameno within this callchain */,
			subprog /* subprog number within this prog */);

	/* Transfer references to the callee */
	err = copy_reference_state(callee, caller);
	if (err)
		return err;

	err = set_callee_state_cb(env, caller, callee, *insn_idx);
	if (err)
		return err;

	clear_caller_saved_regs(env, caller->regs);

	/* only increment it after check_reg_arg() finished */
	state->curframe++;

	/* and go analyze first insn of the callee */
	*insn_idx = env->subprog_info[subprog].start - 1;

	if (env->log.level & BPF_LOG_LEVEL) {
		verbose(env, "caller:\n");
		print_verifier_state(env, caller);
		verbose(env, "callee:\n");
		print_verifier_state(env, callee);
	}
	return 0;
}

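/* For illustration, the global-function fast path above is what a BPF
 * object with a non-static __noinline subprog exercises: the subprog is
 * verified once against its BTF signature, and each call site only sees
 * scratched R1-R5 and an unknown scalar in R0 ("Func#N is global and
 * valid. Skipping." in the log). A sketch (separate BPF object):
 */
#if 0	/* example only */
__noinline int scale(int x)	/* global subprog, checked independently */
{
	return x * 3;
}

SEC("xdp")
int caller(struct xdp_md *ctx)
{
	return scale(1) == 3 ? XDP_PASS : XDP_DROP;
}
#endif
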
int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee)
{
	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
	 *			 void *callback_ctx, u64 flags);
	 * callback_fn(struct bpf_map *map, void *key, void *value,
	 *	       void *callback_ctx);
	 */
	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];

	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;

	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;

	/* pointer to stack or null */
	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];

	/* unused */
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
	return 0;
}

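/* For illustration, the callback signature these register types describe,
 * as seen from a program. A sketch (separate BPF object; "counters" is an
 * illustrative array map defined elsewhere in that object):
 */
#if 0	/* example only */
static __u64 count_one(struct bpf_map *map, __u32 *key, __u64 *val,
		       void *data)
{
	(*(__u32 *)data)++;
	return 0;	/* 0 = continue, 1 = break; prepare_func_exit()
			 * enforces the [0, 1] range on callback R0 */
}

SEC("xdp")
int count_all(struct xdp_md *ctx)
{
	__u32 n = 0;

	bpf_for_each_map_elem(&counters, count_one, &n, 0);
	return XDP_PASS;
}
#endif
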
static int set_callee_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *caller,
			    struct bpf_func_state *callee, int insn_idx)
{
	int i;

	/* copy r1 - r5 args that callee can access. The copy includes parent
	 * pointers, which connects us up to the liveness chain
	 */
	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
		callee->regs[i] = caller->regs[i];
	return 0;
}

static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			   int *insn_idx)
{
	int subprog, target_insn;

	target_insn = *insn_idx + insn->imm + 1;
	subprog = find_subprog(env, target_insn);
	if (subprog < 0) {
		verbose(env, "verifier bug. No program starts at insn %d\n",
			target_insn);
		return -EFAULT;
	}

	return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
}

static int set_map_elem_callback_state(struct bpf_verifier_env *env,
				       struct bpf_func_state *caller,
				       struct bpf_func_state *callee,
				       int insn_idx)
{
	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
	struct bpf_map *map;
	int err;

	if (bpf_map_ptr_poisoned(insn_aux)) {
		verbose(env, "tail_call abusing map_ptr\n");
		return -EINVAL;
	}

	map = BPF_MAP_PTR(insn_aux->map_ptr_state);
	if (!map->ops->map_set_for_each_callback_args ||
	    !map->ops->map_for_each_callback) {
		verbose(env, "callback function not allowed for map\n");
		return -ENOTSUPP;
	}

	err = map->ops->map_set_for_each_callback_args(env, caller, callee);
	if (err)
		return err;

	callee->in_callback_fn = true;
	return 0;
}

static int set_timer_callback_state(struct bpf_verifier_env *env,
				    struct bpf_func_state *caller,
				    struct bpf_func_state *callee,
				    int insn_idx)
{
	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;

	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
	 * callback_fn(struct bpf_map *map, void *key, void *value);
	 */
	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
	callee->regs[BPF_REG_1].map_ptr = map_ptr;

	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
	callee->regs[BPF_REG_2].map_ptr = map_ptr;

	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
	callee->regs[BPF_REG_3].map_ptr = map_ptr;

	/* unused */
	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
	callee->in_async_callback_fn = true;
	return 0;
}

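/* For illustration, the timer API these callback register types back. A
 * sketch (separate BPF object; map, section, and constant choices are
 * illustrative, includes omitted):
 */
#if 0	/* example only */
struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} timers SEC(".maps");

static int timer_cb(void *map, int *key, struct elem *val)
{
	/* R1-R3 were typed by set_timer_callback_state() above */
	return 0;
}

SEC("fentry/bpf_fentry_test1")
int arm_timer(void *ctx)
{
	int key = 0;
	struct elem *e = bpf_map_lookup_elem(&timers, &key);

	if (!e)
		return 0;
	bpf_timer_init(&e->t, &timers, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&e->t, timer_cb);
	bpf_timer_start(&e->t, 1000000000 /* 1s in ns */, 0);
	return 0;
}
#endif
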
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_func_state *caller, *callee;
	struct bpf_reg_state *r0;
	int err;

	callee = state->frame[state->curframe];
	r0 = &callee->regs[BPF_REG_0];
	if (r0->type == PTR_TO_STACK) {
		/* technically it's ok to return caller's stack pointer
		 * (or caller's caller's pointer) back to the caller,
		 * since these pointers are valid. Only current stack
		 * pointer will be invalid as soon as function exits,
		 * but let's be conservative
		 */
		verbose(env, "cannot return stack pointer to the caller\n");
		return -EINVAL;
	}

	state->curframe--;
	caller = state->frame[state->curframe];
	if (callee->in_callback_fn) {
		/* enforce R0 return value range [0, 1]. */
		struct tnum range = tnum_range(0, 1);

		if (r0->type != SCALAR_VALUE) {
			verbose(env, "R0 not a scalar value\n");
			return -EACCES;
		}
		if (!tnum_in(range, r0->var_off)) {
			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
			return -EINVAL;
		}
	} else {
		/* return to the caller whatever r0 had in the callee */
		caller->regs[BPF_REG_0] = *r0;
	}

	/* Transfer references to the caller */
	err = copy_reference_state(caller, callee);
	if (err)
		return err;

	*insn_idx = callee->callsite + 1;
	if (env->log.level & BPF_LOG_LEVEL) {
		verbose(env, "returning from callee:\n");
		print_verifier_state(env, callee);
		verbose(env, "to caller at %d:\n", *insn_idx);
		print_verifier_state(env, caller);
	}
	/* clear everything in the callee */
	free_func_state(callee);
	state->frame[state->curframe + 1] = NULL;
	return 0;
}

static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
				   int func_id,
				   struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];

	if (ret_type != RET_INTEGER ||
	    (func_id != BPF_FUNC_get_stack &&
	     func_id != BPF_FUNC_get_task_stack &&
	     func_id != BPF_FUNC_probe_read_str &&
	     func_id != BPF_FUNC_probe_read_kernel_str &&
	     func_id != BPF_FUNC_probe_read_user_str))
		return;

	ret_reg->smax_value = meta->msize_max_value;
	ret_reg->s32_max_value = meta->msize_max_value;
	ret_reg->smin_value = -MAX_ERRNO;
	ret_reg->s32_min_value = -MAX_ERRNO;
	__reg_deduce_bounds(ret_reg);
	__reg_bound_offset(ret_reg);
	__update_reg_bounds(ret_reg);
}

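/* For illustration, what the refined retval bounds buy a program: after
 * one of the helpers above, R0 is known to lie in [-MAX_ERRNO, size], so
 * a bounded follow-up access can be proven safe. A sketch (separate BPF
 * object; src is a stand-in unsafe pointer):
 */
#if 0	/* example only */
SEC("kprobe/do_unlinkat")
int read_str(struct pt_regs *ctx)
{
	char buf[16];
	const void *src = (const void *)bpf_get_current_task();
	long n;

	n = bpf_probe_read_kernel_str(buf, sizeof(buf), src);
	/* after the call: -MAX_ERRNO <= n <= 16, so once n > 0 is
	 * established, buf[n - 1] is provably within bounds */
	if (n > 0 && buf[n - 1] == '\0')
		return 1;
	return 0;
}
#endif
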
static int
record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
		int func_id, int insn_idx)
{
	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
	struct bpf_map *map = meta->map_ptr;

	if (func_id != BPF_FUNC_tail_call &&
	    func_id != BPF_FUNC_map_lookup_elem &&
	    func_id != BPF_FUNC_map_update_elem &&
	    func_id != BPF_FUNC_map_delete_elem &&
	    func_id != BPF_FUNC_map_push_elem &&
	    func_id != BPF_FUNC_map_pop_elem &&
	    func_id != BPF_FUNC_map_peek_elem &&
	    func_id != BPF_FUNC_for_each_map_elem &&
	    func_id != BPF_FUNC_redirect_map)
		return 0;

	if (map == NULL) {
		verbose(env, "kernel subsystem misconfigured verifier\n");
		return -EINVAL;
	}

	/* In case of read-only, some additional restrictions
	 * need to be applied in order to prevent altering the
	 * state of the map from program side.
	 */
	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
	    (func_id == BPF_FUNC_map_delete_elem ||
	     func_id == BPF_FUNC_map_update_elem ||
	     func_id == BPF_FUNC_map_push_elem ||
	     func_id == BPF_FUNC_map_pop_elem)) {
		verbose(env, "write into map forbidden\n");
		return -EACCES;
	}

	if (!BPF_MAP_PTR(aux->map_ptr_state))
		bpf_map_ptr_store(aux, meta->map_ptr,
				  !meta->map_ptr->bypass_spec_v1);
	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
				  !meta->map_ptr->bypass_spec_v1);
	return 0;
}

static int
record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
		int func_id, int insn_idx)
{
	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
	struct bpf_reg_state *regs = cur_regs(env), *reg;
	struct bpf_map *map = meta->map_ptr;
	struct tnum range;
	u64 val;
	int err;

	if (func_id != BPF_FUNC_tail_call)
		return 0;
	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
		verbose(env, "kernel subsystem misconfigured verifier\n");
		return -EINVAL;
	}

	range = tnum_range(0, map->max_entries - 1);
	reg = &regs[BPF_REG_3];

	if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
		return 0;
	}

	err = mark_chain_precision(env, BPF_REG_3);
	if (err)
		return err;

	val = reg->var_off.value;
	if (bpf_map_key_unseen(aux))
		bpf_map_key_store(aux, val);
	else if (!bpf_map_key_poisoned(aux) &&
		 bpf_map_key_immediate(aux) != val)
		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
	return 0;
}

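/* For illustration, the program-side pattern record_func_key() rewards: a
 * constant, in-range index is recorded per call site and can be turned
 * into a direct jump during fixup, while a variable or out-of-range index
 * poisons the key and keeps the generic indirect tail call. A sketch
 * (separate BPF object; jmp_table as in the earlier example):
 */
#if 0	/* example only */
SEC("xdp")
int entry(struct xdp_md *ctx)
{
	bpf_tail_call(ctx, &jmp_table, 2);	/* constant key: recorded
						 * above and tracked precisely
						 * via mark_chain_precision() */
	return XDP_PASS;
}
#endif
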
static int check_reference_leak(struct bpf_verifier_env *env)
{
	struct bpf_func_state *state = cur_func(env);
	int i;

	for (i = 0; i < state->acquired_refs; i++) {
		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
			state->refs[i].id, state->refs[i].insn_idx);
	}
	return state->acquired_refs ? -EINVAL : 0;
}

static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
				   struct bpf_reg_state *regs)
{
	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
	struct bpf_map *fmt_map = fmt_reg->map_ptr;
	int err, fmt_map_off, num_args;
	u64 fmt_addr;
	char *fmt;

	/* data must be an array of u64 */
	if (data_len_reg->var_off.value % 8)
		return -EINVAL;
	num_args = data_len_reg->var_off.value / 8;

	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
	 * and map_direct_value_addr is set.
	 */
	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
						  fmt_map_off);
	if (err) {
		verbose(env, "verifier bug\n");
		return -EFAULT;
	}
	fmt = (char *)(long)fmt_addr + fmt_map_off;

	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
	 * can focus on validating the format specifiers.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args);
	if (err < 0)
		verbose(env, "Invalid format string\n");

	return err;
}

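/* For illustration, a bpf_snprintf() call shaped the way this check
 * expects: the format string is a constant in a read-only map (.rodata)
 * and the data array length is a constant multiple of 8. A sketch
 * (separate BPF object; section name illustrative):
 */
#if 0	/* example only */
SEC("tp/syscalls/sys_enter_execve")
int show_pid(void *ctx)
{
	static const char fmt[] = "pid=%d comm=%s";	/* lands in .rodata,
							 * a read-only map with
							 * direct value access */
	char comm[16] = {};
	char out[64];
	__u64 args[2];

	bpf_get_current_comm(comm, sizeof(comm));
	args[0] = bpf_get_current_pid_tgid() >> 32;
	args[1] = (__u64)(long)comm;
	/* data_len = 16 is a multiple of 8, so num_args = 2 */
	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
	return 0;
}
#endif
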
static int check_get_func_ip(struct bpf_verifier_env *env)
{
	enum bpf_attach_type eatype = env->prog->expected_attach_type;
	enum bpf_prog_type type = resolve_prog_type(env->prog);
	int func_id = BPF_FUNC_get_func_ip;

	if (type == BPF_PROG_TYPE_TRACING) {
		if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT &&
		    eatype != BPF_MODIFY_RETURN) {
			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
				func_id_name(func_id), func_id);
			return -ENOTSUPP;
		}
		return 0;
	} else if (type == BPF_PROG_TYPE_KPROBE) {
		return 0;
	}

	verbose(env, "func %s#%d not supported for program type %d\n",
		func_id_name(func_id), func_id, type);
	return -ENOTSUPP;
}

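/* For illustration, the two attach flavors this check admits. A sketch
 * (separate BPF object; target function and sections illustrative):
 */
#if 0	/* example only */
SEC("fentry/do_unlinkat")
int at_entry(void *ctx)
{
	bpf_printk("ip %llx", bpf_get_func_ip(ctx));	/* ok: fentry */
	return 0;
}

SEC("kprobe/do_unlinkat")
int at_kprobe(struct pt_regs *ctx)
{
	bpf_printk("ip %llx", bpf_get_func_ip(ctx));	/* ok: kprobe */
	return 0;
}
#endif
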
Yonghong Song69c087b2021-02-26 12:49:25 -08006353static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6354 int *insn_idx_p)
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006355{
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006356 const struct bpf_func_proto *fn = NULL;
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07006357 struct bpf_reg_state *regs;
Daniel Borkmann33ff9822016-04-13 00:10:50 +02006358 struct bpf_call_arg_meta meta;
Yonghong Song69c087b2021-02-26 12:49:25 -08006359 int insn_idx = *insn_idx_p;
Alexei Starovoitov969bf052016-05-05 19:49:10 -07006360 bool changes_data;
Yonghong Song69c087b2021-02-26 12:49:25 -08006361 int i, err, func_id;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006362
6363 /* find function prototype */
Yonghong Song69c087b2021-02-26 12:49:25 -08006364 func_id = insn->imm;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006365 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07006366 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
6367 func_id);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006368 return -EINVAL;
6369 }
6370
Jakub Kicinski00176a32017-10-16 16:40:54 -07006371 if (env->ops->get_func_proto)
Andrey Ignatov5e43f892018-03-30 15:08:00 -07006372 fn = env->ops->get_func_proto(func_id, env->prog);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006373 if (!fn) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07006374 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
6375 func_id);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006376 return -EINVAL;
6377 }
6378
6379 /* eBPF programs must be GPL compatible to use GPL-ed functions */
Daniel Borkmann24701ec2015-03-01 12:31:47 +01006380 if (!env->prog->gpl_compatible && fn->gpl_only) {
Daniel Borkmann3fe28672018-06-02 23:06:33 +02006381 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006382 return -EINVAL;
6383 }
6384
Jiri Olsaeae2e832020-08-25 21:21:19 +02006385 if (fn->allowed && !fn->allowed(env->prog)) {
6386 verbose(env, "helper call is not allowed in probe\n");
6387 return -EINVAL;
6388 }
6389
Daniel Borkmann04514d12017-12-14 21:07:25 +01006390 /* With LD_ABS/IND some JITs save/restore skb from r1. */
Martin KaFai Lau17bedab2016-12-07 15:53:11 -08006391 changes_data = bpf_helper_changes_pkt_data(fn->func);
Daniel Borkmann04514d12017-12-14 21:07:25 +01006392 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
6393 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
6394 func_id_name(func_id), func_id);
6395 return -EINVAL;
6396 }
Alexei Starovoitov969bf052016-05-05 19:49:10 -07006397
Daniel Borkmann33ff9822016-04-13 00:10:50 +02006398 memset(&meta, 0, sizeof(meta));
Daniel Borkmann36bbef52016-09-20 00:26:13 +02006399 meta.pkt_access = fn->pkt_access;
Daniel Borkmann33ff9822016-04-13 00:10:50 +02006400
Martin KaFai Lau1b986582019-03-12 10:23:02 -07006401 err = check_func_proto(fn, func_id);
Daniel Borkmann435faee12016-04-13 00:10:51 +02006402 if (err) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07006403 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
Thomas Grafebb676d2016-10-27 11:23:51 +02006404 func_id_name(func_id), func_id);
Daniel Borkmann435faee12016-04-13 00:10:51 +02006405 return err;
6406 }
6407
Alexei Starovoitovd83525c2019-01-31 15:40:04 -08006408 meta.func_id = func_id;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006409 /* check args */
Dmitrii Banshchikov523a4cf2021-02-26 00:26:29 +04006410 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
Yonghong Songaf7ec132020-06-23 16:08:09 -07006411 err = check_func_arg(env, i, &meta, fn);
Alexei Starovoitova7658e12019-10-15 20:25:04 -07006412 if (err)
6413 return err;
6414 }
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006415
Daniel Borkmannc93552c2018-05-24 02:32:53 +02006416 err = record_func_map(env, &meta, func_id, insn_idx);
6417 if (err)
6418 return err;
6419
Daniel Borkmannd2e4c1e2019-11-22 21:07:59 +01006420 err = record_func_key(env, &meta, func_id, insn_idx);
6421 if (err)
6422 return err;
6423
Daniel Borkmann435faee12016-04-13 00:10:51 +02006424	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
6425 * is inferred from register state.
6426 */
6427 for (i = 0; i < meta.access_size; i++) {
Daniel Borkmannca369602018-02-23 22:29:05 +01006428 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
6429 BPF_WRITE, -1, false);
Daniel Borkmann435faee12016-04-13 00:10:51 +02006430 if (err)
6431 return err;
6432 }
6433
Joe Stringerfd978bf72018-10-02 13:35:35 -07006434 if (func_id == BPF_FUNC_tail_call) {
6435 err = check_reference_leak(env);
6436 if (err) {
6437 verbose(env, "tail_call would lead to reference leak\n");
6438 return err;
6439 }
6440 } else if (is_release_function(func_id)) {
Martin KaFai Lau1b986582019-03-12 10:23:02 -07006441 err = release_reference(env, meta.ref_obj_id);
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08006442 if (err) {
6443 verbose(env, "func %s#%d reference has not been acquired before\n",
6444 func_id_name(func_id), func_id);
Joe Stringerfd978bf72018-10-02 13:35:35 -07006445 return err;
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08006446 }
Joe Stringerfd978bf72018-10-02 13:35:35 -07006447 }
6448
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07006449 regs = cur_regs(env);
Roman Gushchincd339432018-08-02 14:27:24 -07006450
6451	/* Check that the flags argument in get_local_storage(map, flags) is 0;
6452 * this is required because get_local_storage() can't return an error.
6453 */
6454 if (func_id == BPF_FUNC_get_local_storage &&
6455 !register_is_null(&regs[BPF_REG_2])) {
6456 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
6457 return -EINVAL;
6458 }
6459
Yonghong Song69c087b2021-02-26 12:49:25 -08006460 if (func_id == BPF_FUNC_for_each_map_elem) {
6461 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
6462 set_map_elem_callback_state);
6463 if (err < 0)
6464 return -EINVAL;
6465 }
6466
Alexei Starovoitovb00628b2021-07-14 17:54:09 -07006467 if (func_id == BPF_FUNC_timer_set_callback) {
6468 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
6469 set_timer_callback_state);
6470 if (err < 0)
6471 return -EINVAL;
6472 }
6473
Florent Revest7b155232021-04-19 17:52:40 +02006474 if (func_id == BPF_FUNC_snprintf) {
6475 err = check_bpf_snprintf_call(env, regs);
6476 if (err < 0)
6477 return err;
6478 }
6479
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006480 /* reset caller saved regs */
Edward Creedc503a82017-08-15 20:34:35 +01006481 for (i = 0; i < CALLER_SAVED_REGS; i++) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07006482 mark_reg_not_init(env, regs, caller_saved[i]);
Edward Creedc503a82017-08-15 20:34:35 +01006483 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6484 }
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006485
Jiong Wang5327ed32019-05-24 23:25:12 +01006486 /* helper call returns 64-bit value. */
6487 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6488
Edward Creedc503a82017-08-15 20:34:35 +01006489 /* update return register (already marked as written above) */
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006490 if (fn->ret_type == RET_INTEGER) {
Edward Creef1174f72017-08-07 15:26:19 +01006491 /* sets type to SCALAR_VALUE */
Jakub Kicinski61bd5212017-10-09 10:30:11 -07006492 mark_reg_unknown(env, regs, BPF_REG_0);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006493 } else if (fn->ret_type == RET_VOID) {
6494 regs[BPF_REG_0].type = NOT_INIT;
Roman Gushchin3e6a4b32018-08-02 14:27:22 -07006495 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
6496 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
Edward Creef1174f72017-08-07 15:26:19 +01006497 /* There is no offset yet applied, variable or fixed */
Jakub Kicinski61bd5212017-10-09 10:30:11 -07006498 mark_reg_known_zero(env, regs, BPF_REG_0);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006499 /* remember map_ptr, so that check_map_access()
6500 * can check 'value_size' boundary of memory access
6501 * to map element returned from bpf_map_lookup_elem()
6502 */
Daniel Borkmann33ff9822016-04-13 00:10:50 +02006503 if (meta.map_ptr == NULL) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07006504 verbose(env,
6505 "kernel subsystem misconfigured verifier\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006506 return -EINVAL;
6507 }
Daniel Borkmann33ff9822016-04-13 00:10:50 +02006508 regs[BPF_REG_0].map_ptr = meta.map_ptr;
Alexei Starovoitov3e8ce292021-07-14 17:54:11 -07006509 regs[BPF_REG_0].map_uid = meta.map_uid;
Daniel Borkmann4d31f302018-11-01 00:05:53 +01006510 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
6511 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
Alexei Starovoitove16d2f12019-01-31 15:40:05 -08006512 if (map_value_has_spin_lock(meta.map_ptr))
6513 regs[BPF_REG_0].id = ++env->id_gen;
Daniel Borkmann4d31f302018-11-01 00:05:53 +01006514 } else {
6515 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
Daniel Borkmann4d31f302018-11-01 00:05:53 +01006516 }
Joe Stringerc64b7982018-10-02 13:35:33 -07006517 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
6518 mark_reg_known_zero(env, regs, BPF_REG_0);
6519 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
Lorenz Bauer85a51f82019-03-22 09:54:00 +08006520 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
6521 mark_reg_known_zero(env, regs, BPF_REG_0);
6522 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
Martin KaFai Lau655a51e2019-02-09 23:22:24 -08006523 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
6524 mark_reg_known_zero(env, regs, BPF_REG_0);
6525 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
Andrii Nakryiko457f4432020-05-29 00:54:20 -07006526 } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) {
6527 mark_reg_known_zero(env, regs, BPF_REG_0);
6528 regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
Andrii Nakryiko457f4432020-05-29 00:54:20 -07006529 regs[BPF_REG_0].mem_size = meta.mem_size;
Hao Luo63d9b802020-09-29 16:50:48 -07006530 } else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL ||
6531 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) {
Hao Luoeaa6bcb2020-09-29 16:50:47 -07006532 const struct btf_type *t;
6533
6534 mark_reg_known_zero(env, regs, BPF_REG_0);
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08006535 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
Hao Luoeaa6bcb2020-09-29 16:50:47 -07006536 if (!btf_type_is_struct(t)) {
6537 u32 tsize;
6538 const struct btf_type *ret;
6539 const char *tname;
6540
6541 /* resolve the type size of ksym. */
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08006542 ret = btf_resolve_size(meta.ret_btf, t, &tsize);
Hao Luoeaa6bcb2020-09-29 16:50:47 -07006543 if (IS_ERR(ret)) {
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08006544 tname = btf_name_by_offset(meta.ret_btf, t->name_off);
Hao Luoeaa6bcb2020-09-29 16:50:47 -07006545 verbose(env, "unable to resolve the size of type '%s': %ld\n",
6546 tname, PTR_ERR(ret));
6547 return -EINVAL;
6548 }
Hao Luo63d9b802020-09-29 16:50:48 -07006549 regs[BPF_REG_0].type =
6550 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
6551 PTR_TO_MEM : PTR_TO_MEM_OR_NULL;
Hao Luoeaa6bcb2020-09-29 16:50:47 -07006552 regs[BPF_REG_0].mem_size = tsize;
6553 } else {
Hao Luo63d9b802020-09-29 16:50:48 -07006554 regs[BPF_REG_0].type =
6555 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
6556 PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL;
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08006557 regs[BPF_REG_0].btf = meta.ret_btf;
Hao Luoeaa6bcb2020-09-29 16:50:47 -07006558 regs[BPF_REG_0].btf_id = meta.ret_btf_id;
6559 }
KP Singh3ca10322020-11-06 10:37:43 +00006560 } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL ||
6561 fn->ret_type == RET_PTR_TO_BTF_ID) {
Yonghong Songaf7ec132020-06-23 16:08:09 -07006562 int ret_btf_id;
6563
6564 mark_reg_known_zero(env, regs, BPF_REG_0);
KP Singh3ca10322020-11-06 10:37:43 +00006565 regs[BPF_REG_0].type = fn->ret_type == RET_PTR_TO_BTF_ID ?
6566 PTR_TO_BTF_ID :
6567 PTR_TO_BTF_ID_OR_NULL;
Yonghong Songaf7ec132020-06-23 16:08:09 -07006568 ret_btf_id = *fn->ret_btf_id;
6569 if (ret_btf_id == 0) {
6570 verbose(env, "invalid return type %d of func %s#%d\n",
6571 fn->ret_type, func_id_name(func_id), func_id);
6572 return -EINVAL;
6573 }
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08006574		/* current BPF helper definitions only come from
6575 * built-in code with type IDs from vmlinux BTF
6576 */
6577 regs[BPF_REG_0].btf = btf_vmlinux;
Yonghong Songaf7ec132020-06-23 16:08:09 -07006578 regs[BPF_REG_0].btf_id = ret_btf_id;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006579 } else {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07006580 verbose(env, "unknown return type %d of func %s#%d\n",
Thomas Grafebb676d2016-10-27 11:23:51 +02006581 fn->ret_type, func_id_name(func_id), func_id);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07006582 return -EINVAL;
6583 }
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07006584
Martin KaFai Lau93c230e2020-10-19 12:42:12 -07006585 if (reg_type_may_be_null(regs[BPF_REG_0].type))
6586 regs[BPF_REG_0].id = ++env->id_gen;
6587
Lorenz Bauer0f3adc22019-03-22 09:53:59 +08006588 if (is_ptr_cast_function(func_id)) {
Martin KaFai Lau1b986582019-03-12 10:23:02 -07006589 /* For release_reference() */
6590 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
Jakub Sitnicki64d85292020-04-29 20:11:52 +02006591 } else if (is_acquire_function(func_id, meta.map_ptr)) {
Lorenz Bauer0f3adc22019-03-22 09:53:59 +08006592 int id = acquire_reference_state(env, insn_idx);
6593
6594 if (id < 0)
6595 return id;
6596 /* For mark_ptr_or_null_reg() */
6597 regs[BPF_REG_0].id = id;
6598 /* For release_reference() */
6599 regs[BPF_REG_0].ref_obj_id = id;
6600 }
Martin KaFai Lau1b986582019-03-12 10:23:02 -07006601
Yonghong Song849fa502018-04-28 22:28:09 -07006602 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
6603
Jakub Kicinski61bd5212017-10-09 10:30:11 -07006604 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
Kaixu Xia35578d72015-08-06 07:02:35 +00006605 if (err)
6606 return err;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07006607
Song Liufa28dcb2020-06-29 23:28:44 -07006608 if ((func_id == BPF_FUNC_get_stack ||
6609 func_id == BPF_FUNC_get_task_stack) &&
6610 !env->prog->has_callchain_buf) {
Yonghong Songc195651e2018-04-28 22:28:08 -07006611 const char *err_str;
6612
6613#ifdef CONFIG_PERF_EVENTS
6614 err = get_callchain_buffers(sysctl_perf_event_max_stack);
6615 err_str = "cannot get callchain buffer for func %s#%d\n";
6616#else
6617 err = -ENOTSUPP;
6618 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
6619#endif
6620 if (err) {
6621 verbose(env, err_str, func_id_name(func_id), func_id);
6622 return err;
6623 }
6624
6625 env->prog->has_callchain_buf = true;
6626 }
6627
Song Liu5d99cb2c2020-07-23 11:06:45 -07006628 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
6629 env->prog->call_get_stack = true;
6630
Jiri Olsa9b99edc2021-07-14 11:43:55 +02006631 if (func_id == BPF_FUNC_get_func_ip) {
6632 if (check_get_func_ip(env))
6633 return -ENOTSUPP;
6634 env->prog->call_get_func_ip = true;
6635 }
6636
Alexei Starovoitov969bf052016-05-05 19:49:10 -07006637 if (changes_data)
6638 clear_all_pkt_pointers(env);
6639 return 0;
6640}
6641
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006642/* mark_btf_func_reg_size() is used when the reg size is determined by
6643 * the size of the BTF func_proto's return value or argument.
6644 */
6645static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
6646 size_t reg_size)
6647{
6648 struct bpf_reg_state *reg = &cur_regs(env)[regno];
6649
6650 if (regno == BPF_REG_0) {
6651 /* Function return value */
6652 reg->live |= REG_LIVE_WRITTEN;
6653 reg->subreg_def = reg_size == sizeof(u64) ?
6654 DEF_NOT_SUBREG : env->insn_idx + 1;
6655 } else {
6656 /* Function argument */
6657 if (reg_size == sizeof(u64)) {
6658 mark_insn_zext(env, reg);
6659 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
6660 } else {
6661 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
6662 }
6663 }
6664}
6665
6666static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn)
6667{
6668 const struct btf_type *t, *func, *func_proto, *ptr_type;
6669 struct bpf_reg_state *regs = cur_regs(env);
6670 const char *func_name, *ptr_type_name;
6671 u32 i, nargs, func_id, ptr_type_id;
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05306672 struct module *btf_mod = NULL;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006673 const struct btf_param *args;
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05306674 struct btf *desc_btf;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006675 int err;
6676
Kumar Kartikeya Dwivedia5d82722021-10-02 06:47:50 +05306677 /* skip for now, but return error when we find this in fixup_kfunc_call */
6678 if (!insn->imm)
6679 return 0;
6680
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05306681 desc_btf = find_kfunc_desc_btf(env, insn->imm, insn->off, &btf_mod);
6682 if (IS_ERR(desc_btf))
6683 return PTR_ERR(desc_btf);
6684
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006685 func_id = insn->imm;
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05306686 func = btf_type_by_id(desc_btf, func_id);
6687 func_name = btf_name_by_offset(desc_btf, func->name_off);
6688 func_proto = btf_type_by_id(desc_btf, func->type);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006689
6690 if (!env->ops->check_kfunc_call ||
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05306691 !env->ops->check_kfunc_call(func_id, btf_mod)) {
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006692 verbose(env, "calling kernel function %s is not allowed\n",
6693 func_name);
6694 return -EACCES;
6695 }
6696
6697 /* Check the arguments */
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05306698 err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006699 if (err)
6700 return err;
6701
6702 for (i = 0; i < CALLER_SAVED_REGS; i++)
6703 mark_reg_not_init(env, regs, caller_saved[i]);
6704
6705 /* Check return type */
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05306706 t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006707 if (btf_type_is_scalar(t)) {
6708 mark_reg_unknown(env, regs, BPF_REG_0);
6709 mark_btf_func_reg_size(env, BPF_REG_0, t->size);
6710 } else if (btf_type_is_ptr(t)) {
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05306711 ptr_type = btf_type_skip_modifiers(desc_btf, t->type,
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006712 &ptr_type_id);
6713 if (!btf_type_is_struct(ptr_type)) {
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05306714 ptr_type_name = btf_name_by_offset(desc_btf,
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006715 ptr_type->name_off);
6716 verbose(env, "kernel function %s returns pointer type %s %s is not supported\n",
6717 func_name, btf_type_str(ptr_type),
6718 ptr_type_name);
6719 return -EINVAL;
6720 }
6721 mark_reg_known_zero(env, regs, BPF_REG_0);
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05306722 regs[BPF_REG_0].btf = desc_btf;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006723 regs[BPF_REG_0].type = PTR_TO_BTF_ID;
6724 regs[BPF_REG_0].btf_id = ptr_type_id;
6725 mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
6726 } /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
6727
6728 nargs = btf_type_vlen(func_proto);
6729 args = (const struct btf_param *)(func_proto + 1);
6730 for (i = 0; i < nargs; i++) {
6731 u32 regno = i + 1;
6732
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +05306733 t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -07006734 if (btf_type_is_ptr(t))
6735 mark_btf_func_reg_size(env, regno, sizeof(void *));
6736 else
6737 /* scalar. ensured by btf_check_kfunc_arg_match() */
6738 mark_btf_func_reg_size(env, regno, t->size);
6739 }
6740
6741 return 0;
6742}
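
/* Illustrative BPF-side shape of a kfunc call (a sketch; the function name
 * is hypothetical and callable only where ops->check_kfunc_call allows it):
 *
 *	extern struct task_struct *my_task_kfunc(int pid) __ksym;
 *
 * As verified above, a struct pointer return becomes PTR_TO_BTF_ID with the
 * BTF id resolved from the kfunc's func_proto, a scalar return stays a
 * scalar, and any other pointer return type is rejected with -EINVAL.
 */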
6743
Edward Creeb03c9f92017-08-07 15:26:36 +01006744static bool signed_add_overflows(s64 a, s64 b)
6745{
6746 /* Do the add in u64, where overflow is well-defined */
6747 s64 res = (s64)((u64)a + (u64)b);
6748
6749 if (b < 0)
6750 return res > a;
6751 return res < a;
6752}
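
/* Worked example for signed_add_overflows() (illustrative comment only):
 * a = S64_MAX, b = 1: the u64 addition wraps and res == S64_MIN; since
 * b > 0 and res < a, overflow is reported. a = S64_MIN, b = -1: res wraps
 * to S64_MAX; since b < 0 and res > a, overflow is reported. a = -1,
 * b = -2: res == -3, which is not > a, so no overflow, as expected.
 */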
6753
Daniel Borkmannbc895e82021-01-20 00:24:24 +01006754static bool signed_add32_overflows(s32 a, s32 b)
John Fastabend3f50f132020-03-30 14:36:39 -07006755{
6756 /* Do the add in u32, where overflow is well-defined */
6757 s32 res = (s32)((u32)a + (u32)b);
6758
6759 if (b < 0)
6760 return res > a;
6761 return res < a;
6762}
6763
Daniel Borkmannbc895e82021-01-20 00:24:24 +01006764static bool signed_sub_overflows(s64 a, s64 b)
Edward Creeb03c9f92017-08-07 15:26:36 +01006765{
6766 /* Do the sub in u64, where overflow is well-defined */
6767 s64 res = (s64)((u64)a - (u64)b);
6768
6769 if (b < 0)
6770 return res < a;
6771 return res > a;
David S. Millerd1174412017-05-10 11:22:52 -07006772}
6773
John Fastabend3f50f132020-03-30 14:36:39 -07006774static bool signed_sub32_overflows(s32 a, s32 b)
6775{
Daniel Borkmannbc895e82021-01-20 00:24:24 +01006776 /* Do the sub in u32, where overflow is well-defined */
John Fastabend3f50f132020-03-30 14:36:39 -07006777 s32 res = (s32)((u32)a - (u32)b);
6778
6779 if (b < 0)
6780 return res < a;
6781 return res > a;
6782}
6783
Alexei Starovoitovbb7f0f92017-12-18 20:12:00 -08006784static bool check_reg_sane_offset(struct bpf_verifier_env *env,
6785 const struct bpf_reg_state *reg,
6786 enum bpf_reg_type type)
6787{
6788 bool known = tnum_is_const(reg->var_off);
6789 s64 val = reg->var_off.value;
6790 s64 smin = reg->smin_value;
6791
6792 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
6793 verbose(env, "math between %s pointer and %lld is not allowed\n",
6794 reg_type_str[type], val);
6795 return false;
6796 }
6797
6798 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
6799 verbose(env, "%s pointer offset %d is not allowed\n",
6800 reg_type_str[type], reg->off);
6801 return false;
6802 }
6803
6804 if (smin == S64_MIN) {
6805 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
6806 reg_type_str[type]);
6807 return false;
6808 }
6809
6810 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
6811 verbose(env, "value %lld makes %s pointer be out of bounds\n",
6812 smin, reg_type_str[type]);
6813 return false;
6814 }
6815
6816 return true;
6817}
6818
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006819static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
6820{
6821 return &env->insn_aux_data[env->insn_idx];
6822}
6823
Daniel Borkmanna6aaece2021-03-23 09:30:01 +01006824enum {
6825 REASON_BOUNDS = -1,
6826 REASON_TYPE = -2,
6827 REASON_PATHS = -3,
6828 REASON_LIMIT = -4,
6829 REASON_STACK = -5,
6830};
6831
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006832static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
Daniel Borkmannbb01a1b2021-05-21 10:19:22 +00006833 u32 *alu_limit, bool mask_to_left)
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006834{
Daniel Borkmann7fedb632021-03-24 10:38:26 +01006835 u32 max = 0, ptr_limit = 0;
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006836
6837 switch (ptr_reg->type) {
6838 case PTR_TO_STACK:
Piotr Krysiuk1b1597e2021-03-16 09:47:02 +01006839		/* Offset 0 is out-of-bounds, but an acceptable start for the
Daniel Borkmann7fedb632021-03-24 10:38:26 +01006840 * left direction, see BPF_REG_FP. Also, unknown scalar
6841 * offset where we would need to deal with min/max bounds is
6842 * currently prohibited for unprivileged.
Piotr Krysiuk1b1597e2021-03-16 09:47:02 +01006843 */
6844 max = MAX_BPF_STACK + mask_to_left;
Daniel Borkmann7fedb632021-03-24 10:38:26 +01006845 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
Daniel Borkmannb658bbb2021-03-23 09:04:10 +01006846 break;
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006847 case PTR_TO_MAP_VALUE:
Piotr Krysiuk1b1597e2021-03-16 09:47:02 +01006848 max = ptr_reg->map_ptr->value_size;
Daniel Borkmann7fedb632021-03-24 10:38:26 +01006849 ptr_limit = (mask_to_left ?
6850 ptr_reg->smin_value :
6851 ptr_reg->umax_value) + ptr_reg->off;
Daniel Borkmannb658bbb2021-03-23 09:04:10 +01006852 break;
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006853 default:
Daniel Borkmanna6aaece2021-03-23 09:30:01 +01006854 return REASON_TYPE;
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006855 }
Daniel Borkmannb658bbb2021-03-23 09:04:10 +01006856
6857 if (ptr_limit >= max)
Daniel Borkmanna6aaece2021-03-23 09:30:01 +01006858 return REASON_LIMIT;
Daniel Borkmannb658bbb2021-03-23 09:04:10 +01006859 *alu_limit = ptr_limit;
6860 return 0;
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006861}
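
/* Worked example (illustrative): a PTR_TO_STACK reg pointing at fp-16 with
 * a constant var_off of 0 yields ptr_limit = -(0 + -16) = 16, i.e.
 * speculative ALU on this pointer may be masked to move at most 16 bytes.
 * With mask_to_left, max = MAX_BPF_STACK + 1 = 513, so the
 * ptr_limit >= max rejection does not fire and 16 becomes *alu_limit.
 */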
6862
Daniel Borkmannd3bd7412019-01-06 00:54:37 +01006863static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
6864 const struct bpf_insn *insn)
6865{
Alexei Starovoitov2c78ee82020-05-13 16:03:54 -07006866 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
Daniel Borkmannd3bd7412019-01-06 00:54:37 +01006867}
6868
6869static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
6870 u32 alu_state, u32 alu_limit)
6871{
6872 /* If we arrived here from different branches with different
6873 * state or limits to sanitize, then this won't work.
6874 */
6875 if (aux->alu_state &&
6876 (aux->alu_state != alu_state ||
6877 aux->alu_limit != alu_limit))
Daniel Borkmanna6aaece2021-03-23 09:30:01 +01006878 return REASON_PATHS;
Daniel Borkmannd3bd7412019-01-06 00:54:37 +01006879
Brendan Jackmane6ac5932021-02-17 10:45:09 +00006880 /* Corresponding fixup done in do_misc_fixups(). */
Daniel Borkmannd3bd7412019-01-06 00:54:37 +01006881 aux->alu_state = alu_state;
6882 aux->alu_limit = alu_limit;
6883 return 0;
6884}
6885
6886static int sanitize_val_alu(struct bpf_verifier_env *env,
6887 struct bpf_insn *insn)
6888{
6889 struct bpf_insn_aux_data *aux = cur_aux(env);
6890
6891 if (can_skip_alu_sanitation(env, insn))
6892 return 0;
6893
6894 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
6895}
6896
Daniel Borkmannf5288192021-03-24 11:25:39 +01006897static bool sanitize_needed(u8 opcode)
6898{
6899 return opcode == BPF_ADD || opcode == BPF_SUB;
6900}
6901
Daniel Borkmann3d0220f2021-05-21 10:17:36 +00006902struct bpf_sanitize_info {
6903 struct bpf_insn_aux_data aux;
Daniel Borkmannbb01a1b2021-05-21 10:19:22 +00006904 bool mask_to_left;
Daniel Borkmann3d0220f2021-05-21 10:17:36 +00006905};
6906
Daniel Borkmann9183671a2021-05-28 15:47:32 +00006907static struct bpf_verifier_state *
6908sanitize_speculative_path(struct bpf_verifier_env *env,
6909 const struct bpf_insn *insn,
6910 u32 next_idx, u32 curr_idx)
6911{
6912 struct bpf_verifier_state *branch;
6913 struct bpf_reg_state *regs;
6914
6915 branch = push_stack(env, next_idx, curr_idx, true);
6916 if (branch && insn) {
6917 regs = branch->frame[branch->curframe]->regs;
6918 if (BPF_SRC(insn->code) == BPF_K) {
6919 mark_reg_unknown(env, regs, insn->dst_reg);
6920 } else if (BPF_SRC(insn->code) == BPF_X) {
6921 mark_reg_unknown(env, regs, insn->dst_reg);
6922 mark_reg_unknown(env, regs, insn->src_reg);
6923 }
6924 }
6925 return branch;
6926}
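
/* Sketch of why the compared registers are marked unknown (illustrative):
 * for a conditional like
 *
 *	if r2 > 10 goto +4	// r2 holds an attacker-influenced scalar
 *
 * the CPU may speculatively run the "wrong" side of the branch, where the
 * assumed bounds on r2 (and on a BPF_X source register) do not hold, so the
 * pushed speculative state must not rely on them.
 */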
6927
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006928static int sanitize_ptr_alu(struct bpf_verifier_env *env,
6929 struct bpf_insn *insn,
6930 const struct bpf_reg_state *ptr_reg,
Daniel Borkmann6f55b2f2021-03-22 15:45:52 +01006931 const struct bpf_reg_state *off_reg,
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006932 struct bpf_reg_state *dst_reg,
Daniel Borkmann3d0220f2021-05-21 10:17:36 +00006933 struct bpf_sanitize_info *info,
Daniel Borkmann7fedb632021-03-24 10:38:26 +01006934 const bool commit_window)
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006935{
Daniel Borkmann3d0220f2021-05-21 10:17:36 +00006936 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006937 struct bpf_verifier_state *vstate = env->cur_state;
Daniel Borkmann801c6052021-04-29 15:19:37 +00006938 bool off_is_imm = tnum_is_const(off_reg->var_off);
Daniel Borkmann6f55b2f2021-03-22 15:45:52 +01006939 bool off_is_neg = off_reg->smin_value < 0;
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006940 bool ptr_is_dst_reg = ptr_reg == dst_reg;
6941 u8 opcode = BPF_OP(insn->code);
6942 u32 alu_state, alu_limit;
6943 struct bpf_reg_state tmp;
6944 bool ret;
Piotr Krysiukf2323262021-03-16 09:47:02 +01006945 int err;
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006946
Daniel Borkmannd3bd7412019-01-06 00:54:37 +01006947 if (can_skip_alu_sanitation(env, insn))
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006948 return 0;
6949
6950 /* We already marked aux for masking from non-speculative
6951 * paths, thus we got here in the first place. We only care
6952	 * about exploring bad accesses from here.
6953 */
6954 if (vstate->speculative)
6955 goto do_sim;
6956
Daniel Borkmannbb01a1b2021-05-21 10:19:22 +00006957 if (!commit_window) {
6958 if (!tnum_is_const(off_reg->var_off) &&
6959 (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
6960 return REASON_BOUNDS;
6961
6962 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
6963 (opcode == BPF_SUB && !off_is_neg);
6964 }
6965
6966 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
Piotr Krysiukf2323262021-03-16 09:47:02 +01006967 if (err < 0)
6968 return err;
6969
Daniel Borkmann7fedb632021-03-24 10:38:26 +01006970 if (commit_window) {
6971 /* In commit phase we narrow the masking window based on
6972 * the observed pointer move after the simulated operation.
6973 */
Daniel Borkmann3d0220f2021-05-21 10:17:36 +00006974 alu_state = info->aux.alu_state;
6975 alu_limit = abs(info->aux.alu_limit - alu_limit);
Daniel Borkmann7fedb632021-03-24 10:38:26 +01006976 } else {
6977 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
Daniel Borkmann801c6052021-04-29 15:19:37 +00006978 alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
Daniel Borkmann7fedb632021-03-24 10:38:26 +01006979 alu_state |= ptr_is_dst_reg ?
6980 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
Daniel Borkmanne042aa52021-07-16 09:18:21 +00006981
6982 /* Limit pruning on unknown scalars to enable deep search for
6983 * potential masking differences from other program paths.
6984 */
6985 if (!off_is_imm)
6986 env->explore_alu_limits = true;
Daniel Borkmann7fedb632021-03-24 10:38:26 +01006987 }
6988
Piotr Krysiukf2323262021-03-16 09:47:02 +01006989 err = update_alu_sanitation_state(aux, alu_state, alu_limit);
6990 if (err < 0)
6991 return err;
Daniel Borkmann979d63d2019-01-03 00:58:34 +01006992do_sim:
Daniel Borkmann7fedb632021-03-24 10:38:26 +01006993 /* If we're in commit phase, we're done here given we already
6994 * pushed the truncated dst_reg into the speculative verification
6995 * stack.
Daniel Borkmanna7036192021-05-04 08:58:25 +00006996 *
6997 * Also, when register is a known constant, we rewrite register-based
6998 * operation to immediate-based, and thus do not need masking (and as
6999 * a consequence, do not need to simulate the zero-truncation either).
Daniel Borkmann7fedb632021-03-24 10:38:26 +01007000 */
Daniel Borkmanna7036192021-05-04 08:58:25 +00007001 if (commit_window || off_is_imm)
Daniel Borkmann7fedb632021-03-24 10:38:26 +01007002 return 0;
7003
Daniel Borkmann979d63d2019-01-03 00:58:34 +01007004 /* Simulate and find potential out-of-bounds access under
7005 * speculative execution from truncation as a result of
7006 * masking when off was not within expected range. If off
7007 * sits in dst, then we temporarily need to move ptr there
7008 * to simulate dst (== 0) +/-= ptr. Needed, for example,
7009 * for cases where we use K-based arithmetic in one direction
7010 * and truncated reg-based in the other in order to explore
7011 * bad access.
7012 */
7013 if (!ptr_is_dst_reg) {
7014 tmp = *dst_reg;
7015 *dst_reg = *ptr_reg;
7016 }
Daniel Borkmann9183671a2021-05-28 15:47:32 +00007017 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
7018 env->insn_idx);
Xu Yu08032782019-03-21 18:00:35 +08007019 if (!ptr_is_dst_reg && ret)
Daniel Borkmann979d63d2019-01-03 00:58:34 +01007020 *dst_reg = tmp;
Daniel Borkmanna6aaece2021-03-23 09:30:01 +01007021 return !ret ? REASON_STACK : 0;
7022}
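
/* sanitize_ptr_alu() runs twice per sanitized pointer ALU op (illustrative
 * summary): first with commit_window == false to record alu_state/alu_limit
 * and push the speculative path, then, once the new bounds are computed,
 * with commit_window == true to narrow the limit to the observed pointer
 * move, e.g. a recorded limit of 16 against a recomputed 8 commits
 * abs(16 - 8) = 8.
 */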
7023
Daniel Borkmannfe9a5ca2021-05-28 13:47:27 +00007024static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
7025{
7026 struct bpf_verifier_state *vstate = env->cur_state;
7027
7028 /* If we simulate paths under speculation, we don't update the
7029 * insn as 'seen' such that when we verify unreachable paths in
7030 * the non-speculative domain, sanitize_dead_code() can still
7031 * rewrite/sanitize them.
7032 */
7033 if (!vstate->speculative)
7034 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
7035}
7036
Daniel Borkmanna6aaece2021-03-23 09:30:01 +01007037static int sanitize_err(struct bpf_verifier_env *env,
7038 const struct bpf_insn *insn, int reason,
7039 const struct bpf_reg_state *off_reg,
7040 const struct bpf_reg_state *dst_reg)
7041{
7042 static const char *err = "pointer arithmetic with it prohibited for !root";
7043 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
7044 u32 dst = insn->dst_reg, src = insn->src_reg;
7045
7046 switch (reason) {
7047 case REASON_BOUNDS:
7048 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
7049 off_reg == dst_reg ? dst : src, err);
7050 break;
7051 case REASON_TYPE:
7052 verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
7053 off_reg == dst_reg ? src : dst, err);
7054 break;
7055 case REASON_PATHS:
7056 verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
7057 dst, op, err);
7058 break;
7059 case REASON_LIMIT:
7060 verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
7061 dst, op, err);
7062 break;
7063 case REASON_STACK:
7064 verbose(env, "R%d could not be pushed for speculative verification, %s\n",
7065 dst, err);
7066 break;
7067 default:
7068 verbose(env, "verifier internal error: unknown reason (%d)\n",
7069 reason);
7070 break;
7071 }
7072
7073 return -EACCES;
Daniel Borkmann979d63d2019-01-03 00:58:34 +01007074}
7075
Andrei Matei01f810a2021-02-06 20:10:24 -05007076/* check that stack access falls within stack limits and that 'reg' doesn't
7077 * have a variable offset.
7078 *
7079 * Variable offset is prohibited for unprivileged mode for simplicity since it
7080 * requires corresponding support in Spectre masking for stack ALU. See also
7081 * retrieve_ptr_limit().
7082 *
7084 * 'off' includes 'reg->off'.
7085 */
7086static int check_stack_access_for_ptr_arithmetic(
7087 struct bpf_verifier_env *env,
7088 int regno,
7089 const struct bpf_reg_state *reg,
7090 int off)
7091{
7092 if (!tnum_is_const(reg->var_off)) {
7093 char tn_buf[48];
7094
7095 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
7096 verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
7097 regno, tn_buf, off);
7098 return -EACCES;
7099 }
7100
7101 if (off >= 0 || off < -MAX_BPF_STACK) {
7102 verbose(env, "R%d stack pointer arithmetic goes out of range, "
7103 "prohibited for !root; off=%d\n", regno, off);
7104 return -EACCES;
7105 }
7106
7107 return 0;
7108}
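
/* Examples (illustrative): off = -8 with a constant var_off is accepted;
 * off = 8 (at or above the frame) or off = -520 (beyond MAX_BPF_STACK = 512)
 * is rejected, as is any variable offset for unprivileged programs.
 */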
7109
Daniel Borkmann073815b2021-03-23 15:05:48 +01007110static int sanitize_check_bounds(struct bpf_verifier_env *env,
7111 const struct bpf_insn *insn,
7112 const struct bpf_reg_state *dst_reg)
7113{
7114 u32 dst = insn->dst_reg;
7115
7116 /* For unprivileged we require that resulting offset must be in bounds
7117 * in order to be able to sanitize access later on.
7118 */
7119 if (env->bypass_spec_v1)
7120 return 0;
7121
7122 switch (dst_reg->type) {
7123 case PTR_TO_STACK:
7124 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
7125 dst_reg->off + dst_reg->var_off.value))
7126 return -EACCES;
7127 break;
7128 case PTR_TO_MAP_VALUE:
7129 if (check_map_access(env, dst, dst_reg->off, 1, false)) {
7130 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
7131 "prohibited for !root\n", dst);
7132 return -EACCES;
7133 }
7134 break;
7135 default:
7136 break;
7137 }
7138
7139 return 0;
7140}
Andrei Matei01f810a2021-02-06 20:10:24 -05007141
Edward Creef1174f72017-08-07 15:26:19 +01007142/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
Edward Creef1174f72017-08-07 15:26:19 +01007143 * Caller should also handle BPF_MOV case separately.
7144 * If we return -EACCES, caller may want to try again treating pointer as a
7145 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
7146 */
7147static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
7148 struct bpf_insn *insn,
7149 const struct bpf_reg_state *ptr_reg,
7150 const struct bpf_reg_state *off_reg)
Josef Bacik48461132016-09-28 10:54:32 -04007151{
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08007152 struct bpf_verifier_state *vstate = env->cur_state;
7153 struct bpf_func_state *state = vstate->frame[vstate->curframe];
7154 struct bpf_reg_state *regs = state->regs, *dst_reg;
Edward Creef1174f72017-08-07 15:26:19 +01007155 bool known = tnum_is_const(off_reg->var_off);
Edward Creeb03c9f92017-08-07 15:26:36 +01007156 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
7157 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
7158 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
7159 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
Daniel Borkmann3d0220f2021-05-21 10:17:36 +00007160 struct bpf_sanitize_info info = {};
Josef Bacik48461132016-09-28 10:54:32 -04007161 u8 opcode = BPF_OP(insn->code);
Daniel Borkmann24c109b2021-03-23 08:51:02 +01007162 u32 dst = insn->dst_reg;
Daniel Borkmann979d63d2019-01-03 00:58:34 +01007163 int ret;
Josef Bacik48461132016-09-28 10:54:32 -04007164
Edward Creef1174f72017-08-07 15:26:19 +01007165 dst_reg = &regs[dst];
Josef Bacik48461132016-09-28 10:54:32 -04007166
Daniel Borkmann6f161012018-01-18 01:15:21 +01007167 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
7168 smin_val > smax_val || umin_val > umax_val) {
7169 /* Taint dst register if offset had invalid bounds derived from
7170 * e.g. dead branches.
7171 */
Daniel Borkmannf54c7892019-12-22 23:37:40 +01007172 __mark_reg_unknown(env, dst_reg);
Daniel Borkmann6f161012018-01-18 01:15:21 +01007173 return 0;
Josef Bacik48461132016-09-28 10:54:32 -04007174 }
7175
Edward Creef1174f72017-08-07 15:26:19 +01007176 if (BPF_CLASS(insn->code) != BPF_ALU64) {
7177 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
Yonghong Song6c693542020-06-18 16:46:31 -07007178 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
7179 __mark_reg_unknown(env, dst_reg);
7180 return 0;
7181 }
7182
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08007183 verbose(env,
7184 "R%d 32-bit pointer arithmetic prohibited\n",
7185 dst);
Edward Creef1174f72017-08-07 15:26:19 +01007186 return -EACCES;
7187 }
David S. Millerd1174412017-05-10 11:22:52 -07007188
Joe Stringeraad2eea2018-10-02 13:35:30 -07007189 switch (ptr_reg->type) {
7190 case PTR_TO_MAP_VALUE_OR_NULL:
7191 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
7192 dst, reg_type_str[ptr_reg->type]);
Edward Creef1174f72017-08-07 15:26:19 +01007193 return -EACCES;
Joe Stringeraad2eea2018-10-02 13:35:30 -07007194 case CONST_PTR_TO_MAP:
Yonghong Song7c696732020-09-08 10:57:02 -07007195 /* smin_val represents the known value */
7196 if (known && smin_val == 0 && opcode == BPF_ADD)
7197 break;
Gustavo A. R. Silva87317452020-10-02 18:42:17 -05007198 fallthrough;
Joe Stringeraad2eea2018-10-02 13:35:30 -07007199 case PTR_TO_PACKET_END:
Joe Stringerc64b7982018-10-02 13:35:33 -07007200 case PTR_TO_SOCKET:
7201 case PTR_TO_SOCKET_OR_NULL:
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -08007202 case PTR_TO_SOCK_COMMON:
7203 case PTR_TO_SOCK_COMMON_OR_NULL:
Martin KaFai Lau655a51e2019-02-09 23:22:24 -08007204 case PTR_TO_TCP_SOCK:
7205 case PTR_TO_TCP_SOCK_OR_NULL:
Jonathan Lemonfada7fd2019-06-06 13:59:40 -07007206 case PTR_TO_XDP_SOCK:
Joe Stringeraad2eea2018-10-02 13:35:30 -07007207 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
7208 dst, reg_type_str[ptr_reg->type]);
Edward Creef1174f72017-08-07 15:26:19 +01007209 return -EACCES;
Joe Stringeraad2eea2018-10-02 13:35:30 -07007210 default:
7211 break;
Edward Creef1174f72017-08-07 15:26:19 +01007212 }
7213
7214 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
7215 * The id may be overwritten later if we create a new variable offset.
Josef Bacik48461132016-09-28 10:54:32 -04007216 */
Edward Creef1174f72017-08-07 15:26:19 +01007217 dst_reg->type = ptr_reg->type;
7218 dst_reg->id = ptr_reg->id;
Josef Bacikf23cc642016-11-14 15:45:36 -05007219
Alexei Starovoitovbb7f0f92017-12-18 20:12:00 -08007220 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
7221 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
7222 return -EINVAL;
7223
John Fastabend3f50f132020-03-30 14:36:39 -07007224 /* pointer types do not carry 32-bit bounds at the moment. */
7225 __mark_reg32_unbounded(dst_reg);
7226
Daniel Borkmann7fedb632021-03-24 10:38:26 +01007227 if (sanitize_needed(opcode)) {
7228 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
Daniel Borkmann3d0220f2021-05-21 10:17:36 +00007229 &info, false);
Daniel Borkmanna6aaece2021-03-23 09:30:01 +01007230 if (ret < 0)
7231 return sanitize_err(env, insn, ret, off_reg, dst_reg);
Daniel Borkmann7fedb632021-03-24 10:38:26 +01007232 }
Daniel Borkmanna6aaece2021-03-23 09:30:01 +01007233
Josef Bacik48461132016-09-28 10:54:32 -04007234 switch (opcode) {
7235 case BPF_ADD:
Edward Creef1174f72017-08-07 15:26:19 +01007236 /* We can take a fixed offset as long as it doesn't overflow
7237 * the s32 'off' field
7238 */
Edward Creeb03c9f92017-08-07 15:26:36 +01007239 if (known && (ptr_reg->off + smin_val ==
7240 (s64)(s32)(ptr_reg->off + smin_val))) {
Edward Creef1174f72017-08-07 15:26:19 +01007241 /* pointer += K. Accumulate it into fixed offset */
Edward Creeb03c9f92017-08-07 15:26:36 +01007242 dst_reg->smin_value = smin_ptr;
7243 dst_reg->smax_value = smax_ptr;
7244 dst_reg->umin_value = umin_ptr;
7245 dst_reg->umax_value = umax_ptr;
Edward Creef1174f72017-08-07 15:26:19 +01007246 dst_reg->var_off = ptr_reg->var_off;
Edward Creeb03c9f92017-08-07 15:26:36 +01007247 dst_reg->off = ptr_reg->off + smin_val;
Daniel Borkmann09625902018-11-01 00:05:52 +01007248 dst_reg->raw = ptr_reg->raw;
Edward Creef1174f72017-08-07 15:26:19 +01007249 break;
7250 }
Edward Creef1174f72017-08-07 15:26:19 +01007251 /* A new variable offset is created. Note that off_reg->off
7252 * == 0, since it's a scalar.
7253 * dst_reg gets the pointer type and since some positive
7254 * integer value was added to the pointer, give it a new 'id'
7255 * if it's a PTR_TO_PACKET.
7256	 * This creates a new 'base' pointer; off_reg (the variable) gets
7257 * added into the variable offset, and we copy the fixed offset
7258 * from ptr_reg.
7259 */
Edward Creeb03c9f92017-08-07 15:26:36 +01007260 if (signed_add_overflows(smin_ptr, smin_val) ||
7261 signed_add_overflows(smax_ptr, smax_val)) {
7262 dst_reg->smin_value = S64_MIN;
7263 dst_reg->smax_value = S64_MAX;
7264 } else {
7265 dst_reg->smin_value = smin_ptr + smin_val;
7266 dst_reg->smax_value = smax_ptr + smax_val;
7267 }
7268 if (umin_ptr + umin_val < umin_ptr ||
7269 umax_ptr + umax_val < umax_ptr) {
7270 dst_reg->umin_value = 0;
7271 dst_reg->umax_value = U64_MAX;
7272 } else {
7273 dst_reg->umin_value = umin_ptr + umin_val;
7274 dst_reg->umax_value = umax_ptr + umax_val;
7275 }
Edward Creef1174f72017-08-07 15:26:19 +01007276 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
7277 dst_reg->off = ptr_reg->off;
Daniel Borkmann09625902018-11-01 00:05:52 +01007278 dst_reg->raw = ptr_reg->raw;
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02007279 if (reg_is_pkt_pointer(ptr_reg)) {
Edward Creef1174f72017-08-07 15:26:19 +01007280 dst_reg->id = ++env->id_gen;
7281 /* something was added to pkt_ptr, set range to zero */
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08007282 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
Edward Creef1174f72017-08-07 15:26:19 +01007283 }
Josef Bacik48461132016-09-28 10:54:32 -04007284 break;
7285 case BPF_SUB:
Edward Creef1174f72017-08-07 15:26:19 +01007286 if (dst_reg == off_reg) {
7287 /* scalar -= pointer. Creates an unknown scalar */
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08007288 verbose(env, "R%d tried to subtract pointer from scalar\n",
7289 dst);
Edward Creef1174f72017-08-07 15:26:19 +01007290 return -EACCES;
7291 }
7292		/* We don't allow subtraction from FP, because (according to the
7293		 * test_verifier.c test "invalid fp arithmetic") JITs might not
7294 * be able to deal with it.
Edward Cree93057062017-07-21 14:37:34 +01007295 */
Edward Creef1174f72017-08-07 15:26:19 +01007296 if (ptr_reg->type == PTR_TO_STACK) {
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08007297 verbose(env, "R%d subtraction from stack pointer prohibited\n",
7298 dst);
Edward Creef1174f72017-08-07 15:26:19 +01007299 return -EACCES;
7300 }
Edward Creeb03c9f92017-08-07 15:26:36 +01007301 if (known && (ptr_reg->off - smin_val ==
7302 (s64)(s32)(ptr_reg->off - smin_val))) {
Edward Creef1174f72017-08-07 15:26:19 +01007303 /* pointer -= K. Subtract it from fixed offset */
Edward Creeb03c9f92017-08-07 15:26:36 +01007304 dst_reg->smin_value = smin_ptr;
7305 dst_reg->smax_value = smax_ptr;
7306 dst_reg->umin_value = umin_ptr;
7307 dst_reg->umax_value = umax_ptr;
Edward Creef1174f72017-08-07 15:26:19 +01007308 dst_reg->var_off = ptr_reg->var_off;
7309 dst_reg->id = ptr_reg->id;
Edward Creeb03c9f92017-08-07 15:26:36 +01007310 dst_reg->off = ptr_reg->off - smin_val;
Daniel Borkmann09625902018-11-01 00:05:52 +01007311 dst_reg->raw = ptr_reg->raw;
Edward Creef1174f72017-08-07 15:26:19 +01007312 break;
7313 }
Edward Creef1174f72017-08-07 15:26:19 +01007314 /* A new variable offset is created. If the subtrahend is known
7315 * nonnegative, then any reg->range we had before is still good.
7316 */
Edward Creeb03c9f92017-08-07 15:26:36 +01007317 if (signed_sub_overflows(smin_ptr, smax_val) ||
7318 signed_sub_overflows(smax_ptr, smin_val)) {
7319 /* Overflow possible, we know nothing */
7320 dst_reg->smin_value = S64_MIN;
7321 dst_reg->smax_value = S64_MAX;
7322 } else {
7323 dst_reg->smin_value = smin_ptr - smax_val;
7324 dst_reg->smax_value = smax_ptr - smin_val;
7325 }
7326 if (umin_ptr < umax_val) {
7327 /* Overflow possible, we know nothing */
7328 dst_reg->umin_value = 0;
7329 dst_reg->umax_value = U64_MAX;
7330 } else {
7331 /* Cannot overflow (as long as bounds are consistent) */
7332 dst_reg->umin_value = umin_ptr - umax_val;
7333 dst_reg->umax_value = umax_ptr - umin_val;
7334 }
Edward Creef1174f72017-08-07 15:26:19 +01007335 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
7336 dst_reg->off = ptr_reg->off;
Daniel Borkmann09625902018-11-01 00:05:52 +01007337 dst_reg->raw = ptr_reg->raw;
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02007338 if (reg_is_pkt_pointer(ptr_reg)) {
Edward Creef1174f72017-08-07 15:26:19 +01007339 dst_reg->id = ++env->id_gen;
7340			/* a negative subtrahend may have advanced pkt_ptr, set range to zero */
Edward Creeb03c9f92017-08-07 15:26:36 +01007341 if (smin_val < 0)
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08007342 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
Edward Creef1174f72017-08-07 15:26:19 +01007343 }
7344 break;
7345 case BPF_AND:
7346 case BPF_OR:
7347 case BPF_XOR:
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08007348 /* bitwise ops on pointers are troublesome, prohibit. */
7349 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
7350 dst, bpf_alu_string[opcode >> 4]);
Edward Creef1174f72017-08-07 15:26:19 +01007351 return -EACCES;
7352 default:
7353 /* other operators (e.g. MUL,LSH) produce non-pointer results */
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08007354 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
7355 dst, bpf_alu_string[opcode >> 4]);
Edward Creef1174f72017-08-07 15:26:19 +01007356 return -EACCES;
7357 }
7358
Alexei Starovoitovbb7f0f92017-12-18 20:12:00 -08007359 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
7360 return -EINVAL;
7361
Edward Creeb03c9f92017-08-07 15:26:36 +01007362 __update_reg_bounds(dst_reg);
7363 __reg_deduce_bounds(dst_reg);
7364 __reg_bound_offset(dst_reg);
Daniel Borkmann0d6303d2019-01-03 00:58:30 +01007365
Daniel Borkmann073815b2021-03-23 15:05:48 +01007366 if (sanitize_check_bounds(env, insn, dst_reg) < 0)
7367 return -EACCES;
Daniel Borkmann7fedb632021-03-24 10:38:26 +01007368 if (sanitize_needed(opcode)) {
7369 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
Daniel Borkmann3d0220f2021-05-21 10:17:36 +00007370 &info, true);
Daniel Borkmann7fedb632021-03-24 10:38:26 +01007371 if (ret < 0)
7372 return sanitize_err(env, insn, ret, off_reg, dst_reg);
Daniel Borkmann0d6303d2019-01-03 00:58:30 +01007373 }
7374
Edward Creef1174f72017-08-07 15:26:19 +01007375 return 0;
7376}
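
/* Worked example (illustrative): for 'r1 += r2' with r1 a packet pointer at
 * off 0 and r2 a scalar in [0, 20], the BPF_ADD case takes the variable-
 * offset path: the signed/unsigned bounds become [0, 20], var_off becomes
 * tnum_add() of the operands, r1 gets a fresh id, and its packet 'range'
 * is cleared until a new bounds check against pkt_end is seen.
 */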
7377
John Fastabend3f50f132020-03-30 14:36:39 -07007378static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
7379 struct bpf_reg_state *src_reg)
7380{
7381 s32 smin_val = src_reg->s32_min_value;
7382 s32 smax_val = src_reg->s32_max_value;
7383 u32 umin_val = src_reg->u32_min_value;
7384 u32 umax_val = src_reg->u32_max_value;
7385
7386 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
7387 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
7388 dst_reg->s32_min_value = S32_MIN;
7389 dst_reg->s32_max_value = S32_MAX;
7390 } else {
7391 dst_reg->s32_min_value += smin_val;
7392 dst_reg->s32_max_value += smax_val;
7393 }
7394 if (dst_reg->u32_min_value + umin_val < umin_val ||
7395 dst_reg->u32_max_value + umax_val < umax_val) {
7396 dst_reg->u32_min_value = 0;
7397 dst_reg->u32_max_value = U32_MAX;
7398 } else {
7399 dst_reg->u32_min_value += umin_val;
7400 dst_reg->u32_max_value += umax_val;
7401 }
7402}
7403
John Fastabend07cd2632020-03-24 10:38:15 -07007404static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
7405 struct bpf_reg_state *src_reg)
7406{
7407 s64 smin_val = src_reg->smin_value;
7408 s64 smax_val = src_reg->smax_value;
7409 u64 umin_val = src_reg->umin_value;
7410 u64 umax_val = src_reg->umax_value;
7411
7412 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
7413 signed_add_overflows(dst_reg->smax_value, smax_val)) {
7414 dst_reg->smin_value = S64_MIN;
7415 dst_reg->smax_value = S64_MAX;
7416 } else {
7417 dst_reg->smin_value += smin_val;
7418 dst_reg->smax_value += smax_val;
7419 }
7420 if (dst_reg->umin_value + umin_val < umin_val ||
7421 dst_reg->umax_value + umax_val < umax_val) {
7422 dst_reg->umin_value = 0;
7423 dst_reg->umax_value = U64_MAX;
7424 } else {
7425 dst_reg->umin_value += umin_val;
7426 dst_reg->umax_value += umax_val;
7427 }
John Fastabend3f50f132020-03-30 14:36:39 -07007428}
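
/* Worked example (illustrative): dst in s64 [10, 20] plus src in s64 [-5, 5]
 * yields [5, 25]. For the unsigned side, dst.umax = U64_MAX - 1 plus
 * src.umax = 2 wraps below 2, tripping the check above and collapsing the
 * unsigned bounds to [0, U64_MAX].
 */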
7429
7430static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
7431 struct bpf_reg_state *src_reg)
7432{
7433 s32 smin_val = src_reg->s32_min_value;
7434 s32 smax_val = src_reg->s32_max_value;
7435 u32 umin_val = src_reg->u32_min_value;
7436 u32 umax_val = src_reg->u32_max_value;
7437
7438 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
7439 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
7440 /* Overflow possible, we know nothing */
7441 dst_reg->s32_min_value = S32_MIN;
7442 dst_reg->s32_max_value = S32_MAX;
7443 } else {
7444 dst_reg->s32_min_value -= smax_val;
7445 dst_reg->s32_max_value -= smin_val;
7446 }
7447 if (dst_reg->u32_min_value < umax_val) {
7448 /* Overflow possible, we know nothing */
7449 dst_reg->u32_min_value = 0;
7450 dst_reg->u32_max_value = U32_MAX;
7451 } else {
7452 /* Cannot overflow (as long as bounds are consistent) */
7453 dst_reg->u32_min_value -= umax_val;
7454 dst_reg->u32_max_value -= umin_val;
7455 }
John Fastabend07cd2632020-03-24 10:38:15 -07007456}
7457
7458static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
7459 struct bpf_reg_state *src_reg)
7460{
7461 s64 smin_val = src_reg->smin_value;
7462 s64 smax_val = src_reg->smax_value;
7463 u64 umin_val = src_reg->umin_value;
7464 u64 umax_val = src_reg->umax_value;
7465
7466 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
7467 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
7468 /* Overflow possible, we know nothing */
7469 dst_reg->smin_value = S64_MIN;
7470 dst_reg->smax_value = S64_MAX;
7471 } else {
7472 dst_reg->smin_value -= smax_val;
7473 dst_reg->smax_value -= smin_val;
7474 }
7475 if (dst_reg->umin_value < umax_val) {
7476 /* Overflow possible, we know nothing */
7477 dst_reg->umin_value = 0;
7478 dst_reg->umax_value = U64_MAX;
7479 } else {
7480 /* Cannot overflow (as long as bounds are consistent) */
7481 dst_reg->umin_value -= umax_val;
7482 dst_reg->umax_value -= umin_val;
7483 }
John Fastabend3f50f132020-03-30 14:36:39 -07007484}
7485
7486static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
7487 struct bpf_reg_state *src_reg)
7488{
7489 s32 smin_val = src_reg->s32_min_value;
7490 u32 umin_val = src_reg->u32_min_value;
7491 u32 umax_val = src_reg->u32_max_value;
7492
7493 if (smin_val < 0 || dst_reg->s32_min_value < 0) {
7494 /* Ain't nobody got time to multiply that sign */
7495 __mark_reg32_unbounded(dst_reg);
7496 return;
7497 }
7498 /* Both values are positive, so we can work with unsigned and
7499 * copy the result to signed (unless it exceeds S32_MAX).
7500 */
7501 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
7502 /* Potential overflow, we know nothing */
7503 __mark_reg32_unbounded(dst_reg);
7504 return;
7505 }
7506 dst_reg->u32_min_value *= umin_val;
7507 dst_reg->u32_max_value *= umax_val;
7508 if (dst_reg->u32_max_value > S32_MAX) {
7509 /* Overflow possible, we know nothing */
7510 dst_reg->s32_min_value = S32_MIN;
7511 dst_reg->s32_max_value = S32_MAX;
7512 } else {
7513 dst_reg->s32_min_value = dst_reg->u32_min_value;
7514 dst_reg->s32_max_value = dst_reg->u32_max_value;
7515 }
John Fastabend07cd2632020-03-24 10:38:15 -07007516}
7517
7518static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
7519 struct bpf_reg_state *src_reg)
7520{
7521 s64 smin_val = src_reg->smin_value;
7522 u64 umin_val = src_reg->umin_value;
7523 u64 umax_val = src_reg->umax_value;
7524
John Fastabend07cd2632020-03-24 10:38:15 -07007525 if (smin_val < 0 || dst_reg->smin_value < 0) {
7526 /* Ain't nobody got time to multiply that sign */
John Fastabend3f50f132020-03-30 14:36:39 -07007527 __mark_reg64_unbounded(dst_reg);
John Fastabend07cd2632020-03-24 10:38:15 -07007528 return;
7529 }
7530 /* Both values are positive, so we can work with unsigned and
7531 * copy the result to signed (unless it exceeds S64_MAX).
7532 */
7533 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
7534 /* Potential overflow, we know nothing */
John Fastabend3f50f132020-03-30 14:36:39 -07007535 __mark_reg64_unbounded(dst_reg);
John Fastabend07cd2632020-03-24 10:38:15 -07007536 return;
7537 }
7538 dst_reg->umin_value *= umin_val;
7539 dst_reg->umax_value *= umax_val;
7540 if (dst_reg->umax_value > S64_MAX) {
7541 /* Overflow possible, we know nothing */
7542 dst_reg->smin_value = S64_MIN;
7543 dst_reg->smax_value = S64_MAX;
7544 } else {
7545 dst_reg->smin_value = dst_reg->umin_value;
7546 dst_reg->smax_value = dst_reg->umax_value;
7547 }
7548}
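
/* Why the U32_MAX guard above suffices (illustrative): with both umax values
 * <= U32_MAX, the product is at most (2^32 - 1)^2 < 2^64, so the u64
 * multiplications cannot wrap; the separate S64_MAX comparison then decides
 * whether the result also fits the signed range.
 */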
7549
John Fastabend3f50f132020-03-30 14:36:39 -07007550static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
7551 struct bpf_reg_state *src_reg)
John Fastabend07cd2632020-03-24 10:38:15 -07007552{
John Fastabend3f50f132020-03-30 14:36:39 -07007553 bool src_known = tnum_subreg_is_const(src_reg->var_off);
7554 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
7555 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
7556 s32 smin_val = src_reg->s32_min_value;
7557 u32 umax_val = src_reg->u32_max_value;
7558
Daniel Borkmann049c4e12021-05-10 13:10:44 +00007559 if (src_known && dst_known) {
7560 __mark_reg32_known(dst_reg, var32_off.value);
John Fastabend3f50f132020-03-30 14:36:39 -07007561 return;
Daniel Borkmann049c4e12021-05-10 13:10:44 +00007562 }
John Fastabend07cd2632020-03-24 10:38:15 -07007563
7564 /* We get our minimum from the var_off, since that's inherently
7565 * bitwise. Our maximum is the minimum of the operands' maxima.
7566 */
John Fastabend3f50f132020-03-30 14:36:39 -07007567 dst_reg->u32_min_value = var32_off.value;
7568 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
7569 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
7570 /* Lose signed bounds when ANDing negative numbers,
7571 * ain't nobody got time for that.
7572 */
7573 dst_reg->s32_min_value = S32_MIN;
7574 dst_reg->s32_max_value = S32_MAX;
7575 } else {
7576 /* ANDing two positives gives a positive, so safe to
7577 * cast result into s64.
7578 */
7579 dst_reg->s32_min_value = dst_reg->u32_min_value;
7580 dst_reg->s32_max_value = dst_reg->u32_max_value;
7581 }
John Fastabend3f50f132020-03-30 14:36:39 -07007582}
7583
7584static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
7585 struct bpf_reg_state *src_reg)
7586{
7587 bool src_known = tnum_is_const(src_reg->var_off);
7588 bool dst_known = tnum_is_const(dst_reg->var_off);
7589 s64 smin_val = src_reg->smin_value;
7590 u64 umax_val = src_reg->umax_value;
7591
7592 if (src_known && dst_known) {
John Fastabend4fbb38a2020-09-24 11:45:06 -07007593 __mark_reg_known(dst_reg, dst_reg->var_off.value);
John Fastabend3f50f132020-03-30 14:36:39 -07007594 return;
7595 }
7596
7597 /* We get our minimum from the var_off, since that's inherently
7598 * bitwise. Our maximum is the minimum of the operands' maxima.
7599 */
John Fastabend07cd2632020-03-24 10:38:15 -07007600 dst_reg->umin_value = dst_reg->var_off.value;
7601 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
7602 if (dst_reg->smin_value < 0 || smin_val < 0) {
7603 /* Lose signed bounds when ANDing negative numbers,
7604 * ain't nobody got time for that.
7605 */
7606 dst_reg->smin_value = S64_MIN;
7607 dst_reg->smax_value = S64_MAX;
7608 } else {
7609 /* ANDing two positives gives a positive, so safe to
7610 * cast result into s64.
7611 */
7612 dst_reg->smin_value = dst_reg->umin_value;
7613 dst_reg->smax_value = dst_reg->umax_value;
7614 }
7615 /* We may learn something more from the var_off */
7616 __update_reg_bounds(dst_reg);
7617}
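
/* Worked example (illustrative): by the time this runs, dst_reg->var_off
 * already holds tnum_and() of the operands, so its .value (bits known to be
 * one) supplies umin. E.g. dst in [0, 255] ANDed with constant 0x0f gives
 * umin = 0 and umax = min(255, 15) = 15; both operands being nonnegative,
 * the signed bounds mirror the unsigned ones.
 */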
7618
static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
				struct bpf_reg_state *src_reg)
{
	bool src_known = tnum_subreg_is_const(src_reg->var_off);
	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
	s32 smin_val = src_reg->s32_min_value;
	u32 umin_val = src_reg->u32_min_value;

	if (src_known && dst_known) {
		__mark_reg32_known(dst_reg, var32_off.value);
		return;
	}

	/* We get our maximum from the var_off, and our minimum is the
	 * maximum of the operands' minima.
	 */
	dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
		/* Lose signed bounds when ORing negative numbers,
		 * ain't nobody got time for that.
		 */
		dst_reg->s32_min_value = S32_MIN;
		dst_reg->s32_max_value = S32_MAX;
	} else {
		/* ORing two positives gives a positive, so safe to
		 * cast result into s32.
		 */
		dst_reg->s32_min_value = dst_reg->u32_min_value;
		dst_reg->s32_max_value = dst_reg->u32_max_value;
	}
}

static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
			      struct bpf_reg_state *src_reg)
{
	bool src_known = tnum_is_const(src_reg->var_off);
	bool dst_known = tnum_is_const(dst_reg->var_off);
	s64 smin_val = src_reg->smin_value;
	u64 umin_val = src_reg->umin_value;

	if (src_known && dst_known) {
		__mark_reg_known(dst_reg, dst_reg->var_off.value);
		return;
	}

	/* We get our maximum from the var_off, and our minimum is the
	 * maximum of the operands' minima.
	 */
	dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
	if (dst_reg->smin_value < 0 || smin_val < 0) {
		/* Lose signed bounds when ORing negative numbers,
		 * ain't nobody got time for that.
		 */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
	} else {
		/* ORing two positives gives a positive, so safe to
		 * cast result into s64.
		 */
		dst_reg->smin_value = dst_reg->umin_value;
		dst_reg->smax_value = dst_reg->umax_value;
	}
	/* We may learn something more from the var_off */
	__update_reg_bounds(dst_reg);
}

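/* Worked example for the OR bounds above (illustrative values, not
 * part of the verifier): if dst_reg has var_off = (value 0x10, mask
 * 0x0f), so umin 0x10 and umax 0x1f, and src_reg is the constant 0x21,
 * tnum_or() yields var_off = (value 0x31, mask 0x0e). The code above
 * then derives umin = max(0x10, 0x21) = 0x21 and umax = 0x31 | 0x0e =
 * 0x3f, and since both operands are non-negative the signed bounds
 * become [0x21, 0x3f].
 */
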
static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{
	bool src_known = tnum_subreg_is_const(src_reg->var_off);
	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
	s32 smin_val = src_reg->s32_min_value;

	if (src_known && dst_known) {
		__mark_reg32_known(dst_reg, var32_off.value);
		return;
	}

	/* We get both minimum and maximum from the var32_off. */
	dst_reg->u32_min_value = var32_off.value;
	dst_reg->u32_max_value = var32_off.value | var32_off.mask;

	if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
		/* XORing two non-negative numbers gives a non-negative
		 * result, so safe to cast the u32 result into s32.
		 */
		dst_reg->s32_min_value = dst_reg->u32_min_value;
		dst_reg->s32_max_value = dst_reg->u32_max_value;
	} else {
		dst_reg->s32_min_value = S32_MIN;
		dst_reg->s32_max_value = S32_MAX;
	}
}

static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{
	bool src_known = tnum_is_const(src_reg->var_off);
	bool dst_known = tnum_is_const(dst_reg->var_off);
	s64 smin_val = src_reg->smin_value;

	if (src_known && dst_known) {
		/* dst_reg->var_off.value has been updated earlier */
		__mark_reg_known(dst_reg, dst_reg->var_off.value);
		return;
	}

	/* We get both minimum and maximum from the var_off. */
	dst_reg->umin_value = dst_reg->var_off.value;
	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;

	if (dst_reg->smin_value >= 0 && smin_val >= 0) {
		/* XORing two non-negative numbers gives a non-negative
		 * result, so safe to cast the u64 result into s64.
		 */
		dst_reg->smin_value = dst_reg->umin_value;
		dst_reg->smax_value = dst_reg->umax_value;
	} else {
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
	}

	__update_reg_bounds(dst_reg);
}

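/* Worked example for the XOR bounds above (illustrative values, not
 * part of the verifier): if dst_reg has var_off = (value 0x00, mask
 * 0xf0), i.e. the low nibble is known zero, and src_reg is the
 * constant 0x0f, tnum_xor() yields var_off = (value 0x0f, mask 0xf0).
 * The code above then derives umin = 0x0f and umax = 0x0f | 0xf0 =
 * 0xff; both operands are non-negative, so the signed bounds become
 * [0x0f, 0xff] too.
 */
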
static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
				   u64 umin_val, u64 umax_val)
{
	/* We lose all sign bit information (except what we can pick
	 * up from var_off)
	 */
	dst_reg->s32_min_value = S32_MIN;
	dst_reg->s32_max_value = S32_MAX;
	/* If we might shift our top bit out, then we know nothing */
	if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
		dst_reg->u32_min_value = 0;
		dst_reg->u32_max_value = U32_MAX;
	} else {
		dst_reg->u32_min_value <<= umin_val;
		dst_reg->u32_max_value <<= umax_val;
	}
}

static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{
	u32 umax_val = src_reg->u32_max_value;
	u32 umin_val = src_reg->u32_min_value;
	/* u32 alu operation will zext upper bits */
	struct tnum subreg = tnum_subreg(dst_reg->var_off);

	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
	dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
	/* Not strictly required, but to be careful mark the reg64 bounds as
	 * unknown so that we are forced to pick them up from the tnum and
	 * zext later; if some path skips this step we are still safe.
	 */
	__mark_reg64_unbounded(dst_reg);
	__update_reg32_bounds(dst_reg);
}

static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
				   u64 umin_val, u64 umax_val)
{
	/* Special case <<32 because it is a common compiler pattern to sign
	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
	 * positive we know this shift will also be positive so we can track
	 * bounds correctly. Otherwise we lose all sign bit information except
	 * what we can pick up from var_off. Perhaps we can generalize this
	 * later to shifts of any length.
	 */
	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
	else
		dst_reg->smax_value = S64_MAX;

	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
	else
		dst_reg->smin_value = S64_MIN;

	/* If we might shift our top bit out, then we know nothing */
	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
		dst_reg->umin_value = 0;
		dst_reg->umax_value = U64_MAX;
	} else {
		dst_reg->umin_value <<= umin_val;
		dst_reg->umax_value <<= umax_val;
	}
}

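/* The <<32 special case above matches the compiler idiom for sign
 * extending a 32-bit subregister, e.g. (hypothetical BPF assembly):
 *
 *   r1 <<= 32
 *   r1 s>>= 32    // r1 now holds (s64)(s32)r1
 *
 * If the 32-bit bounds were non-negative before the shift, say an s32
 * range of [0, 100], the code above keeps the 64-bit signed bounds as
 * [0, 100ULL << 32] instead of giving up with [S64_MIN, S64_MAX].
 */
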
static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{
	u64 umax_val = src_reg->umax_value;
	u64 umin_val = src_reg->umin_value;

	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);

	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
	/* We may learn something more from the var_off */
	__update_reg_bounds(dst_reg);
}

static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{
	struct tnum subreg = tnum_subreg(dst_reg->var_off);
	u32 umax_val = src_reg->u32_max_value;
	u32 umin_val = src_reg->u32_min_value;

	/* BPF_RSH is an unsigned shift. If the value in dst_reg might
	 * be negative, then either:
	 * 1) src_reg might be zero, so the sign bit of the result is
	 *    unknown, so we lose our signed bounds
	 * 2) it's known negative, thus the unsigned bounds capture the
	 *    signed bounds
	 * 3) the signed bounds cross zero, so they tell us nothing
	 *    about the result
	 * If the value in dst_reg is known nonnegative, then again the
	 * unsigned bounds capture the signed bounds.
	 * Thus, in all cases it suffices to blow away our signed bounds
	 * and rely on inferring new ones from the unsigned bounds and
	 * var_off of the result.
	 */
	dst_reg->s32_min_value = S32_MIN;
	dst_reg->s32_max_value = S32_MAX;

	dst_reg->var_off = tnum_rshift(subreg, umin_val);
	dst_reg->u32_min_value >>= umax_val;
	dst_reg->u32_max_value >>= umin_val;

	__mark_reg64_unbounded(dst_reg);
	__update_reg32_bounds(dst_reg);
}

static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{
	u64 umax_val = src_reg->umax_value;
	u64 umin_val = src_reg->umin_value;

	/* BPF_RSH is an unsigned shift. If the value in dst_reg might
	 * be negative, then either:
	 * 1) src_reg might be zero, so the sign bit of the result is
	 *    unknown, so we lose our signed bounds
	 * 2) it's known negative, thus the unsigned bounds capture the
	 *    signed bounds
	 * 3) the signed bounds cross zero, so they tell us nothing
	 *    about the result
	 * If the value in dst_reg is known nonnegative, then again the
	 * unsigned bounds capture the signed bounds.
	 * Thus, in all cases it suffices to blow away our signed bounds
	 * and rely on inferring new ones from the unsigned bounds and
	 * var_off of the result.
	 */
	dst_reg->smin_value = S64_MIN;
	dst_reg->smax_value = S64_MAX;
	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
	dst_reg->umin_value >>= umax_val;
	dst_reg->umax_value >>= umin_val;

	/* It's not easy to operate on alu32 bounds here because it depends
	 * on bits being shifted in. Take the easy way out and mark unbounded
	 * so we can recalculate later from tnum.
	 */
	__mark_reg32_unbounded(dst_reg);
	__update_reg_bounds(dst_reg);
}

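/* Worked example for the unsigned bounds above (illustrative values,
 * not part of the verifier): if dst_reg has unsigned range
 * [0x100, 0x1ff] and src_reg's shift amount is in [4, 8], the code
 * computes umin = 0x100 >> 8 = 0x1 and umax = 0x1ff >> 4 = 0x1f: the
 * smallest value shifted by the largest amount, and the largest value
 * shifted by the smallest amount.
 */
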
static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
				  struct bpf_reg_state *src_reg)
{
	u64 umin_val = src_reg->u32_min_value;

	/* Upon reaching here, src_known is true and
	 * umax_val is equal to umin_val.
	 */
	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);

	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);

	/* blow away the dst_reg umin_value/umax_value and rely on
	 * dst_reg var_off to refine the result.
	 */
	dst_reg->u32_min_value = 0;
	dst_reg->u32_max_value = U32_MAX;

	__mark_reg64_unbounded(dst_reg);
	__update_reg32_bounds(dst_reg);
}

static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
				struct bpf_reg_state *src_reg)
{
	u64 umin_val = src_reg->umin_value;

	/* Upon reaching here, src_known is true and umax_val is equal
	 * to umin_val.
	 */
	dst_reg->smin_value >>= umin_val;
	dst_reg->smax_value >>= umin_val;

	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);

	/* blow away the dst_reg umin_value/umax_value and rely on
	 * dst_reg var_off to refine the result.
	 */
	dst_reg->umin_value = 0;
	dst_reg->umax_value = U64_MAX;

	/* It's not easy to operate on alu32 bounds here because it depends
	 * on bits being shifted in from the upper 32 bits. Take the easy way
	 * out and mark unbounded so we can recalculate later from tnum.
	 */
	__mark_reg32_unbounded(dst_reg);
	__update_reg_bounds(dst_reg);
}

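/* Worked example for the signed bounds above (illustrative values, not
 * part of the verifier): for a known shift of 4, a signed range
 * [-0x100, 0x7f] becomes [-0x10, 0x7]. Arithmetic right shift is
 * monotonic, so shifting both endpoints by the constant amount
 * preserves the ordering. The unsigned bounds cannot be shifted the
 * same way, because the replicated sign bit may be 0 or 1, hence they
 * are reset above.
 */
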
/* WARNING: This function does calculations on 64-bit values, but the actual
 * execution may occur on 32-bit values. Therefore, things like bitshifts
 * need extra checks in the 32-bit case.
 */
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
				      struct bpf_insn *insn,
				      struct bpf_reg_state *dst_reg,
				      struct bpf_reg_state src_reg)
{
	struct bpf_reg_state *regs = cur_regs(env);
	u8 opcode = BPF_OP(insn->code);
	bool src_known;
	s64 smin_val, smax_val;
	u64 umin_val, umax_val;
	s32 s32_min_val, s32_max_val;
	u32 u32_min_val, u32_max_val;
	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
	int ret;

	smin_val = src_reg.smin_value;
	smax_val = src_reg.smax_value;
	umin_val = src_reg.umin_value;
	umax_val = src_reg.umax_value;

	s32_min_val = src_reg.s32_min_value;
	s32_max_val = src_reg.s32_max_value;
	u32_min_val = src_reg.u32_min_value;
	u32_max_val = src_reg.u32_max_value;

	if (alu32) {
		src_known = tnum_subreg_is_const(src_reg.var_off);
		if ((src_known &&
		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
			/* Taint dst register if offset had invalid bounds
			 * derived from e.g. dead branches.
			 */
			__mark_reg_unknown(env, dst_reg);
			return 0;
		}
	} else {
		src_known = tnum_is_const(src_reg.var_off);
		if ((src_known &&
		     (smin_val != smax_val || umin_val != umax_val)) ||
		    smin_val > smax_val || umin_val > umax_val) {
			/* Taint dst register if offset had invalid bounds
			 * derived from e.g. dead branches.
			 */
			__mark_reg_unknown(env, dst_reg);
			return 0;
		}
	}

	if (!src_known &&
	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
		__mark_reg_unknown(env, dst_reg);
		return 0;
	}

	if (sanitize_needed(opcode)) {
		ret = sanitize_val_alu(env, insn);
		if (ret < 0)
			return sanitize_err(env, insn, ret, NULL, NULL);
	}

	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
	 * There are two classes of instructions: for the first class we track
	 * both alu32 and alu64 sign/unsigned bounds independently; this
	 * provides the greatest amount of precision when alu operations are
	 * mixed with jmp32 operations. These operations are BPF_ADD, BPF_SUB,
	 * BPF_MUL, BPF_AND, BPF_OR, and BPF_XOR. This is possible because
	 * these ops have fairly easy to understand and calculate behavior in
	 * both 32-bit and 64-bit alu ops. See alu32 verifier tests for
	 * examples. The second class of operations, BPF_LSH, BPF_RSH, and
	 * BPF_ARSH, however, are not so easy with regards to tracking
	 * sign/unsigned bounds because the bits may cross subreg boundaries
	 * in the alu64 case. When this happens we mark the reg unbounded in
	 * the subreg bound space and use the resulting tnum to calculate an
	 * approximation of the sign/unsigned bounds.
	 */
	switch (opcode) {
	case BPF_ADD:
		scalar32_min_max_add(dst_reg, &src_reg);
		scalar_min_max_add(dst_reg, &src_reg);
		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
		break;
	case BPF_SUB:
		scalar32_min_max_sub(dst_reg, &src_reg);
		scalar_min_max_sub(dst_reg, &src_reg);
		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
		break;
	case BPF_MUL:
		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
		scalar32_min_max_mul(dst_reg, &src_reg);
		scalar_min_max_mul(dst_reg, &src_reg);
		break;
	case BPF_AND:
		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
		scalar32_min_max_and(dst_reg, &src_reg);
		scalar_min_max_and(dst_reg, &src_reg);
		break;
	case BPF_OR:
		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
		scalar32_min_max_or(dst_reg, &src_reg);
		scalar_min_max_or(dst_reg, &src_reg);
		break;
	case BPF_XOR:
		dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
		scalar32_min_max_xor(dst_reg, &src_reg);
		scalar_min_max_xor(dst_reg, &src_reg);
		break;
	case BPF_LSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		if (alu32)
			scalar32_min_max_lsh(dst_reg, &src_reg);
		else
			scalar_min_max_lsh(dst_reg, &src_reg);
		break;
	case BPF_RSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		if (alu32)
			scalar32_min_max_rsh(dst_reg, &src_reg);
		else
			scalar_min_max_rsh(dst_reg, &src_reg);
		break;
	case BPF_ARSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		if (alu32)
			scalar32_min_max_arsh(dst_reg, &src_reg);
		else
			scalar_min_max_arsh(dst_reg, &src_reg);
		break;
	default:
		mark_reg_unknown(env, regs, insn->dst_reg);
		break;
	}

	/* ALU32 ops are zero extended into 64bit register */
	if (alu32)
		zext_32_to_64(dst_reg);

	__update_reg_bounds(dst_reg);
	__reg_deduce_bounds(dst_reg);
	__reg_bound_offset(dst_reg);
	return 0;
}

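/* Example of the alu32 flow above (illustrative, not part of the
 * verifier): for a 32-bit BPF_ADD with a dst subreg in [10, 20] and a
 * constant src of 5, scalar32_min_max_add() produces the u32 range
 * [15, 25]; zext_32_to_64() then propagates that into the 64-bit
 * bounds, since a 32-bit ALU op leaves the upper 32 bits zero.
 */
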
/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
 * and var_off.
 */
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
				   struct bpf_insn *insn)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
	u8 opcode = BPF_OP(insn->code);
	int err;

	dst_reg = &regs[insn->dst_reg];
	src_reg = NULL;
	if (dst_reg->type != SCALAR_VALUE)
		ptr_reg = dst_reg;
	else
		/* Make sure ID is cleared otherwise dst_reg min/max could be
		 * incorrectly propagated into other registers by find_equal_scalars()
		 */
		dst_reg->id = 0;
	if (BPF_SRC(insn->code) == BPF_X) {
		src_reg = &regs[insn->src_reg];
		if (src_reg->type != SCALAR_VALUE) {
			if (dst_reg->type != SCALAR_VALUE) {
				/* Combining two pointers by any ALU op yields
				 * an arbitrary scalar. Disallow all math except
				 * pointer subtraction
				 */
				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
					mark_reg_unknown(env, regs, insn->dst_reg);
					return 0;
				}
				verbose(env, "R%d pointer %s pointer prohibited\n",
					insn->dst_reg,
					bpf_alu_string[opcode >> 4]);
				return -EACCES;
			} else {
				/* scalar += pointer
				 * This is legal, but we have to reverse our
				 * src/dest handling in computing the range
				 */
				err = mark_chain_precision(env, insn->dst_reg);
				if (err)
					return err;
				return adjust_ptr_min_max_vals(env, insn,
							       src_reg, dst_reg);
			}
		} else if (ptr_reg) {
			/* pointer += scalar */
			err = mark_chain_precision(env, insn->src_reg);
			if (err)
				return err;
			return adjust_ptr_min_max_vals(env, insn,
						       dst_reg, src_reg);
		}
	} else {
		/* Pretend the src is a reg with a known value, since we only
		 * need to be able to read from this state.
		 */
		off_reg.type = SCALAR_VALUE;
		__mark_reg_known(&off_reg, insn->imm);
		src_reg = &off_reg;
		if (ptr_reg) /* pointer += K */
			return adjust_ptr_min_max_vals(env, insn,
						       ptr_reg, src_reg);
	}

	/* Got here implies adding two SCALAR_VALUEs */
	if (WARN_ON_ONCE(ptr_reg)) {
		print_verifier_state(env, state);
		verbose(env, "verifier internal error: unexpected ptr_reg\n");
		return -EINVAL;
	}
	if (WARN_ON(!src_reg)) {
		print_verifier_state(env, state);
		verbose(env, "verifier internal error: no src_reg\n");
		return -EINVAL;
	}
	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
}

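/* Dispatch summary for the function above (illustrative BPF snippets,
 * where r1/r2 hold scalars and r3/r4 hold pointers):
 *
 *   r1 += r2    // scalar += scalar  -> adjust_scalar_min_max_vals()
 *   r3 += r1    // pointer += scalar -> adjust_ptr_min_max_vals()
 *   r1 += r3    // scalar += pointer -> adjust_ptr_min_max_vals(),
 *               //                      with src/dst roles swapped
 *   r3 -= r4    // pointer - pointer -> unknown scalar, allowed only
 *               //                      for BPF_SUB with ptr leaks on
 */
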
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = cur_regs(env);
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode == BPF_END || opcode == BPF_NEG) {
		if (opcode == BPF_NEG) {
			if (BPF_SRC(insn->code) != 0 ||
			    insn->src_reg != BPF_REG_0 ||
			    insn->off != 0 || insn->imm != 0) {
				verbose(env, "BPF_NEG uses reserved fields\n");
				return -EINVAL;
			}
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
			    BPF_CLASS(insn->code) == BPF_ALU64) {
				verbose(env, "BPF_END uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src operand */
		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->dst_reg)) {
			verbose(env, "R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(env, insn->dst_reg, DST_OP);
		if (err)
			return err;

	} else if (opcode == BPF_MOV) {

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose(env, "BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}

			/* check src operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose(env, "BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check dest operand, mark as required later */
		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
		if (err)
			return err;

		if (BPF_SRC(insn->code) == BPF_X) {
			struct bpf_reg_state *src_reg = regs + insn->src_reg;
			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;

			if (BPF_CLASS(insn->code) == BPF_ALU64) {
				/* case: R1 = R2
				 * copy register state to dest reg
				 */
				if (src_reg->type == SCALAR_VALUE && !src_reg->id)
					/* Assign src and dst registers the same ID
					 * that will be used by find_equal_scalars()
					 * to propagate min/max range.
					 */
					src_reg->id = ++env->id_gen;
				*dst_reg = *src_reg;
				dst_reg->live |= REG_LIVE_WRITTEN;
				dst_reg->subreg_def = DEF_NOT_SUBREG;
			} else {
				/* R1 = (u32) R2 */
				if (is_pointer_value(env, insn->src_reg)) {
					verbose(env,
						"R%d partial copy of pointer\n",
						insn->src_reg);
					return -EACCES;
				} else if (src_reg->type == SCALAR_VALUE) {
					*dst_reg = *src_reg;
					/* Make sure ID is cleared otherwise
					 * dst_reg min/max could be incorrectly
					 * propagated into src_reg by find_equal_scalars()
					 */
					dst_reg->id = 0;
					dst_reg->live |= REG_LIVE_WRITTEN;
					dst_reg->subreg_def = env->insn_idx + 1;
				} else {
					mark_reg_unknown(env, regs,
							 insn->dst_reg);
				}
				zext_32_to_64(dst_reg);
			}
		} else {
			/* case: R = imm
			 * remember the value we stored into this reg
			 */
			/* clear any state __mark_reg_known doesn't set */
			mark_reg_unknown(env, regs, insn->dst_reg);
			regs[insn->dst_reg].type = SCALAR_VALUE;
			if (BPF_CLASS(insn->code) == BPF_ALU64) {
				__mark_reg_known(regs + insn->dst_reg,
						 insn->imm);
			} else {
				__mark_reg_known(regs + insn->dst_reg,
						 (u32)insn->imm);
			}
		}

	} else if (opcode > BPF_END) {
		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
		return -EINVAL;

	} else {	/* all other ALU ops: and, sub, xor, add, ... */

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose(env, "BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
			/* check src1 operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose(env, "BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src2 operand */
		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
			verbose(env, "div by zero\n");
			return -EINVAL;
		}

		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;

			if (insn->imm < 0 || insn->imm >= size) {
				verbose(env, "invalid shift %d\n", insn->imm);
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
		if (err)
			return err;

		return adjust_reg_min_max_vals(env, insn);
	}

	return 0;
}

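/* Example of the ID propagation set up in the BPF_MOV case above
 * (illustrative): after
 *
 *   r2 = r1          // both now share the same reg->id
 *   if r2 > 8 goto l
 *
 * find_equal_scalars() can narrow r1's range on the fall-through path
 * as well, because the shared ID records that the two registers hold
 * the same unknown scalar. The 32-bit move clears the ID instead,
 * since the truncated copy is no longer equal to its source.
 */
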
static void __find_good_pkt_pointers(struct bpf_func_state *state,
				     struct bpf_reg_state *dst_reg,
				     enum bpf_reg_type type, int new_range)
{
	struct bpf_reg_state *reg;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		if (reg->type == type && reg->id == dst_reg->id)
			/* keep the maximum range already checked */
			reg->range = max(reg->range, new_range);
	}

	bpf_for_each_spilled_reg(i, state, reg) {
		if (!reg)
			continue;
		if (reg->type == type && reg->id == dst_reg->id)
			reg->range = max(reg->range, new_range);
	}
}

static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
				   struct bpf_reg_state *dst_reg,
				   enum bpf_reg_type type,
				   bool range_right_open)
{
	int new_range, i;

	if (dst_reg->off < 0 ||
	    (dst_reg->off == 0 && range_right_open))
		/* This doesn't give us any range */
		return;

	if (dst_reg->umax_value > MAX_PACKET_OFF ||
	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
		/* Risk of overflow. For instance, ptr + (1<<63) may be less
		 * than pkt_end, but that's because it's also less than pkt.
		 */
		return;

	new_range = dst_reg->off;
	if (range_right_open)
		new_range--;

	/* Examples for register markings:
	 *
	 * pkt_data in dst register:
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (r2 > pkt_end) goto <handle exception>
	 *   <access okay>
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (r2 < pkt_end) goto <access okay>
	 *   <handle exception>
	 *
	 *   Where:
	 *     r2 == dst_reg, pkt_end == src_reg
	 *     r2=pkt(id=n,off=8,r=0)
	 *     r3=pkt(id=n,off=0,r=0)
	 *
	 * pkt_data in src register:
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (pkt_end >= r2) goto <access okay>
	 *   <handle exception>
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (pkt_end <= r2) goto <handle exception>
	 *   <access okay>
	 *
	 *   Where:
	 *     pkt_end == dst_reg, r2 == src_reg
	 *     r2=pkt(id=n,off=8,r=0)
	 *     r3=pkt(id=n,off=0,r=0)
	 *
	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
	 * and [r3, r3 + 8-1) respectively is safe to access depending on
	 * the check.
	 */

	/* If our ids match, then we must have the same max_value. And we
	 * don't care about the other reg's fixed offset, since if it's too big
	 * the range won't allow anything.
	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
	 */
	for (i = 0; i <= vstate->curframe; i++)
		__find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
					 new_range);
}

static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
{
	struct tnum subreg = tnum_subreg(reg->var_off);
	s32 sval = (s32)val;

	switch (opcode) {
	case BPF_JEQ:
		if (tnum_is_const(subreg))
			return !!tnum_equals_const(subreg, val);
		break;
	case BPF_JNE:
		if (tnum_is_const(subreg))
			return !tnum_equals_const(subreg, val);
		break;
	case BPF_JSET:
		if ((~subreg.mask & subreg.value) & val)
			return 1;
		if (!((subreg.mask | subreg.value) & val))
			return 0;
		break;
	case BPF_JGT:
		if (reg->u32_min_value > val)
			return 1;
		else if (reg->u32_max_value <= val)
			return 0;
		break;
	case BPF_JSGT:
		if (reg->s32_min_value > sval)
			return 1;
		else if (reg->s32_max_value <= sval)
			return 0;
		break;
	case BPF_JLT:
		if (reg->u32_max_value < val)
			return 1;
		else if (reg->u32_min_value >= val)
			return 0;
		break;
	case BPF_JSLT:
		if (reg->s32_max_value < sval)
			return 1;
		else if (reg->s32_min_value >= sval)
			return 0;
		break;
	case BPF_JGE:
		if (reg->u32_min_value >= val)
			return 1;
		else if (reg->u32_max_value < val)
			return 0;
		break;
	case BPF_JSGE:
		if (reg->s32_min_value >= sval)
			return 1;
		else if (reg->s32_max_value < sval)
			return 0;
		break;
	case BPF_JLE:
		if (reg->u32_max_value <= val)
			return 1;
		else if (reg->u32_min_value > val)
			return 0;
		break;
	case BPF_JSLE:
		if (reg->s32_max_value <= sval)
			return 1;
		else if (reg->s32_min_value > sval)
			return 0;
		break;
	}

	return -1;
}

static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
{
	s64 sval = (s64)val;

	switch (opcode) {
	case BPF_JEQ:
		if (tnum_is_const(reg->var_off))
			return !!tnum_equals_const(reg->var_off, val);
		break;
	case BPF_JNE:
		if (tnum_is_const(reg->var_off))
			return !tnum_equals_const(reg->var_off, val);
		break;
	case BPF_JSET:
		if ((~reg->var_off.mask & reg->var_off.value) & val)
			return 1;
		if (!((reg->var_off.mask | reg->var_off.value) & val))
			return 0;
		break;
	case BPF_JGT:
		if (reg->umin_value > val)
			return 1;
		else if (reg->umax_value <= val)
			return 0;
		break;
	case BPF_JSGT:
		if (reg->smin_value > sval)
			return 1;
		else if (reg->smax_value <= sval)
			return 0;
		break;
	case BPF_JLT:
		if (reg->umax_value < val)
			return 1;
		else if (reg->umin_value >= val)
			return 0;
		break;
	case BPF_JSLT:
		if (reg->smax_value < sval)
			return 1;
		else if (reg->smin_value >= sval)
			return 0;
		break;
	case BPF_JGE:
		if (reg->umin_value >= val)
			return 1;
		else if (reg->umax_value < val)
			return 0;
		break;
	case BPF_JSGE:
		if (reg->smin_value >= sval)
			return 1;
		else if (reg->smax_value < sval)
			return 0;
		break;
	case BPF_JLE:
		if (reg->umax_value <= val)
			return 1;
		else if (reg->umin_value > val)
			return 0;
		break;
	case BPF_JSLE:
		if (reg->smax_value <= sval)
			return 1;
		else if (reg->smin_value > sval)
			return 0;
		break;
	}

	return -1;
}

/* compute branch direction of the expression "if (reg opcode val) goto target;"
 * and return:
 *  1 - branch will be taken and "goto target" will be executed
 *  0 - branch will not be taken and fall-through to next insn
 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value
 *      range [0,10]
 */
static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
			   bool is_jmp32)
{
	if (__is_pointer_value(false, reg)) {
		if (!reg_type_not_null(reg->type))
			return -1;

		/* If pointer is valid tests against zero will fail so we can
		 * use this to direct branch taken.
		 */
		if (val != 0)
			return -1;

		switch (opcode) {
		case BPF_JEQ:
			return 0;
		case BPF_JNE:
			return 1;
		default:
			return -1;
		}
	}

	if (is_jmp32)
		return is_branch32_taken(reg, val, opcode);
	return is_branch64_taken(reg, val, opcode);
}

static int flip_opcode(u32 opcode)
{
	/* How can we transform "a <op> b" into "b <op> a"? */
	static const u8 opcode_flip[16] = {
		/* these stay the same */
		[BPF_JEQ  >> 4] = BPF_JEQ,
		[BPF_JNE  >> 4] = BPF_JNE,
		[BPF_JSET >> 4] = BPF_JSET,
		/* these swap "lesser" and "greater" (L and G in the opcodes) */
		[BPF_JGE  >> 4] = BPF_JLE,
		[BPF_JGT  >> 4] = BPF_JLT,
		[BPF_JLE  >> 4] = BPF_JGE,
		[BPF_JLT  >> 4] = BPF_JGT,
		[BPF_JSGE >> 4] = BPF_JSLE,
		[BPF_JSGT >> 4] = BPF_JSLT,
		[BPF_JSLE >> 4] = BPF_JSGE,
		[BPF_JSLT >> 4] = BPF_JSGT
	};
	return opcode_flip[opcode >> 4];
}

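/* For example (illustrative): a condition like "pkt_end > r2" is
 * handled by flipping it to "r2 < pkt_end", so callers such as
 * is_pkt_ptr_branch_taken() below only need to reason about the packet
 * pointer sitting on one fixed side of the comparison.
 */
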
static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
				   struct bpf_reg_state *src_reg,
				   u8 opcode)
{
	struct bpf_reg_state *pkt;

	if (src_reg->type == PTR_TO_PACKET_END) {
		pkt = dst_reg;
	} else if (dst_reg->type == PTR_TO_PACKET_END) {
		pkt = src_reg;
		opcode = flip_opcode(opcode);
	} else {
		return -1;
	}

	if (pkt->range >= 0)
		return -1;

	switch (opcode) {
	case BPF_JLE:
		/* pkt <= pkt_end */
		fallthrough;
	case BPF_JGT:
		/* pkt > pkt_end */
		if (pkt->range == BEYOND_PKT_END)
			/* pkt has at least one extra byte beyond pkt_end */
			return opcode == BPF_JGT;
		break;
	case BPF_JLT:
		/* pkt < pkt_end */
		fallthrough;
	case BPF_JGE:
		/* pkt >= pkt_end */
		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
			return opcode == BPF_JGE;
		break;
	}
	return -1;
}

/* Adjusts the register min/max values in the case that the dst_reg is the
 * variable register that we are working on, and src_reg is a constant or we're
 * simply doing a BPF_K check.
 * In JEQ/JNE cases we also adjust the var_off values.
 */
static void reg_set_min_max(struct bpf_reg_state *true_reg,
			    struct bpf_reg_state *false_reg,
			    u64 val, u32 val32,
			    u8 opcode, bool is_jmp32)
{
	struct tnum false_32off = tnum_subreg(false_reg->var_off);
	struct tnum false_64off = false_reg->var_off;
	struct tnum true_32off = tnum_subreg(true_reg->var_off);
	struct tnum true_64off = true_reg->var_off;
	s64 sval = (s64)val;
	s32 sval32 = (s32)val32;

	/* If the dst_reg is a pointer, we can't learn anything about its
	 * variable offset from the compare (unless src_reg were a pointer into
	 * the same object, but we don't bother with that).
	 * Since false_reg and true_reg have the same type by construction, we
	 * only need to check one of them for pointerness.
	 */
	if (__is_pointer_value(false, false_reg))
		return;

	switch (opcode) {
	case BPF_JEQ:
	case BPF_JNE:
	{
		struct bpf_reg_state *reg =
			opcode == BPF_JEQ ? true_reg : false_reg;

		/* JEQ/JNE comparison doesn't change the register equivalence.
		 * r1 = r2;
		 * if (r1 == 42) goto label;
		 * ...
		 * label: // here both r1 and r2 are known to be 42.
		 *
		 * Hence when marking register as known preserve its ID.
		 */
		if (is_jmp32)
			__mark_reg32_known(reg, val32);
		else
			___mark_reg_known(reg, val);
		break;
	}
	case BPF_JSET:
		if (is_jmp32) {
			false_32off = tnum_and(false_32off, tnum_const(~val32));
			if (is_power_of_2(val32))
				true_32off = tnum_or(true_32off,
						     tnum_const(val32));
		} else {
			false_64off = tnum_and(false_64off, tnum_const(~val));
			if (is_power_of_2(val))
				true_64off = tnum_or(true_64off,
						     tnum_const(val));
		}
		break;
	case BPF_JGE:
	case BPF_JGT:
	{
		if (is_jmp32) {
			u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1;
			u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;

			false_reg->u32_max_value = min(false_reg->u32_max_value,
						       false_umax);
			true_reg->u32_min_value = max(true_reg->u32_min_value,
						      true_umin);
		} else {
			u64 false_umax = opcode == BPF_JGT ? val : val - 1;
			u64 true_umin = opcode == BPF_JGT ? val + 1 : val;

			false_reg->umax_value = min(false_reg->umax_value, false_umax);
			true_reg->umin_value = max(true_reg->umin_value, true_umin);
		}
		break;
	}
	case BPF_JSGE:
	case BPF_JSGT:
	{
		if (is_jmp32) {
			s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1;
			s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;

			false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
			true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
		} else {
			s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
			s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;

			false_reg->smax_value = min(false_reg->smax_value, false_smax);
			true_reg->smin_value = max(true_reg->smin_value, true_smin);
		}
		break;
	}
	case BPF_JLE:
	case BPF_JLT:
	{
		if (is_jmp32) {
			u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1;
			u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;

			false_reg->u32_min_value = max(false_reg->u32_min_value,
						       false_umin);
			true_reg->u32_max_value = min(true_reg->u32_max_value,
						      true_umax);
		} else {
			u64 false_umin = opcode == BPF_JLT ? val : val + 1;
			u64 true_umax = opcode == BPF_JLT ? val - 1 : val;

			false_reg->umin_value = max(false_reg->umin_value, false_umin);
			true_reg->umax_value = min(true_reg->umax_value, true_umax);
		}
		break;
	}
	case BPF_JSLE:
	case BPF_JSLT:
	{
		if (is_jmp32) {
			s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1;
			s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;

			false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
			true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
		} else {
			s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
			s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;

			false_reg->smin_value = max(false_reg->smin_value, false_smin);
			true_reg->smax_value = min(true_reg->smax_value, true_smax);
		}
		break;
	}
	default:
		return;
	}

	if (is_jmp32) {
		false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
					     tnum_subreg(false_32off));
		true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
					    tnum_subreg(true_32off));
		__reg_combine_32_into_64(false_reg);
		__reg_combine_32_into_64(true_reg);
	} else {
		false_reg->var_off = false_64off;
		true_reg->var_off = true_64off;
		__reg_combine_64_into_32(false_reg);
		__reg_combine_64_into_32(true_reg);
	}
}

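/* Worked example for the BPF_JGT case above (illustrative): for
 * "if r1 > 7 goto l" with r1 initially unbounded, true_umin becomes
 * val + 1 = 8 and false_umax becomes val = 7, so r1 is known to be
 * >= 8 at l and <= 7 on the fall-through path.
 */
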
/* Same as above, but for the case that dst_reg holds a constant and src_reg is
 * the variable reg.
 */
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
				struct bpf_reg_state *false_reg,
				u64 val, u32 val32,
				u8 opcode, bool is_jmp32)
{
	opcode = flip_opcode(opcode);
	/* This uses zero as "not present in table"; luckily the zero opcode,
	 * BPF_JA, can't get here.
	 */
	if (opcode)
		reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
}

/* Regs are known to be equal, so intersect their min/max/var_off */
static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
				  struct bpf_reg_state *dst_reg)
{
	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
							dst_reg->umin_value);
	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
							dst_reg->umax_value);
	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
							dst_reg->smin_value);
	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
							dst_reg->smax_value);
	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
							     dst_reg->var_off);
	/* We might have learned new bounds from the var_off. */
	__update_reg_bounds(src_reg);
	__update_reg_bounds(dst_reg);
	/* We might have learned something about the sign bit. */
	__reg_deduce_bounds(src_reg);
	__reg_deduce_bounds(dst_reg);
	/* We might have learned some bits from the bounds. */
	__reg_bound_offset(src_reg);
	__reg_bound_offset(dst_reg);
	/* Intersecting with the old var_off might have improved our bounds
	 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__update_reg_bounds(src_reg);
	__update_reg_bounds(dst_reg);
}

static void reg_combine_min_max(struct bpf_reg_state *true_src,
				struct bpf_reg_state *true_dst,
				struct bpf_reg_state *false_src,
				struct bpf_reg_state *false_dst,
				u8 opcode)
{
	switch (opcode) {
	case BPF_JEQ:
		__reg_combine_min_max(true_src, true_dst);
		break;
	case BPF_JNE:
		__reg_combine_min_max(false_src, false_dst);
		break;
	}
}

Joe Stringerfd978bf72018-10-02 13:35:35 -07008923static void mark_ptr_or_null_reg(struct bpf_func_state *state,
8924 struct bpf_reg_state *reg, u32 id,
Joe Stringer840b9612018-10-02 13:35:32 -07008925 bool is_null)
Thomas Graf57a09bf2016-10-18 19:51:19 +02008926{
Martin KaFai Lau93c230e2020-10-19 12:42:12 -07008927 if (reg_type_may_be_null(reg->type) && reg->id == id &&
8928 !WARN_ON_ONCE(!reg->id)) {
Edward Creef1174f72017-08-07 15:26:19 +01008929 /* Old offset (both fixed and variable parts) should
8930 * have been known-zero, because we don't allow pointer
8931 * arithmetic on pointers that might be NULL.
8932 */
Edward Creeb03c9f92017-08-07 15:26:36 +01008933 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
8934 !tnum_equals_const(reg->var_off, 0) ||
Edward Creef1174f72017-08-07 15:26:19 +01008935 reg->off)) {
Edward Creeb03c9f92017-08-07 15:26:36 +01008936 __mark_reg_known_zero(reg);
8937 reg->off = 0;
Edward Creef1174f72017-08-07 15:26:19 +01008938 }
8939 if (is_null) {
8940 reg->type = SCALAR_VALUE;
Martin KaFai Lau1b986582019-03-12 10:23:02 -07008941 /* We don't need id and ref_obj_id from this point
8942 * onwards anymore, thus we should better reset it,
8943 * so that state pruning has chances to take effect.
8944 */
8945 reg->id = 0;
8946 reg->ref_obj_id = 0;
Dmitrii Banshchikov4ddb7412021-02-13 00:56:40 +04008947
8948 return;
8949 }
8950
8951 mark_ptr_not_null_reg(reg);
8952
8953 if (!reg_may_point_to_spin_lock(reg)) {
Martin KaFai Lau1b986582019-03-12 10:23:02 -07008954 /* For not-NULL ptr, reg->ref_obj_id will be reset
8955 * in release_reg_references().
8956 *
8957 * reg->id is still used by spin_lock ptr. Other
8958 * than spin_lock ptr type, reg->id can be reset.
Joe Stringerfd978bf72018-10-02 13:35:35 -07008959 */
8960 reg->id = 0;
8961 }
Thomas Graf57a09bf2016-10-18 19:51:19 +02008962 }
8963}
8964
Paul Chaignonc6a9efa2019-04-24 21:50:42 +02008965static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
8966 bool is_null)
8967{
8968 struct bpf_reg_state *reg;
8969 int i;
8970
8971 for (i = 0; i < MAX_BPF_REG; i++)
8972 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
8973
8974 bpf_for_each_spilled_reg(i, state, reg) {
8975 if (!reg)
8976 continue;
8977 mark_ptr_or_null_reg(state, reg, id, is_null);
8978 }
8979}
8980
Thomas Graf57a09bf2016-10-18 19:51:19 +02008981/* The logic is similar to find_good_pkt_pointers(), both could eventually
8982 * be folded together at some point.
8983 */
Joe Stringer840b9612018-10-02 13:35:32 -07008984static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
8985 bool is_null)
Thomas Graf57a09bf2016-10-18 19:51:19 +02008986{
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08008987 struct bpf_func_state *state = vstate->frame[vstate->curframe];
Paul Chaignonc6a9efa2019-04-24 21:50:42 +02008988 struct bpf_reg_state *regs = state->regs;
Martin KaFai Lau1b986582019-03-12 10:23:02 -07008989 u32 ref_obj_id = regs[regno].ref_obj_id;
Daniel Borkmanna08dd0d2016-12-15 01:30:06 +01008990 u32 id = regs[regno].id;
Paul Chaignonc6a9efa2019-04-24 21:50:42 +02008991 int i;
Thomas Graf57a09bf2016-10-18 19:51:19 +02008992
Martin KaFai Lau1b986582019-03-12 10:23:02 -07008993 if (ref_obj_id && ref_obj_id == id && is_null)
8994 /* regs[regno] is in the " == NULL" branch.
8995 * No one could have freed the reference state before
8996 * doing the NULL check.
8997 */
8998 WARN_ON_ONCE(release_reference_state(state, id));
Joe Stringerfd978bf72018-10-02 13:35:35 -07008999
Paul Chaignonc6a9efa2019-04-24 21:50:42 +02009000 for (i = 0; i <= vstate->curframe; i++)
9001 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
Thomas Graf57a09bf2016-10-18 19:51:19 +02009002}
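/* The program-side pattern that mark_ptr_or_null_reg() serves, sketched in
 * restricted C for clang's BPF target (the map name, section names and use of
 * libbpf's bpf_helpers.h are illustrative assumptions):
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("xdp")
int null_check_example(struct xdp_md *ctx)
{
	__u32 key = 0;
	__u64 *value = bpf_map_lookup_elem(&counters, &key);

	if (!value)			/* "== NULL" branch: reg becomes scalar 0 */
		return XDP_PASS;
	__sync_fetch_and_add(value, 1);	/* not-NULL branch: dereference allowed */
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";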
9003
Daniel Borkmann5beca082017-11-01 23:58:10 +01009004static bool try_match_pkt_pointers(const struct bpf_insn *insn,
9005 struct bpf_reg_state *dst_reg,
9006 struct bpf_reg_state *src_reg,
9007 struct bpf_verifier_state *this_branch,
9008 struct bpf_verifier_state *other_branch)
9009{
9010 if (BPF_SRC(insn->code) != BPF_X)
9011 return false;
9012
Jiong Wang092ed092019-01-26 12:26:01 -05009013 /* Pointers are always 64-bit. */
9014 if (BPF_CLASS(insn->code) == BPF_JMP32)
9015 return false;
9016
Daniel Borkmann5beca082017-11-01 23:58:10 +01009017 switch (BPF_OP(insn->code)) {
9018 case BPF_JGT:
9019 if ((dst_reg->type == PTR_TO_PACKET &&
9020 src_reg->type == PTR_TO_PACKET_END) ||
9021 (dst_reg->type == PTR_TO_PACKET_META &&
9022 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9023 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
9024 find_good_pkt_pointers(this_branch, dst_reg,
9025 dst_reg->type, false);
Alexei Starovoitov6d94e742020-11-10 19:12:11 -08009026 mark_pkt_end(other_branch, insn->dst_reg, true);
Daniel Borkmann5beca082017-11-01 23:58:10 +01009027 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
9028 src_reg->type == PTR_TO_PACKET) ||
9029 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9030 src_reg->type == PTR_TO_PACKET_META)) {
9031 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
9032 find_good_pkt_pointers(other_branch, src_reg,
9033 src_reg->type, true);
Alexei Starovoitov6d94e742020-11-10 19:12:11 -08009034 mark_pkt_end(this_branch, insn->src_reg, false);
Daniel Borkmann5beca082017-11-01 23:58:10 +01009035 } else {
9036 return false;
9037 }
9038 break;
9039 case BPF_JLT:
9040 if ((dst_reg->type == PTR_TO_PACKET &&
9041 src_reg->type == PTR_TO_PACKET_END) ||
9042 (dst_reg->type == PTR_TO_PACKET_META &&
9043 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9044 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
9045 find_good_pkt_pointers(other_branch, dst_reg,
9046 dst_reg->type, true);
Alexei Starovoitov6d94e742020-11-10 19:12:11 -08009047 mark_pkt_end(this_branch, insn->dst_reg, false);
Daniel Borkmann5beca082017-11-01 23:58:10 +01009048 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
9049 src_reg->type == PTR_TO_PACKET) ||
9050 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9051 src_reg->type == PTR_TO_PACKET_META)) {
9052			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
9053 find_good_pkt_pointers(this_branch, src_reg,
9054 src_reg->type, false);
Alexei Starovoitov6d94e742020-11-10 19:12:11 -08009055 mark_pkt_end(other_branch, insn->src_reg, true);
Daniel Borkmann5beca082017-11-01 23:58:10 +01009056 } else {
9057 return false;
9058 }
9059 break;
9060 case BPF_JGE:
9061 if ((dst_reg->type == PTR_TO_PACKET &&
9062 src_reg->type == PTR_TO_PACKET_END) ||
9063 (dst_reg->type == PTR_TO_PACKET_META &&
9064 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9065 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
9066 find_good_pkt_pointers(this_branch, dst_reg,
9067 dst_reg->type, true);
Alexei Starovoitov6d94e742020-11-10 19:12:11 -08009068 mark_pkt_end(other_branch, insn->dst_reg, false);
Daniel Borkmann5beca082017-11-01 23:58:10 +01009069 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
9070 src_reg->type == PTR_TO_PACKET) ||
9071 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9072 src_reg->type == PTR_TO_PACKET_META)) {
9073 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
9074 find_good_pkt_pointers(other_branch, src_reg,
9075 src_reg->type, false);
Alexei Starovoitov6d94e742020-11-10 19:12:11 -08009076 mark_pkt_end(this_branch, insn->src_reg, true);
Daniel Borkmann5beca082017-11-01 23:58:10 +01009077 } else {
9078 return false;
9079 }
9080 break;
9081 case BPF_JLE:
9082 if ((dst_reg->type == PTR_TO_PACKET &&
9083 src_reg->type == PTR_TO_PACKET_END) ||
9084 (dst_reg->type == PTR_TO_PACKET_META &&
9085 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9086 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
9087 find_good_pkt_pointers(other_branch, dst_reg,
9088 dst_reg->type, false);
Alexei Starovoitov6d94e742020-11-10 19:12:11 -08009089 mark_pkt_end(this_branch, insn->dst_reg, true);
Daniel Borkmann5beca082017-11-01 23:58:10 +01009090 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
9091 src_reg->type == PTR_TO_PACKET) ||
9092 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9093 src_reg->type == PTR_TO_PACKET_META)) {
9094 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
9095 find_good_pkt_pointers(this_branch, src_reg,
9096 src_reg->type, true);
Alexei Starovoitov6d94e742020-11-10 19:12:11 -08009097 mark_pkt_end(other_branch, insn->src_reg, false);
Daniel Borkmann5beca082017-11-01 23:58:10 +01009098 } else {
9099 return false;
9100 }
9101 break;
9102 default:
9103 return false;
9104 }
9105
9106 return true;
9107}
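/* What this pattern matching enables on the program side: the canonical
 * packet bounds check, again a sketch for clang's BPF target assuming libbpf
 * headers (names are illustrative):
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int pkt_bounds_example(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	/* PTR_TO_PACKET vs PTR_TO_PACKET_END comparison: on the fall-through
	 * path find_good_pkt_pointers() marks eth readable for sizeof(*eth)
	 * bytes; the taken path (too-short packet) drops.
	 */
	if (data + sizeof(*eth) > data_end)
		return XDP_DROP;

	return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
}

char LICENSE[] SEC("license") = "GPL";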
9108
Alexei Starovoitov75748832020-10-08 18:12:37 -07009109static void find_equal_scalars(struct bpf_verifier_state *vstate,
9110 struct bpf_reg_state *known_reg)
9111{
9112 struct bpf_func_state *state;
9113 struct bpf_reg_state *reg;
9114 int i, j;
9115
9116 for (i = 0; i <= vstate->curframe; i++) {
9117 state = vstate->frame[i];
9118 for (j = 0; j < MAX_BPF_REG; j++) {
9119 reg = &state->regs[j];
9120 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
9121 *reg = *known_reg;
9122 }
9123
9124 bpf_for_each_spilled_reg(j, state, reg) {
9125 if (!reg)
9126 continue;
9127 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
9128 *reg = *known_reg;
9129 }
9130 }
9131}
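/* Toy model of the propagation above (made-up struct, not kernel code): a
 * register-to-register move gives both registers the same id, so bounds
 * learned about one copy in a branch apply to every copy.
 */
#include <assert.h>
#include <stdint.h>

struct scalar { uint32_t id; uint64_t umin, umax; };

static void propagate_equal_scalars(struct scalar *regs, int n,
				    const struct scalar *known)
{
	int i;

	for (i = 0; i < n; i++)
		if (regs[i].id == known->id)
			regs[i] = *known;
}

int main(void)
{
	/* after "r2 = r1" both share id 7; bounds start unknown */
	struct scalar regs[2] = {
		{ .id = 7, .umin = 0, .umax = UINT64_MAX },
		{ .id = 7, .umin = 0, .umax = UINT64_MAX },
	};
	/* fall-through of "if r1 > 8 goto ...": r1 is now in [0, 8] */
	struct scalar known = { .id = 7, .umin = 0, .umax = 8 };

	propagate_equal_scalars(regs, 2, &known);
	assert(regs[1].umax == 8);	/* the copy r2 learned it too */
	return 0;
}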
9132
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +01009133static int check_cond_jmp_op(struct bpf_verifier_env *env,
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009134 struct bpf_insn *insn, int *insn_idx)
9135{
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08009136 struct bpf_verifier_state *this_branch = env->cur_state;
9137 struct bpf_verifier_state *other_branch;
9138 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
Alexei Starovoitovfb8d2512019-06-15 12:12:19 -07009139 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009140 u8 opcode = BPF_OP(insn->code);
Jiong Wang092ed092019-01-26 12:26:01 -05009141 bool is_jmp32;
Alexei Starovoitovfb8d2512019-06-15 12:12:19 -07009142 int pred = -1;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009143 int err;
9144
Jiong Wang092ed092019-01-26 12:26:01 -05009145 /* Only conditional jumps are expected to reach here. */
9146 if (opcode == BPF_JA || opcode > BPF_JSLE) {
9147 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009148 return -EINVAL;
9149 }
9150
9151 if (BPF_SRC(insn->code) == BPF_X) {
9152 if (insn->imm != 0) {
Jiong Wang092ed092019-01-26 12:26:01 -05009153 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009154 return -EINVAL;
9155 }
9156
9157 /* check src1 operand */
Edward Creedc503a82017-08-15 20:34:35 +01009158 err = check_reg_arg(env, insn->src_reg, SRC_OP);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009159 if (err)
9160 return err;
Alexei Starovoitov1be7f752015-10-07 22:23:21 -07009161
9162 if (is_pointer_value(env, insn->src_reg)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009163 verbose(env, "R%d pointer comparison prohibited\n",
Alexei Starovoitov1be7f752015-10-07 22:23:21 -07009164 insn->src_reg);
9165 return -EACCES;
9166 }
Alexei Starovoitovfb8d2512019-06-15 12:12:19 -07009167 src_reg = &regs[insn->src_reg];
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009168 } else {
9169 if (insn->src_reg != BPF_REG_0) {
Jiong Wang092ed092019-01-26 12:26:01 -05009170 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009171 return -EINVAL;
9172 }
9173 }
9174
9175 /* check src2 operand */
Edward Creedc503a82017-08-15 20:34:35 +01009176 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009177 if (err)
9178 return err;
9179
Alexei Starovoitov1a0dc1a2016-05-05 19:49:09 -07009180 dst_reg = &regs[insn->dst_reg];
Jiong Wang092ed092019-01-26 12:26:01 -05009181 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
Alexei Starovoitov1a0dc1a2016-05-05 19:49:09 -07009182
John Fastabend3f50f132020-03-30 14:36:39 -07009183 if (BPF_SRC(insn->code) == BPF_K) {
9184 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
9185 } else if (src_reg->type == SCALAR_VALUE &&
9186 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
9187 pred = is_branch_taken(dst_reg,
9188 tnum_subreg(src_reg->var_off).value,
9189 opcode,
9190 is_jmp32);
9191 } else if (src_reg->type == SCALAR_VALUE &&
9192 !is_jmp32 && tnum_is_const(src_reg->var_off)) {
9193 pred = is_branch_taken(dst_reg,
9194 src_reg->var_off.value,
9195 opcode,
9196 is_jmp32);
Alexei Starovoitov6d94e742020-11-10 19:12:11 -08009197 } else if (reg_is_pkt_pointer_any(dst_reg) &&
9198 reg_is_pkt_pointer_any(src_reg) &&
9199 !is_jmp32) {
9200 pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
John Fastabend3f50f132020-03-30 14:36:39 -07009201 }
9202
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07009203 if (pred >= 0) {
John Fastabendcac616d2020-05-21 13:07:26 -07009204 /* If we get here with a dst_reg pointer type it is because
9205		 * is_branch_taken() above special-cased the 0 comparison.
9206 */
9207 if (!__is_pointer_value(false, dst_reg))
9208 err = mark_chain_precision(env, insn->dst_reg);
Alexei Starovoitov6d94e742020-11-10 19:12:11 -08009209 if (BPF_SRC(insn->code) == BPF_X && !err &&
9210 !__is_pointer_value(false, src_reg))
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -07009211 err = mark_chain_precision(env, insn->src_reg);
9212 if (err)
9213 return err;
9214 }
Daniel Borkmann9183671a2021-05-28 15:47:32 +00009215
Alexei Starovoitovfb8d2512019-06-15 12:12:19 -07009216 if (pred == 1) {
Daniel Borkmann9183671a2021-05-28 15:47:32 +00009217 /* Only follow the goto, ignore fall-through. If needed, push
9218 * the fall-through branch for simulation under speculative
9219 * execution.
9220 */
9221 if (!env->bypass_spec_v1 &&
9222 !sanitize_speculative_path(env, insn, *insn_idx + 1,
9223 *insn_idx))
9224 return -EFAULT;
Alexei Starovoitovfb8d2512019-06-15 12:12:19 -07009225 *insn_idx += insn->off;
9226 return 0;
9227 } else if (pred == 0) {
Daniel Borkmann9183671a2021-05-28 15:47:32 +00009228 /* Only follow the fall-through branch, since that's where the
9229 * program will go. If needed, push the goto branch for
9230 * simulation under speculative execution.
Alexei Starovoitovfb8d2512019-06-15 12:12:19 -07009231 */
Daniel Borkmann9183671a2021-05-28 15:47:32 +00009232 if (!env->bypass_spec_v1 &&
9233 !sanitize_speculative_path(env, insn,
9234 *insn_idx + insn->off + 1,
9235 *insn_idx))
9236 return -EFAULT;
Alexei Starovoitovfb8d2512019-06-15 12:12:19 -07009237 return 0;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009238 }
9239
Daniel Borkmann979d63d2019-01-03 00:58:34 +01009240 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
9241 false);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009242 if (!other_branch)
9243 return -EFAULT;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08009244 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009245
Josef Bacik48461132016-09-28 10:54:32 -04009246 /* detect if we are comparing against a constant value so we can adjust
9247 * our min/max values for our dst register.
Edward Creef1174f72017-08-07 15:26:19 +01009248 * this is only legit if both are scalars (or pointers to the same
9249 * object, I suppose, but we don't support that right now), because
9250 * otherwise the different base pointers mean the offsets aren't
9251 * comparable.
Josef Bacik48461132016-09-28 10:54:32 -04009252 */
9253 if (BPF_SRC(insn->code) == BPF_X) {
Jiong Wang092ed092019-01-26 12:26:01 -05009254 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
Jiong Wang092ed092019-01-26 12:26:01 -05009255
Edward Creef1174f72017-08-07 15:26:19 +01009256 if (dst_reg->type == SCALAR_VALUE &&
Jiong Wang092ed092019-01-26 12:26:01 -05009257 src_reg->type == SCALAR_VALUE) {
9258 if (tnum_is_const(src_reg->var_off) ||
John Fastabend3f50f132020-03-30 14:36:39 -07009259 (is_jmp32 &&
9260 tnum_is_const(tnum_subreg(src_reg->var_off))))
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08009261 reg_set_min_max(&other_branch_regs[insn->dst_reg],
Jiong Wang092ed092019-01-26 12:26:01 -05009262 dst_reg,
John Fastabend3f50f132020-03-30 14:36:39 -07009263 src_reg->var_off.value,
9264 tnum_subreg(src_reg->var_off).value,
Jiong Wang092ed092019-01-26 12:26:01 -05009265 opcode, is_jmp32);
9266 else if (tnum_is_const(dst_reg->var_off) ||
John Fastabend3f50f132020-03-30 14:36:39 -07009267 (is_jmp32 &&
9268 tnum_is_const(tnum_subreg(dst_reg->var_off))))
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08009269 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
Jiong Wang092ed092019-01-26 12:26:01 -05009270 src_reg,
John Fastabend3f50f132020-03-30 14:36:39 -07009271 dst_reg->var_off.value,
9272 tnum_subreg(dst_reg->var_off).value,
Jiong Wang092ed092019-01-26 12:26:01 -05009273 opcode, is_jmp32);
9274 else if (!is_jmp32 &&
9275 (opcode == BPF_JEQ || opcode == BPF_JNE))
Edward Creef1174f72017-08-07 15:26:19 +01009276 /* Comparing for equality, we can combine knowledge */
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08009277 reg_combine_min_max(&other_branch_regs[insn->src_reg],
9278 &other_branch_regs[insn->dst_reg],
Jiong Wang092ed092019-01-26 12:26:01 -05009279 src_reg, dst_reg, opcode);
Alexei Starovoitove688c3d2020-10-14 10:56:08 -07009280 if (src_reg->id &&
9281 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
Alexei Starovoitov75748832020-10-08 18:12:37 -07009282 find_equal_scalars(this_branch, src_reg);
9283 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
9284 }
9285
Edward Creef1174f72017-08-07 15:26:19 +01009286 }
9287 } else if (dst_reg->type == SCALAR_VALUE) {
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08009288 reg_set_min_max(&other_branch_regs[insn->dst_reg],
John Fastabend3f50f132020-03-30 14:36:39 -07009289 dst_reg, insn->imm, (u32)insn->imm,
9290 opcode, is_jmp32);
Josef Bacik48461132016-09-28 10:54:32 -04009291 }
9292
Alexei Starovoitove688c3d2020-10-14 10:56:08 -07009293 if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
9294 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
Alexei Starovoitov75748832020-10-08 18:12:37 -07009295 find_equal_scalars(this_branch, dst_reg);
9296 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
9297 }
9298
Jiong Wang092ed092019-01-26 12:26:01 -05009299 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
9300	 * NOTE: these optimizations below are related to pointer comparisons
9301 * which will never be JMP32.
9302 */
9303 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
Alexei Starovoitov1a0dc1a2016-05-05 19:49:09 -07009304 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
Joe Stringer840b9612018-10-02 13:35:32 -07009305 reg_type_may_be_null(dst_reg->type)) {
9306 /* Mark all identical registers in each branch as either
Thomas Graf57a09bf2016-10-18 19:51:19 +02009307		 * safe or unknown depending on the R == 0 or R != 0 condition.
9308 */
Joe Stringer840b9612018-10-02 13:35:32 -07009309 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
9310 opcode == BPF_JNE);
9311 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
9312 opcode == BPF_JEQ);
Daniel Borkmann5beca082017-11-01 23:58:10 +01009313 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
9314 this_branch, other_branch) &&
9315 is_pointer_value(env, insn->dst_reg)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009316 verbose(env, "R%d pointer comparison prohibited\n",
9317 insn->dst_reg);
Alexei Starovoitov1be7f752015-10-07 22:23:21 -07009318 return -EACCES;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009319 }
Alexei Starovoitov06ee7112019-04-01 21:27:40 -07009320 if (env->log.level & BPF_LOG_LEVEL)
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -08009321 print_verifier_state(env, this_branch->frame[this_branch->curframe]);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009322 return 0;
9323}
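/* Toy model (made-up struct, not the kernel's reg_set_min_max()) of what the
 * two branches of "if reg < K" learn once check_cond_jmp_op() hands the
 * constant over: other_branch (taken) caps umax at K - 1, this_branch
 * (fall-through) lifts umin to K.
 */
#include <assert.h>
#include <stdint.h>

struct bounds { uint64_t umin, umax; };

static void set_min_max_jlt(struct bounds *taken, struct bounds *fallthru,
			    uint64_t K)
{
	if (!K)
		return;	/* "reg < 0" is never true; is_branch_taken() catches it */
	if (taken->umax > K - 1)
		taken->umax = K - 1;	/* taken:        reg <  K */
	if (fallthru->umin < K)
		fallthru->umin = K;	/* fall-through: reg >= K */
}

int main(void)
{
	struct bounds taken = { 0, UINT64_MAX }, fallthru = taken;

	set_min_max_jlt(&taken, &fallthru, 100);
	assert(taken.umax == 99 && fallthru.umin == 100);
	return 0;
}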
9324
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009325/* verify BPF_LD_IMM64 instruction */
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +01009326static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009327{
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +02009328 struct bpf_insn_aux_data *aux = cur_aux(env);
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07009329 struct bpf_reg_state *regs = cur_regs(env);
Hao Luo4976b712020-09-29 16:50:44 -07009330 struct bpf_reg_state *dst_reg;
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +02009331 struct bpf_map *map;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009332 int err;
9333
9334 if (BPF_SIZE(insn->code) != BPF_DW) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009335 verbose(env, "invalid BPF_LD_IMM insn\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009336 return -EINVAL;
9337 }
9338 if (insn->off != 0) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009339 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009340 return -EINVAL;
9341 }
9342
Edward Creedc503a82017-08-15 20:34:35 +01009343 err = check_reg_arg(env, insn->dst_reg, DST_OP);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009344 if (err)
9345 return err;
9346
Hao Luo4976b712020-09-29 16:50:44 -07009347 dst_reg = &regs[insn->dst_reg];
Jakub Kicinski6b173872016-09-21 11:43:59 +01009348 if (insn->src_reg == 0) {
Jakub Kicinski6b173872016-09-21 11:43:59 +01009349 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
9350
Hao Luo4976b712020-09-29 16:50:44 -07009351 dst_reg->type = SCALAR_VALUE;
Edward Creeb03c9f92017-08-07 15:26:36 +01009352 __mark_reg_known(&regs[insn->dst_reg], imm);
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009353 return 0;
Jakub Kicinski6b173872016-09-21 11:43:59 +01009354 }
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009355
Hao Luo4976b712020-09-29 16:50:44 -07009356 if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
9357 mark_reg_known_zero(env, regs, insn->dst_reg);
9358
9359 dst_reg->type = aux->btf_var.reg_type;
9360 switch (dst_reg->type) {
9361 case PTR_TO_MEM:
9362 dst_reg->mem_size = aux->btf_var.mem_size;
9363 break;
9364 case PTR_TO_BTF_ID:
Hao Luoeaa6bcb2020-09-29 16:50:47 -07009365 case PTR_TO_PERCPU_BTF_ID:
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -08009366 dst_reg->btf = aux->btf_var.btf;
Hao Luo4976b712020-09-29 16:50:44 -07009367 dst_reg->btf_id = aux->btf_var.btf_id;
9368 break;
9369 default:
9370 verbose(env, "bpf verifier is misconfigured\n");
9371 return -EFAULT;
9372 }
9373 return 0;
9374 }
9375
Yonghong Song69c087b2021-02-26 12:49:25 -08009376 if (insn->src_reg == BPF_PSEUDO_FUNC) {
9377 struct bpf_prog_aux *aux = env->prog->aux;
Martin KaFai Lau3990ed42021-11-05 18:40:14 -07009378 u32 subprogno = find_subprog(env,
9379 env->insn_idx + insn->imm + 1);
Yonghong Song69c087b2021-02-26 12:49:25 -08009380
9381 if (!aux->func_info) {
9382 verbose(env, "missing btf func_info\n");
9383 return -EINVAL;
9384 }
9385 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
9386 verbose(env, "callback function not static\n");
9387 return -EINVAL;
9388 }
9389
9390 dst_reg->type = PTR_TO_FUNC;
9391 dst_reg->subprogno = subprogno;
9392 return 0;
9393 }
9394
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +02009395 map = env->used_maps[aux->map_index];
9396 mark_reg_known_zero(env, regs, insn->dst_reg);
Hao Luo4976b712020-09-29 16:50:44 -07009397 dst_reg->map_ptr = map;
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009398
Alexei Starovoitov387544b2021-05-13 17:36:10 -07009399 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
9400 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
Hao Luo4976b712020-09-29 16:50:44 -07009401 dst_reg->type = PTR_TO_MAP_VALUE;
9402 dst_reg->off = aux->map_off;
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +02009403 if (map_value_has_spin_lock(map))
Hao Luo4976b712020-09-29 16:50:44 -07009404 dst_reg->id = ++env->id_gen;
Alexei Starovoitov387544b2021-05-13 17:36:10 -07009405 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
9406 insn->src_reg == BPF_PSEUDO_MAP_IDX) {
Hao Luo4976b712020-09-29 16:50:44 -07009407 dst_reg->type = CONST_PTR_TO_MAP;
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +02009408 } else {
9409 verbose(env, "bpf verifier is misconfigured\n");
9410 return -EINVAL;
9411 }
9412
Alexei Starovoitov17a52672014-09-26 00:17:06 -07009413 return 0;
9414}
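/* Userspace sketch of the instruction check_ld_imm() consumes: BPF_LD_IMM64
 * spans two struct bpf_insn slots, and src_reg == BPF_PSEUDO_MAP_FD marks the
 * immediate as a map fd to be rewritten into a map pointer. A hand-rolled
 * equivalent of what loaders such as libbpf emit; the helper name is made up.
 */
#include <linux/bpf.h>

static void emit_ld_map_fd(struct bpf_insn *insn, __u8 dst_reg, int map_fd)
{
	insn[0] = (struct bpf_insn) {
		.code    = BPF_LD | BPF_DW | BPF_IMM,
		.dst_reg = dst_reg,
		.src_reg = BPF_PSEUDO_MAP_FD,
		.imm     = map_fd,	/* low 32 bits of the imm64 */
	};
	insn[1] = (struct bpf_insn) {
		.imm = 0,		/* high 32 bits; must be zero for a map fd */
	};
}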
9415
Daniel Borkmann96be4322015-03-01 12:31:46 +01009416static bool may_access_skb(enum bpf_prog_type type)
9417{
9418 switch (type) {
9419 case BPF_PROG_TYPE_SOCKET_FILTER:
9420 case BPF_PROG_TYPE_SCHED_CLS:
Daniel Borkmann94caee8c2015-03-20 15:11:11 +01009421 case BPF_PROG_TYPE_SCHED_ACT:
Daniel Borkmann96be4322015-03-01 12:31:46 +01009422 return true;
9423 default:
9424 return false;
9425 }
9426}
9427
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009428/* verify safety of LD_ABS|LD_IND instructions:
9429 * - they can only appear in the programs where ctx == skb
9430 * - since they are wrappers of function calls, they scratch R1-R5 registers,
9431 * preserve R6-R9, and store return value into R0
9432 *
9433 * Implicit input:
9434 * ctx == skb == R6 == CTX
9435 *
9436 * Explicit input:
9437 * SRC == any register
9438 * IMM == 32-bit immediate
9439 *
9440 * Output:
9441 * R0 - 8/16/32-bit skb data converted to cpu endianness
9442 */
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +01009443static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009444{
Alexei Starovoitov638f5b92017-10-31 18:16:05 -07009445 struct bpf_reg_state *regs = cur_regs(env);
Daniel Borkmann6d4f1512020-01-06 22:51:57 +01009446 static const int ctx_reg = BPF_REG_6;
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009447 u8 mode = BPF_MODE(insn->code);
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009448 int i, err;
9449
Udip Pant7e407812020-08-25 16:20:00 -07009450 if (!may_access_skb(resolve_prog_type(env->prog))) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009451 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009452 return -EINVAL;
9453 }
9454
Daniel Borkmanne0cea7c2018-05-04 01:08:14 +02009455 if (!env->ops->gen_ld_abs) {
9456 verbose(env, "bpf verifier is misconfigured\n");
9457 return -EINVAL;
9458 }
9459
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009460 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
Alexei Starovoitovd82bccc2016-04-12 10:26:19 -07009461 BPF_SIZE(insn->code) == BPF_DW ||
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009462 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009463 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009464 return -EINVAL;
9465 }
9466
9467 /* check whether implicit source operand (register R6) is readable */
Daniel Borkmann6d4f1512020-01-06 22:51:57 +01009468 err = check_reg_arg(env, ctx_reg, SRC_OP);
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009469 if (err)
9470 return err;
9471
Joe Stringerfd978bf72018-10-02 13:35:35 -07009472 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
9473 * gen_ld_abs() may terminate the program at runtime, leading to
9474 * reference leak.
9475 */
9476 err = check_reference_leak(env);
9477 if (err) {
9478 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
9479 return err;
9480 }
9481
Alexei Starovoitovd83525c2019-01-31 15:40:04 -08009482 if (env->cur_state->active_spin_lock) {
9483 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
9484 return -EINVAL;
9485 }
9486
Daniel Borkmann6d4f1512020-01-06 22:51:57 +01009487 if (regs[ctx_reg].type != PTR_TO_CTX) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009488 verbose(env,
9489 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009490 return -EINVAL;
9491 }
9492
9493 if (mode == BPF_IND) {
9494 /* check explicit source operand */
Edward Creedc503a82017-08-15 20:34:35 +01009495 err = check_reg_arg(env, insn->src_reg, SRC_OP);
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009496 if (err)
9497 return err;
9498 }
9499
Daniel Borkmann6d4f1512020-01-06 22:51:57 +01009500 err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
9501 if (err < 0)
9502 return err;
9503
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009504 /* reset caller saved regs to unreadable */
Edward Creedc503a82017-08-15 20:34:35 +01009505 for (i = 0; i < CALLER_SAVED_REGS; i++) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009506 mark_reg_not_init(env, regs, caller_saved[i]);
Edward Creedc503a82017-08-15 20:34:35 +01009507 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
9508 }
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009509
9510 /* mark destination R0 register as readable, since it contains
Edward Creedc503a82017-08-15 20:34:35 +01009511 * the value fetched from the packet.
9512 * Already marked as written above.
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009513 */
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009514 mark_reg_unknown(env, regs, BPF_REG_0);
Jiong Wang5327ed32019-05-24 23:25:12 +01009515	/* ld_abs loads up to 32 bits of skb data. */
9516 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
Alexei Starovoitovddd872b2014-12-01 15:06:34 -08009517 return 0;
9518}
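/* The implicit-R6 convention above, as raw instructions a loader might emit
 * for a socket filter (the array name is made up): R6 must hold the skb ctx,
 * LD_ABS clobbers R1-R5 and leaves the loaded, CPU-endian value in R0.
 */
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>

static const struct bpf_insn ld_abs_example[] = {
	/* r6 = r1: LD_ABS reads the skb implicitly from R6 */
	{ .code = BPF_ALU64 | BPF_MOV | BPF_X,
	  .dst_reg = BPF_REG_6, .src_reg = BPF_REG_1 },
	/* r0 = 16-bit EtherType at skb->data + 12 */
	{ .code = BPF_LD | BPF_ABS | BPF_H,
	  .imm = offsetof(struct ethhdr, h_proto) },
	/* return r0 */
	{ .code = BPF_JMP | BPF_EXIT },
};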
9519
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009520static int check_return_code(struct bpf_verifier_env *env)
9521{
brakmo5cf1e912019-05-28 16:59:36 -07009522 struct tnum enforce_attach_type_range = tnum_unknown;
Martin KaFai Lau27ae79972020-01-08 16:35:03 -08009523 const struct bpf_prog *prog = env->prog;
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009524 struct bpf_reg_state *reg;
9525 struct tnum range = tnum_range(0, 1);
Udip Pant7e407812020-08-25 16:20:00 -07009526 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
Martin KaFai Lau27ae79972020-01-08 16:35:03 -08009527 int err;
Alexei Starovoitovbfc6bb72021-07-14 17:54:14 -07009528 struct bpf_func_state *frame = env->cur_state->frame[0];
9529 const bool is_subprog = frame->subprogno;
Martin KaFai Lau27ae79972020-01-08 16:35:03 -08009530
KP Singh9e4e01d2020-03-29 01:43:52 +01009531 /* LSM and struct_ops func-ptr's return type could be "void" */
Dmitrii Banshchikovf782e2c2020-11-13 17:17:56 +00009532 if (!is_subprog &&
9533 (prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
Udip Pant7e407812020-08-25 16:20:00 -07009534 prog_type == BPF_PROG_TYPE_LSM) &&
Martin KaFai Lau27ae79972020-01-08 16:35:03 -08009535 !prog->aux->attach_func_proto->type)
9536 return 0;
9537
Zhen Lei8fb33b62021-05-25 10:56:59 +08009538 /* eBPF calling convention is such that R0 is used
Martin KaFai Lau27ae79972020-01-08 16:35:03 -08009539	 * to return the value from the eBPF program.
9540 * Make sure that it's readable at this time
9541 * of bpf_exit, which means that program wrote
9542 * something into it earlier
9543 */
9544 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
9545 if (err)
9546 return err;
9547
9548 if (is_pointer_value(env, BPF_REG_0)) {
9549 verbose(env, "R0 leaks addr as return value\n");
9550 return -EACCES;
9551 }
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009552
Dmitrii Banshchikovf782e2c2020-11-13 17:17:56 +00009553 reg = cur_regs(env) + BPF_REG_0;
Alexei Starovoitovbfc6bb72021-07-14 17:54:14 -07009554
9555 if (frame->in_async_callback_fn) {
9556 /* enforce return zero from async callbacks like timer */
9557 if (reg->type != SCALAR_VALUE) {
9558 verbose(env, "In async callback the register R0 is not a known value (%s)\n",
9559 reg_type_str[reg->type]);
9560 return -EINVAL;
9561 }
9562
9563 if (!tnum_in(tnum_const(0), reg->var_off)) {
9564 verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
9565 return -EINVAL;
9566 }
9567 return 0;
9568 }
9569
Dmitrii Banshchikovf782e2c2020-11-13 17:17:56 +00009570 if (is_subprog) {
9571 if (reg->type != SCALAR_VALUE) {
9572 verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
9573 reg_type_str[reg->type]);
9574 return -EINVAL;
9575 }
9576 return 0;
9577 }
9578
Udip Pant7e407812020-08-25 16:20:00 -07009579 switch (prog_type) {
Daniel Borkmann983695f2019-06-07 01:48:57 +02009580 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
9581 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
Daniel Borkmann1b66d252020-05-19 00:45:45 +02009582 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
9583 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
9584 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
9585 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
9586 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
Daniel Borkmann983695f2019-06-07 01:48:57 +02009587 range = tnum_range(1, 1);
Stanislav Fomichev77241212021-01-27 11:31:39 -08009588 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
9589 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
9590 range = tnum_range(0, 3);
Gustavo A. R. Silvaed4ed402019-07-11 11:22:33 -05009591 break;
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009592 case BPF_PROG_TYPE_CGROUP_SKB:
brakmo5cf1e912019-05-28 16:59:36 -07009593 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
9594 range = tnum_range(0, 3);
9595 enforce_attach_type_range = tnum_range(2, 3);
9596 }
Gustavo A. R. Silvaed4ed402019-07-11 11:22:33 -05009597 break;
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009598 case BPF_PROG_TYPE_CGROUP_SOCK:
9599 case BPF_PROG_TYPE_SOCK_OPS:
Roman Gushchinebc614f2017-11-05 08:15:32 -05009600 case BPF_PROG_TYPE_CGROUP_DEVICE:
Andrey Ignatov7b146ce2019-02-27 12:59:24 -08009601 case BPF_PROG_TYPE_CGROUP_SYSCTL:
Stanislav Fomichev0d01da62019-06-27 13:38:47 -07009602 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009603 break;
Alexei Starovoitov15ab09b2019-10-28 20:24:26 -07009604 case BPF_PROG_TYPE_RAW_TRACEPOINT:
9605 if (!env->prog->aux->attach_btf_id)
9606 return 0;
9607 range = tnum_const(0);
9608 break;
Yonghong Song15d83c42020-05-09 10:59:00 -07009609 case BPF_PROG_TYPE_TRACING:
Yonghong Songe92888c72020-05-13 22:32:05 -07009610 switch (env->prog->expected_attach_type) {
9611 case BPF_TRACE_FENTRY:
9612 case BPF_TRACE_FEXIT:
9613 range = tnum_const(0);
9614 break;
9615 case BPF_TRACE_RAW_TP:
9616 case BPF_MODIFY_RETURN:
Yonghong Song15d83c42020-05-09 10:59:00 -07009617 return 0;
Daniel Borkmann2ec06162020-05-16 00:39:18 +02009618 case BPF_TRACE_ITER:
9619 break;
Yonghong Songe92888c72020-05-13 22:32:05 -07009620 default:
9621 return -ENOTSUPP;
9622 }
Yonghong Song15d83c42020-05-09 10:59:00 -07009623 break;
Jakub Sitnickie9ddbb72020-07-17 12:35:23 +02009624 case BPF_PROG_TYPE_SK_LOOKUP:
9625 range = tnum_range(SK_DROP, SK_PASS);
9626 break;
Yonghong Songe92888c72020-05-13 22:32:05 -07009627 case BPF_PROG_TYPE_EXT:
9628 /* freplace program can return anything as its return value
9629 * depends on the to-be-replaced kernel func or bpf program.
9630 */
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009631 default:
9632 return 0;
9633 }
9634
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009635 if (reg->type != SCALAR_VALUE) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009636 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009637 reg_type_str[reg->type]);
9638 return -EINVAL;
9639 }
9640
9641 if (!tnum_in(range, reg->var_off)) {
Yonghong Songbc2591d2021-02-26 12:49:22 -08009642 verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009643 return -EINVAL;
9644 }
brakmo5cf1e912019-05-28 16:59:36 -07009645
9646 if (!tnum_is_unknown(enforce_attach_type_range) &&
9647 tnum_in(enforce_attach_type_range, reg->var_off))
9648 env->prog->enforce_expected_attach_type = 1;
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009649 return 0;
9650}
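/* Program-side view of the range enforcement, sketched for clang's BPF target
 * (assumes libbpf's SEC() convention): a CGROUP_SKB program attached at
 * ingress must keep R0 within tnum_range(0, 1).
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/ingress")
int allow_all(struct __sk_buff *skb)
{
	return 1;	/* 1 = allow, 0 = drop; "return 2" would be rejected here */
}

char LICENSE[] SEC("license") = "GPL";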
9651
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009652/* non-recursive DFS pseudo code
9653 * 1 procedure DFS-iterative(G,v):
9654 * 2 label v as discovered
9655 * 3 let S be a stack
9656 * 4 S.push(v)
9657 * 5 while S is not empty
9658 * 6 t <- S.pop()
9659 * 7 if t is what we're looking for:
9660 * 8 return t
9661 * 9 for all edges e in G.adjacentEdges(t) do
9662 * 10 if edge e is already labelled
9663 * 11 continue with the next edge
9664 * 12 w <- G.adjacentVertex(t,e)
9665 * 13 if vertex w is not discovered and not explored
9666 * 14 label e as tree-edge
9667 * 15 label w as discovered
9668 * 16 S.push(w)
9669 * 17 continue at 5
9670 * 18 else if vertex w is discovered
9671 * 19 label e as back-edge
9672 * 20 else
9673 * 21 // vertex w is explored
9674 * 22 label e as forward- or cross-edge
9675 * 23 label t as explored
9676 * 24 S.pop()
9677 *
9678 * convention:
9679 * 0x10 - discovered
9680 * 0x11 - discovered and fall-through edge labelled
9681 * 0x12 - discovered and fall-through and branch edges labelled
9682 * 0x20 - explored
9683 */
9684
9685enum {
9686 DISCOVERED = 0x10,
9687 EXPLORED = 0x20,
9688 FALLTHROUGH = 1,
9689 BRANCH = 2,
9690};
9691
Alexei Starovoitovdc2a4eb2019-05-21 20:17:07 -07009692static u32 state_htab_size(struct bpf_verifier_env *env)
9693{
9694 return env->prog->len;
9695}
9696
Alexei Starovoitov5d839022019-05-21 20:17:05 -07009697static struct bpf_verifier_state_list **explored_state(
9698 struct bpf_verifier_env *env,
9699 int idx)
9700{
Alexei Starovoitovdc2a4eb2019-05-21 20:17:07 -07009701 struct bpf_verifier_state *cur = env->cur_state;
9702 struct bpf_func_state *state = cur->frame[cur->curframe];
9703
9704 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
Alexei Starovoitov5d839022019-05-21 20:17:05 -07009705}
9706
9707static void init_explored_state(struct bpf_verifier_env *env, int idx)
9708{
Alexei Starovoitova8f500a2019-05-21 20:17:06 -07009709 env->insn_aux_data[idx].prune_point = true;
Alexei Starovoitov5d839022019-05-21 20:17:05 -07009710}
Alexei Starovoitovf1bca822014-09-29 18:50:01 -07009711
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009712enum {
9713 DONE_EXPLORING = 0,
9714 KEEP_EXPLORING = 1,
9715};
9716
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009717/* t, w, e - match pseudo-code above:
9718 * t - index of current instruction
9719 * w - next instruction
9720 * e - edge
9721 */
Alexei Starovoitov25897262019-06-15 12:12:20 -07009722static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
9723 bool loop_ok)
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009724{
Alexei Starovoitov7df737e2019-04-19 07:44:54 -07009725 int *insn_stack = env->cfg.insn_stack;
9726 int *insn_state = env->cfg.insn_state;
9727
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009728 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009729 return DONE_EXPLORING;
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009730
9731 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009732 return DONE_EXPLORING;
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009733
9734 if (w < 0 || w >= env->prog->len) {
Martin KaFai Laud9762e82018-12-13 10:41:48 -08009735 verbose_linfo(env, t, "%d: ", t);
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009736 verbose(env, "jump out of range from insn %d to %d\n", t, w);
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009737 return -EINVAL;
9738 }
9739
Alexei Starovoitovf1bca822014-09-29 18:50:01 -07009740 if (e == BRANCH)
9741 /* mark branch target for state pruning */
Alexei Starovoitov5d839022019-05-21 20:17:05 -07009742 init_explored_state(env, w);
Alexei Starovoitovf1bca822014-09-29 18:50:01 -07009743
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009744 if (insn_state[w] == 0) {
9745 /* tree-edge */
9746 insn_state[t] = DISCOVERED | e;
9747 insn_state[w] = DISCOVERED;
Alexei Starovoitov7df737e2019-04-19 07:44:54 -07009748 if (env->cfg.cur_stack >= env->prog->len)
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009749 return -E2BIG;
Alexei Starovoitov7df737e2019-04-19 07:44:54 -07009750 insn_stack[env->cfg.cur_stack++] = w;
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009751 return KEEP_EXPLORING;
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009752 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
Alexei Starovoitov2c78ee82020-05-13 16:03:54 -07009753 if (loop_ok && env->bpf_capable)
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009754 return DONE_EXPLORING;
Martin KaFai Laud9762e82018-12-13 10:41:48 -08009755 verbose_linfo(env, t, "%d: ", t);
9756 verbose_linfo(env, w, "%d: ", w);
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009757 verbose(env, "back-edge from insn %d to %d\n", t, w);
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009758 return -EINVAL;
9759 } else if (insn_state[w] == EXPLORED) {
9760 /* forward- or cross-edge */
9761 insn_state[t] = DISCOVERED | e;
9762 } else {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009763 verbose(env, "insn state internal bug\n");
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009764 return -EFAULT;
9765 }
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009766 return DONE_EXPLORING;
9767}
9768
Yonghong Songefdb22d2021-02-26 12:49:20 -08009769static int visit_func_call_insn(int t, int insn_cnt,
9770 struct bpf_insn *insns,
9771 struct bpf_verifier_env *env,
9772 bool visit_callee)
9773{
9774 int ret;
9775
9776 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
9777 if (ret)
9778 return ret;
9779
9780 if (t + 1 < insn_cnt)
9781 init_explored_state(env, t + 1);
9782 if (visit_callee) {
9783 init_explored_state(env, t);
Alexei Starovoitov86fc6ee2021-07-14 17:54:13 -07009784 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
9785 /* It's ok to allow recursion from CFG point of
9786 * view. __check_func_call() will do the actual
9787 * check.
9788 */
9789 bpf_pseudo_func(insns + t));
Yonghong Songefdb22d2021-02-26 12:49:20 -08009790 }
9791 return ret;
9792}
9793
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009794/* Visits the instruction at index t and returns one of the following:
9795 * < 0 - an error occurred
9796 * DONE_EXPLORING - the instruction was fully explored
9797 * KEEP_EXPLORING - there is still work to be done before it is fully explored
9798 */
9799static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
9800{
9801 struct bpf_insn *insns = env->prog->insnsi;
9802 int ret;
9803
Yonghong Song69c087b2021-02-26 12:49:25 -08009804 if (bpf_pseudo_func(insns + t))
9805 return visit_func_call_insn(t, insn_cnt, insns, env, true);
9806
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009807 /* All non-branch instructions have a single fall-through edge. */
9808 if (BPF_CLASS(insns[t].code) != BPF_JMP &&
9809 BPF_CLASS(insns[t].code) != BPF_JMP32)
9810 return push_insn(t, t + 1, FALLTHROUGH, env, false);
9811
9812 switch (BPF_OP(insns[t].code)) {
9813 case BPF_EXIT:
9814 return DONE_EXPLORING;
9815
9816 case BPF_CALL:
Alexei Starovoitovbfc6bb72021-07-14 17:54:14 -07009817 if (insns[t].imm == BPF_FUNC_timer_set_callback)
9818 /* Mark this call insn to trigger is_state_visited() check
9819 * before call itself is processed by __check_func_call().
9820 * Otherwise new async state will be pushed for further
9821 * exploration.
9822 */
9823 init_explored_state(env, t);
Yonghong Songefdb22d2021-02-26 12:49:20 -08009824 return visit_func_call_insn(t, insn_cnt, insns, env,
9825 insns[t].src_reg == BPF_PSEUDO_CALL);
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009826
9827 case BPF_JA:
9828 if (BPF_SRC(insns[t].code) != BPF_K)
9829 return -EINVAL;
9830
9831 /* unconditional jump with single edge */
9832 ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
9833 true);
9834 if (ret)
9835 return ret;
9836
9837 /* unconditional jmp is not a good pruning point,
9838 * but it's marked, since backtracking needs
9839 * to record jmp history in is_state_visited().
9840 */
9841 init_explored_state(env, t + insns[t].off + 1);
9842 /* tell verifier to check for equivalent states
9843 * after every call and jump
9844 */
9845 if (t + 1 < insn_cnt)
9846 init_explored_state(env, t + 1);
9847
9848 return ret;
9849
9850 default:
9851 /* conditional jump with two edges */
9852 init_explored_state(env, t);
9853 ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
9854 if (ret)
9855 return ret;
9856
9857 return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
9858 }
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009859}
9860
9861/* non-recursive depth-first-search to detect loops in BPF program
9862 * loop == back-edge in directed graph
9863 */
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +01009864static int check_cfg(struct bpf_verifier_env *env)
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009865{
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009866 int insn_cnt = env->prog->len;
Alexei Starovoitov7df737e2019-04-19 07:44:54 -07009867 int *insn_stack, *insn_state;
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009868 int ret = 0;
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009869 int i;
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009870
Alexei Starovoitov7df737e2019-04-19 07:44:54 -07009871 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009872 if (!insn_state)
9873 return -ENOMEM;
9874
Alexei Starovoitov7df737e2019-04-19 07:44:54 -07009875 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009876 if (!insn_stack) {
Alexei Starovoitov71dde682019-04-01 21:27:43 -07009877 kvfree(insn_state);
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009878 return -ENOMEM;
9879 }
9880
9881 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
9882 insn_stack[0] = 0; /* 0 is the first instruction */
Alexei Starovoitov7df737e2019-04-19 07:44:54 -07009883 env->cfg.cur_stack = 1;
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009884
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009885 while (env->cfg.cur_stack > 0) {
9886 int t = insn_stack[env->cfg.cur_stack - 1];
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009887
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009888 ret = visit_insn(t, insn_cnt, env);
9889 switch (ret) {
9890 case DONE_EXPLORING:
9891 insn_state[t] = EXPLORED;
9892 env->cfg.cur_stack--;
9893 break;
9894 case KEEP_EXPLORING:
9895 break;
9896 default:
9897 if (ret > 0) {
9898 verbose(env, "visit_insn internal bug\n");
9899 ret = -EFAULT;
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -08009900 }
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009901 goto err_free;
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009902 }
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009903 }
9904
Wedson Almeida Filho59e2e272020-11-21 01:55:09 +00009905 if (env->cfg.cur_stack < 0) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009906 verbose(env, "pop stack internal bug\n");
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009907 ret = -EFAULT;
9908 goto err_free;
9909 }
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009910
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009911 for (i = 0; i < insn_cnt; i++) {
9912 if (insn_state[i] != EXPLORED) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -07009913 verbose(env, "unreachable insn %d\n", i);
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009914 ret = -EINVAL;
9915 goto err_free;
9916 }
9917 }
9918 ret = 0; /* cfg looks good */
9919
9920err_free:
Alexei Starovoitov71dde682019-04-01 21:27:43 -07009921 kvfree(insn_state);
9922 kvfree(insn_stack);
Alexei Starovoitov7df737e2019-04-19 07:44:54 -07009923 env->cfg.insn_state = env->cfg.insn_stack = NULL;
Alexei Starovoitov475fb782014-09-26 00:17:05 -07009924 return ret;
9925}
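/* About the smallest program check_cfg() rejects with "back-edge" (raw
 * instructions; the array name is made up). A bpf_capable loader is exempted
 * in push_insn() and the loop is instead examined by the later bounded-loop
 * logic.
 */
#include <linux/bpf.h>

static const struct bpf_insn self_loop[] = {
	/* JA with off = -1 targets t + (-1) + 1 == t in push_insn(), so the
	 * destination is still DISCOVERED and the edge is a back-edge.
	 */
	{ .code = BPF_JMP | BPF_JA, .off = -1 },
	{ .code = BPF_JMP | BPF_EXIT },
};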
9926
Alexei Starovoitov09b28d72020-09-17 19:09:18 -07009927static int check_abnormal_return(struct bpf_verifier_env *env)
9928{
9929 int i;
9930
9931 for (i = 1; i < env->subprog_cnt; i++) {
9932 if (env->subprog_info[i].has_ld_abs) {
9933 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
9934 return -EINVAL;
9935 }
9936 if (env->subprog_info[i].has_tail_call) {
9937 verbose(env, "tail_call is not allowed in subprogs without BTF\n");
9938 return -EINVAL;
9939 }
9940 }
9941 return 0;
9942}
9943
Yonghong Song838e9692018-11-19 15:29:11 -08009944/* The minimum supported BTF func info size */
9945#define MIN_BPF_FUNCINFO_SIZE 8
9946#define MAX_FUNCINFO_REC_SIZE 252
9947
Martin KaFai Lauc454a462018-12-07 16:42:25 -08009948static int check_btf_func(struct bpf_verifier_env *env,
9949 const union bpf_attr *attr,
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -07009950 bpfptr_t uattr)
Yonghong Song838e9692018-11-19 15:29:11 -08009951{
Alexei Starovoitov09b28d72020-09-17 19:09:18 -07009952 const struct btf_type *type, *func_proto, *ret_type;
Peter Oskolkovd0b28182019-01-16 10:43:01 -08009953 u32 i, nfuncs, urec_size, min_size;
Yonghong Song838e9692018-11-19 15:29:11 -08009954 u32 krec_size = sizeof(struct bpf_func_info);
Martin KaFai Lauc454a462018-12-07 16:42:25 -08009955 struct bpf_func_info *krecord;
Alexei Starovoitov8c1b6e62019-11-14 10:57:16 -08009956 struct bpf_func_info_aux *info_aux = NULL;
Martin KaFai Lauc454a462018-12-07 16:42:25 -08009957 struct bpf_prog *prog;
9958 const struct btf *btf;
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -07009959 bpfptr_t urecord;
Peter Oskolkovd0b28182019-01-16 10:43:01 -08009960 u32 prev_offset = 0;
Alexei Starovoitov09b28d72020-09-17 19:09:18 -07009961 bool scalar_return;
Dan Carpentere7ed83d2020-06-04 11:54:36 +03009962 int ret = -ENOMEM;
Yonghong Song838e9692018-11-19 15:29:11 -08009963
9964 nfuncs = attr->func_info_cnt;
Alexei Starovoitov09b28d72020-09-17 19:09:18 -07009965 if (!nfuncs) {
9966 if (check_abnormal_return(env))
9967 return -EINVAL;
Yonghong Song838e9692018-11-19 15:29:11 -08009968 return 0;
Alexei Starovoitov09b28d72020-09-17 19:09:18 -07009969 }
Yonghong Song838e9692018-11-19 15:29:11 -08009970
9971 if (nfuncs != env->subprog_cnt) {
9972 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
9973 return -EINVAL;
9974 }
9975
9976 urec_size = attr->func_info_rec_size;
9977 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
9978 urec_size > MAX_FUNCINFO_REC_SIZE ||
9979 urec_size % sizeof(u32)) {
9980 verbose(env, "invalid func info rec size %u\n", urec_size);
9981 return -EINVAL;
9982 }
9983
Martin KaFai Lauc454a462018-12-07 16:42:25 -08009984 prog = env->prog;
9985 btf = prog->aux->btf;
Yonghong Song838e9692018-11-19 15:29:11 -08009986
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -07009987 urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
Yonghong Song838e9692018-11-19 15:29:11 -08009988 min_size = min_t(u32, krec_size, urec_size);
9989
Yonghong Songba64e7d2018-11-24 23:20:44 -08009990 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
Martin KaFai Lauc454a462018-12-07 16:42:25 -08009991 if (!krecord)
9992 return -ENOMEM;
Alexei Starovoitov8c1b6e62019-11-14 10:57:16 -08009993 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
9994 if (!info_aux)
9995 goto err_free;
Yonghong Songba64e7d2018-11-24 23:20:44 -08009996
Yonghong Song838e9692018-11-19 15:29:11 -08009997 for (i = 0; i < nfuncs; i++) {
9998 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
9999 if (ret) {
10000 if (ret == -E2BIG) {
10001				verbose(env, "nonzero trailing record in func info");
10002 /* set the size kernel expects so loader can zero
10003 * out the rest of the record.
10004 */
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -070010005 if (copy_to_bpfptr_offset(uattr,
10006 offsetof(union bpf_attr, func_info_rec_size),
10007 &min_size, sizeof(min_size)))
Yonghong Song838e9692018-11-19 15:29:11 -080010008 ret = -EFAULT;
10009 }
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010010 goto err_free;
Yonghong Song838e9692018-11-19 15:29:11 -080010011 }
10012
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -070010013 if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
Yonghong Song838e9692018-11-19 15:29:11 -080010014 ret = -EFAULT;
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010015 goto err_free;
Yonghong Song838e9692018-11-19 15:29:11 -080010016 }
10017
Martin KaFai Laud30d42e2018-12-05 17:35:44 -080010018 /* check insn_off */
Alexei Starovoitov09b28d72020-09-17 19:09:18 -070010019 ret = -EINVAL;
Yonghong Song838e9692018-11-19 15:29:11 -080010020 if (i == 0) {
Martin KaFai Laud30d42e2018-12-05 17:35:44 -080010021 if (krecord[i].insn_off) {
Yonghong Song838e9692018-11-19 15:29:11 -080010022 verbose(env,
Martin KaFai Laud30d42e2018-12-05 17:35:44 -080010023 "nonzero insn_off %u for the first func info record",
10024 krecord[i].insn_off);
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010025 goto err_free;
Yonghong Song838e9692018-11-19 15:29:11 -080010026 }
Martin KaFai Laud30d42e2018-12-05 17:35:44 -080010027 } else if (krecord[i].insn_off <= prev_offset) {
Yonghong Song838e9692018-11-19 15:29:11 -080010028 verbose(env,
10029 "same or smaller insn offset (%u) than previous func info record (%u)",
Martin KaFai Laud30d42e2018-12-05 17:35:44 -080010030 krecord[i].insn_off, prev_offset);
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010031 goto err_free;
Yonghong Song838e9692018-11-19 15:29:11 -080010032 }
10033
Martin KaFai Laud30d42e2018-12-05 17:35:44 -080010034 if (env->subprog_info[i].start != krecord[i].insn_off) {
Yonghong Song838e9692018-11-19 15:29:11 -080010035 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010036 goto err_free;
Yonghong Song838e9692018-11-19 15:29:11 -080010037 }
10038
10039 /* check type_id */
Yonghong Songba64e7d2018-11-24 23:20:44 -080010040 type = btf_type_by_id(btf, krecord[i].type_id);
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080010041 if (!type || !btf_type_is_func(type)) {
Yonghong Song838e9692018-11-19 15:29:11 -080010042 verbose(env, "invalid type id %d in func info",
Yonghong Songba64e7d2018-11-24 23:20:44 -080010043 krecord[i].type_id);
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010044 goto err_free;
Yonghong Song838e9692018-11-19 15:29:11 -080010045 }
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080010046 info_aux[i].linkage = BTF_INFO_VLEN(type->info);
Alexei Starovoitov09b28d72020-09-17 19:09:18 -070010047
10048 func_proto = btf_type_by_id(btf, type->type);
10049 if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
10050 /* btf_func_check() already verified it during BTF load */
10051 goto err_free;
10052 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
10053 scalar_return =
10054 btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
10055 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
10056 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
10057 goto err_free;
10058 }
10059 if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
10060 verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
10061 goto err_free;
10062 }
10063
Martin KaFai Laud30d42e2018-12-05 17:35:44 -080010064 prev_offset = krecord[i].insn_off;
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -070010065 bpfptr_add(&urecord, urec_size);
Yonghong Song838e9692018-11-19 15:29:11 -080010066 }
10067
Yonghong Songba64e7d2018-11-24 23:20:44 -080010068 prog->aux->func_info = krecord;
10069 prog->aux->func_info_cnt = nfuncs;
Alexei Starovoitov8c1b6e62019-11-14 10:57:16 -080010070 prog->aux->func_info_aux = info_aux;
Yonghong Song838e9692018-11-19 15:29:11 -080010071 return 0;
10072
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010073err_free:
Yonghong Songba64e7d2018-11-24 23:20:44 -080010074 kvfree(krecord);
Alexei Starovoitov8c1b6e62019-11-14 10:57:16 -080010075 kfree(info_aux);
Yonghong Song838e9692018-11-19 15:29:11 -080010076 return ret;
10077}
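/* What a well-formed func_info array looks like from the userspace side (the
 * type ids and insn offsets are made up): one record per subprog, insn_off
 * strictly increasing with the first record at 0, each type_id naming a
 * BTF_KIND_FUNC whose prototype's return type feeds the scalar_return checks
 * above.
 */
#include <linux/bpf.h>

static const struct bpf_func_info func_infos[] = {
	{ .insn_off = 0,  .type_id = 5 },	/* main prog: must start at insn 0 */
	{ .insn_off = 12, .type_id = 7 },	/* subprog starting at insn 12 */
};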
10078
Yonghong Songba64e7d2018-11-24 23:20:44 -080010079static void adjust_btf_func(struct bpf_verifier_env *env)
10080{
Alexei Starovoitov8c1b6e62019-11-14 10:57:16 -080010081 struct bpf_prog_aux *aux = env->prog->aux;
Yonghong Songba64e7d2018-11-24 23:20:44 -080010082 int i;
10083
Alexei Starovoitov8c1b6e62019-11-14 10:57:16 -080010084 if (!aux->func_info)
Yonghong Songba64e7d2018-11-24 23:20:44 -080010085 return;
10086
10087 for (i = 0; i < env->subprog_cnt; i++)
Alexei Starovoitov8c1b6e62019-11-14 10:57:16 -080010088 aux->func_info[i].insn_off = env->subprog_info[i].start;
Yonghong Songba64e7d2018-11-24 23:20:44 -080010089}
10090
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010091#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
10092 sizeof(((struct bpf_line_info *)(0))->line_col))
10093#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
10094
10095static int check_btf_line(struct bpf_verifier_env *env,
10096 const union bpf_attr *attr,
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -070010097 bpfptr_t uattr)
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010098{
10099 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
10100 struct bpf_subprog_info *sub;
10101 struct bpf_line_info *linfo;
10102 struct bpf_prog *prog;
10103 const struct btf *btf;
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -070010104 bpfptr_t ulinfo;
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010105 int err;
10106
10107 nr_linfo = attr->line_info_cnt;
10108 if (!nr_linfo)
10109 return 0;
Bixuan Cui0e6491b2021-09-11 08:55:57 +080010110 if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
10111 return -EINVAL;
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010112
10113 rec_size = attr->line_info_rec_size;
10114 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
10115 rec_size > MAX_LINEINFO_REC_SIZE ||
10116 rec_size & (sizeof(u32) - 1))
10117 return -EINVAL;
10118
10119 /* Need to zero it in case userspace passes
10120 * in a smaller bpf_line_info object.
10121 */
10122 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
10123 GFP_KERNEL | __GFP_NOWARN);
10124 if (!linfo)
10125 return -ENOMEM;
10126
10127 prog = env->prog;
10128 btf = prog->aux->btf;
10129
10130 s = 0;
10131 sub = env->subprog_info;
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -070010132 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010133 expected_size = sizeof(struct bpf_line_info);
10134 ncopy = min_t(u32, expected_size, rec_size);
10135 for (i = 0; i < nr_linfo; i++) {
10136 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
10137 if (err) {
10138 if (err == -E2BIG) {
10139 verbose(env, "nonzero trailing record in line_info");
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -070010140 if (copy_to_bpfptr_offset(uattr,
10141 offsetof(union bpf_attr, line_info_rec_size),
10142 &expected_size, sizeof(expected_size)))
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010143 err = -EFAULT;
10144 }
10145 goto err_free;
10146 }
10147
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -070010148 if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010149 err = -EFAULT;
10150 goto err_free;
10151 }
10152
10153 /*
10154 * Check insn_off to ensure
10155 * 1) strictly increasing AND
10156 * 2) bounded by prog->len
10157 *
10158 * The linfo[0].insn_off == 0 check logically falls into
10159 * the later "missing bpf_line_info for func..." case
10160 * because linfo[0].insn_off must also mark the start
10161 * of the first subprog, and the first subprog must have
10162 * subprog_info[0].start == 0.
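	 * As an illustration (offsets are arbitrary): with prog->len == 8,
	 * the insn_off sequence 0, 3, 5 is accepted, while 0, 5, 5 (not
	 * strictly increasing) and 0, 9 (out of bounds) are rejected.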
10163 */
10164 if ((i && linfo[i].insn_off <= prev_offset) ||
10165 linfo[i].insn_off >= prog->len) {
10166 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
10167 i, linfo[i].insn_off, prev_offset,
10168 prog->len);
10169 err = -EINVAL;
10170 goto err_free;
10171 }
10172
Martin KaFai Laufdbaa0b2018-12-19 13:01:01 -080010173 if (!prog->insnsi[linfo[i].insn_off].code) {
10174 verbose(env,
10175 "Invalid insn code at line_info[%u].insn_off\n",
10176 i);
10177 err = -EINVAL;
10178 goto err_free;
10179 }
10180
Martin KaFai Lau23127b32018-12-13 10:41:46 -080010181 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
10182 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010183 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
10184 err = -EINVAL;
10185 goto err_free;
10186 }
10187
10188 if (s != env->subprog_cnt) {
10189 if (linfo[i].insn_off == sub[s].start) {
10190 sub[s].linfo_idx = i;
10191 s++;
10192 } else if (sub[s].start < linfo[i].insn_off) {
10193 verbose(env, "missing bpf_line_info for func#%u\n", s);
10194 err = -EINVAL;
10195 goto err_free;
10196 }
10197 }
10198
10199 prev_offset = linfo[i].insn_off;
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -070010200 bpfptr_add(&ulinfo, rec_size);
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010201 }
10202
10203 if (s != env->subprog_cnt) {
10204 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
10205 env->subprog_cnt - s, s);
10206 err = -EINVAL;
10207 goto err_free;
10208 }
10209
10210 prog->aux->linfo = linfo;
10211 prog->aux->nr_linfo = nr_linfo;
10212
10213 return 0;
10214
10215err_free:
10216 kvfree(linfo);
10217 return err;
10218}
10219
10220static int check_btf_info(struct bpf_verifier_env *env,
10221 const union bpf_attr *attr,
Alexei Starovoitovaf2ac3e2021-05-13 17:36:05 -070010222 bpfptr_t uattr)
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010223{
10224 struct btf *btf;
10225 int err;
10226
Alexei Starovoitov09b28d72020-09-17 19:09:18 -070010227 if (!attr->func_info_cnt && !attr->line_info_cnt) {
10228 if (check_abnormal_return(env))
10229 return -EINVAL;
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010230 return 0;
Alexei Starovoitov09b28d72020-09-17 19:09:18 -070010231 }
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010232
10233 btf = btf_get_by_fd(attr->prog_btf_fd);
10234 if (IS_ERR(btf))
10235 return PTR_ERR(btf);
Alexei Starovoitov350a5c42021-03-07 14:52:48 -080010236 if (btf_is_kernel(btf)) {
10237 btf_put(btf);
10238 return -EACCES;
10239 }
Martin KaFai Lauc454a462018-12-07 16:42:25 -080010240 env->prog->aux->btf = btf;
10241
10242 err = check_btf_func(env, attr, uattr);
10243 if (err)
10244 return err;
10245
10246 err = check_btf_line(env, attr, uattr);
10247 if (err)
10248 return err;
10249
10250 return 0;
Alexei Starovoitov0246e642014-09-26 00:17:04 -070010251}
10252
Edward Creef1174f72017-08-07 15:26:19 +010010253/* check %cur's range satisfies %old's */
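/* For example (bounds are illustrative): old unsigned range [0, 100]
 * subsumes cur [10, 20], so that pair passes; cur [10, 200] could reach
 * values the old state never proved safe, so it fails. The same
 * containment must hold for all of the 64-bit and 32-bit signed and
 * unsigned ranges checked below.
 */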
10254static bool range_within(struct bpf_reg_state *old,
10255 struct bpf_reg_state *cur)
Alexei Starovoitov969bf052016-05-05 19:49:10 -070010256{
Edward Creeb03c9f92017-08-07 15:26:36 +010010257 return old->umin_value <= cur->umin_value &&
10258 old->umax_value >= cur->umax_value &&
10259 old->smin_value <= cur->smin_value &&
Daniel Borkmannfd675182021-02-05 20:48:21 +010010260 old->smax_value >= cur->smax_value &&
10261 old->u32_min_value <= cur->u32_min_value &&
10262 old->u32_max_value >= cur->u32_max_value &&
10263 old->s32_min_value <= cur->s32_min_value &&
10264 old->s32_max_value >= cur->s32_max_value;
Edward Creef1174f72017-08-07 15:26:19 +010010265}
10266
Edward Creef1174f72017-08-07 15:26:19 +010010267/* If in the old state two registers had the same id, then they need to have
10268 * the same id in the new state as well. But that id could be different from
10269 * the old state, so we need to track the mapping from old to new ids.
10270 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
10271 * regs with old id 5 must also have new id 9 for the new state to be safe. But
10272 * regs with a different old id could still have new id 9, we don't care about
10273 * that.
10274 * So we look through our idmap to see if this old id has been seen before. If
10275 * so, we require the new id to match; otherwise, we add the id pair to the map.
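 * For example (ids are arbitrary): once the pair (old 5 -> cur 9) is
 * recorded, a later check_ids(5, 9, idmap) returns true while
 * check_ids(5, 7, idmap) returns false; a first-time check_ids(6, 9, idmap)
 * records the pair (6 -> 9) and returns true.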
10276 */
Lorenz Bauerc9e73e32021-04-29 14:46:56 +010010277static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
Edward Creef1174f72017-08-07 15:26:19 +010010278{
10279 unsigned int i;
10280
Lorenz Bauerc9e73e32021-04-29 14:46:56 +010010281 for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
Edward Creef1174f72017-08-07 15:26:19 +010010282 if (!idmap[i].old) {
10283 /* Reached an empty slot; haven't seen this id before */
10284 idmap[i].old = old_id;
10285 idmap[i].cur = cur_id;
10286 return true;
10287 }
10288 if (idmap[i].old == old_id)
10289 return idmap[i].cur == cur_id;
10290 }
10291 /* We ran out of idmap slots, which should be impossible */
10292 WARN_ON_ONCE(1);
10293 return false;
10294}
10295
Alexei Starovoitov9242b5f2018-12-13 11:42:34 -080010296static void clean_func_state(struct bpf_verifier_env *env,
10297 struct bpf_func_state *st)
10298{
10299 enum bpf_reg_liveness live;
10300 int i, j;
10301
10302 for (i = 0; i < BPF_REG_FP; i++) {
10303 live = st->regs[i].live;
10304 /* liveness must not touch this register anymore */
10305 st->regs[i].live |= REG_LIVE_DONE;
10306 if (!(live & REG_LIVE_READ))
10307 /* since the register is unused, clear its state
10308 * to make further comparison simpler
10309 */
Daniel Borkmannf54c7892019-12-22 23:37:40 +010010310 __mark_reg_not_init(env, &st->regs[i]);
Alexei Starovoitov9242b5f2018-12-13 11:42:34 -080010311 }
10312
10313 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
10314 live = st->stack[i].spilled_ptr.live;
10315 /* liveness must not touch this stack slot anymore */
10316 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
10317 if (!(live & REG_LIVE_READ)) {
Daniel Borkmannf54c7892019-12-22 23:37:40 +010010318 __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
Alexei Starovoitov9242b5f2018-12-13 11:42:34 -080010319 for (j = 0; j < BPF_REG_SIZE; j++)
10320 st->stack[i].slot_type[j] = STACK_INVALID;
10321 }
10322 }
10323}
10324
10325static void clean_verifier_state(struct bpf_verifier_env *env,
10326 struct bpf_verifier_state *st)
10327{
10328 int i;
10329
10330 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
10331 /* all regs in this state in all frames were already marked */
10332 return;
10333
10334 for (i = 0; i <= st->curframe; i++)
10335 clean_func_state(env, st->frame[i]);
10336}
10337
10338/* the parentage chains form a tree.
10339 * the verifier states are added to state lists at given insn and
10340 * pushed into state stack for future exploration.
10341 * when the verifier reaches bpf_exit insn some of the verifier states
10342 * stored in the state lists have their final liveness state already,
10343 * but a lot of states will get revised from a liveness point of view when
10344 * the verifier explores other branches.
10345 * Example:
10346 * 1: r0 = 1
10347 * 2: if r1 == 100 goto pc+1
10348 * 3: r0 = 2
10349 * 4: exit
10350 * when the verifier reaches exit insn the register r0 in the state list of
10351 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
10352 * of insn 2 and goes exploring further. At insn 4 it will walk the
10353 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
10354 *
10355 * Since the verifier pushes the branch states as it sees them while exploring
10356 * the program, walking the branch instruction for the second
10357 * time means that all states below this branch were already explored and
Zhen Lei8fb33b62021-05-25 10:56:59 +080010358 * their final liveness marks are already propagated.
Alexei Starovoitov9242b5f2018-12-13 11:42:34 -080010359 * Hence when the verifier completes the search of the state list in is_state_visited()
10360 * we can call this clean_live_states() function to mark all liveness states
10361 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
10362 * will not be used.
10363 * This function also clears the registers and stack slots that were not
10364 * READ, to simplify state merging.
10365 *
10366 * An important note here is that walking the same branch instruction in the
10367 * callee doesn't mean that the states are DONE. The verifier has to compare
10368 * the callsites.
10369 */
10370static void clean_live_states(struct bpf_verifier_env *env, int insn,
10371 struct bpf_verifier_state *cur)
10372{
10373 struct bpf_verifier_state_list *sl;
10374 int i;
10375
Alexei Starovoitov5d839022019-05-21 20:17:05 -070010376 sl = *explored_state(env, insn);
Alexei Starovoitova8f500a2019-05-21 20:17:06 -070010377 while (sl) {
Alexei Starovoitov25897262019-06-15 12:12:20 -070010378 if (sl->state.branches)
10379 goto next;
Alexei Starovoitovdc2a4eb2019-05-21 20:17:07 -070010380 if (sl->state.insn_idx != insn ||
10381 sl->state.curframe != cur->curframe)
Alexei Starovoitov9242b5f2018-12-13 11:42:34 -080010382 goto next;
10383 for (i = 0; i <= cur->curframe; i++)
10384 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
10385 goto next;
10386 clean_verifier_state(env, &sl->state);
10387next:
10388 sl = sl->next;
10389 }
10390}
10391
Edward Creef1174f72017-08-07 15:26:19 +010010392/* Returns true if (rold safe implies rcur safe) */
Daniel Borkmanne042aa52021-07-16 09:18:21 +000010393static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
10394 struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
Edward Creef1174f72017-08-07 15:26:19 +010010395{
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010396 bool equal;
10397
Edward Creedc503a82017-08-15 20:34:35 +010010398 if (!(rold->live & REG_LIVE_READ))
10399 /* explored state didn't use this */
10400 return true;
10401
Edward Cree679c7822018-08-22 20:02:19 +010010402 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010403
10404 if (rold->type == PTR_TO_STACK)
10405 /* two stack pointers are equal only if they're pointing to
10406 * the same stack frame, since fp-8 in foo != fp-8 in bar
10407 */
10408 return equal && rold->frameno == rcur->frameno;
10409
10410 if (equal)
Edward Creef1174f72017-08-07 15:26:19 +010010411 return true;
10412
10413 if (rold->type == NOT_INIT)
10414 /* explored state can't have used this */
10415 return true;
10416 if (rcur->type == NOT_INIT)
Alexei Starovoitov969bf052016-05-05 19:49:10 -070010417 return false;
Edward Creef1174f72017-08-07 15:26:19 +010010418 switch (rold->type) {
10419 case SCALAR_VALUE:
Daniel Borkmanne042aa52021-07-16 09:18:21 +000010420 if (env->explore_alu_limits)
10421 return false;
Edward Creef1174f72017-08-07 15:26:19 +010010422 if (rcur->type == SCALAR_VALUE) {
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -070010423 if (!rold->precise && !rcur->precise)
10424 return true;
Edward Creef1174f72017-08-07 15:26:19 +010010425 /* new val must satisfy old val knowledge */
10426 return range_within(rold, rcur) &&
10427 tnum_in(rold->var_off, rcur->var_off);
10428 } else {
Jann Horn179d1c52017-12-18 20:11:59 -080010429 /* We're trying to use a pointer in place of a scalar.
10430 * Even if the scalar was unbounded, this could lead to
10431 * pointer leaks because scalars are allowed to leak
10432 * while pointers are not. We could make this safe in
10433 * special cases if root is calling us, but it's
10434 * probably not worth the hassle.
Edward Creef1174f72017-08-07 15:26:19 +010010435 */
Jann Horn179d1c52017-12-18 20:11:59 -080010436 return false;
Edward Creef1174f72017-08-07 15:26:19 +010010437 }
Yonghong Song69c087b2021-02-26 12:49:25 -080010438 case PTR_TO_MAP_KEY:
Edward Creef1174f72017-08-07 15:26:19 +010010439 case PTR_TO_MAP_VALUE:
Edward Cree1b688a12017-08-23 15:10:50 +010010440 /* If the new min/max/var_off satisfy the old ones and
10441 * everything else matches, we are OK.
Alexei Starovoitovd83525c2019-01-31 15:40:04 -080010442 * 'id' is not compared, since it's only used for maps with
10443 * bpf_spin_lock inside the map element; in such cases, if
10444 * the rest of the prog is valid for one map element then
10445 * it's valid for all map elements regardless of the key
10446 * used in bpf_map_lookup()
Edward Cree1b688a12017-08-23 15:10:50 +010010447 */
10448 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
10449 range_within(rold, rcur) &&
10450 tnum_in(rold->var_off, rcur->var_off);
Edward Creef1174f72017-08-07 15:26:19 +010010451 case PTR_TO_MAP_VALUE_OR_NULL:
10452 /* a PTR_TO_MAP_VALUE could be safe to use as a
10453 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
10454 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
10455 * checked, doing so could have affected others with the same
10456 * id, and we can't check for that because we lost the id when
10457 * we converted to a PTR_TO_MAP_VALUE.
10458 */
10459 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
10460 return false;
10461 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
10462 return false;
10463 /* Check our ids match any regs they're supposed to */
10464 return check_ids(rold->id, rcur->id, idmap);
Daniel Borkmannde8f3a82017-09-25 02:25:51 +020010465 case PTR_TO_PACKET_META:
Edward Creef1174f72017-08-07 15:26:19 +010010466 case PTR_TO_PACKET:
Daniel Borkmannde8f3a82017-09-25 02:25:51 +020010467 if (rcur->type != rold->type)
Edward Creef1174f72017-08-07 15:26:19 +010010468 return false;
10469 /* We must have at least as much range as the old ptr
10470 * did, so that any accesses which were safe before are
10471 * still safe. This is true even if old range < old off,
10472 * since someone could have accessed through (ptr - k), or
10473 * even done ptr -= k in a register, to get a safe access.
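	 * For example (ranges are illustrative): if the old pointer had
	 * range 16, later code may have been proven safe loading at
	 * offset 12; a current pointer with only range 8 gives no such
	 * guarantee, so the old state cannot prove the current one safe.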
10474 */
10475 if (rold->range > rcur->range)
10476 return false;
10477 /* If the offsets don't match, we can't trust our alignment;
10478 * nor can we be sure that we won't fall out of range.
10479 */
10480 if (rold->off != rcur->off)
10481 return false;
10482 /* id relations must be preserved */
10483 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
10484 return false;
10485 /* new val must satisfy old val knowledge */
10486 return range_within(rold, rcur) &&
10487 tnum_in(rold->var_off, rcur->var_off);
10488 case PTR_TO_CTX:
10489 case CONST_PTR_TO_MAP:
Edward Creef1174f72017-08-07 15:26:19 +010010490 case PTR_TO_PACKET_END:
Petar Penkovd58e4682018-09-14 07:46:18 -070010491 case PTR_TO_FLOW_KEYS:
Joe Stringerc64b7982018-10-02 13:35:33 -070010492 case PTR_TO_SOCKET:
10493 case PTR_TO_SOCKET_OR_NULL:
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -080010494 case PTR_TO_SOCK_COMMON:
10495 case PTR_TO_SOCK_COMMON_OR_NULL:
Martin KaFai Lau655a51e2019-02-09 23:22:24 -080010496 case PTR_TO_TCP_SOCK:
10497 case PTR_TO_TCP_SOCK_OR_NULL:
Jonathan Lemonfada7fd2019-06-06 13:59:40 -070010498 case PTR_TO_XDP_SOCK:
Edward Creef1174f72017-08-07 15:26:19 +010010499 /* Only valid matches are exact, which memcmp() above
10500 * would have accepted
10501 */
10502 default:
10503 /* Don't know what's going on, just say it's not safe */
10504 return false;
10505 }
Alexei Starovoitov969bf052016-05-05 19:49:10 -070010506
Edward Creef1174f72017-08-07 15:26:19 +010010507 /* Shouldn't get here; if we do, say it's not safe */
10508 WARN_ON_ONCE(1);
Alexei Starovoitov969bf052016-05-05 19:49:10 -070010509 return false;
10510}
10511
Daniel Borkmanne042aa52021-07-16 09:18:21 +000010512static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
10513 struct bpf_func_state *cur, struct bpf_id_pair *idmap)
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070010514{
10515 int i, spi;
10516
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070010517 /* walk slots of the explored stack and ignore any additional
10518 * slots in the current stack, since the explored (safe) state
10519 * didn't use them
10520 */
10521 for (i = 0; i < old->allocated_stack; i++) {
10522 spi = i / BPF_REG_SIZE;
10523
Alexei Starovoitovb2339202018-12-13 11:42:31 -080010524 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
10525 i += BPF_REG_SIZE - 1;
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -080010526 /* explored state didn't use this */
Gianluca Borellofd05e572017-12-23 10:09:55 +000010527 continue;
Alexei Starovoitovb2339202018-12-13 11:42:31 -080010528 }
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -080010529
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070010530 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
10531 continue;
Alexei Starovoitov19e2dbb2018-12-13 11:42:33 -080010532
10533 /* explored stack has more populated slots than current stack
10534 * and these slots were used
10535 */
10536 if (i >= cur->allocated_stack)
10537 return false;
10538
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -080010539 /* if old state was safe with misc data in the stack
10540 * it will be safe with zero-initialized stack.
10541 * The opposite is not true.
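	 * (Illustration: a load from a STACK_ZERO slot yields the constant 0,
	 * which is just one of the arbitrary values already covered by
	 * STACK_MISC, whereas safety proven only for zeroed slots says
	 * nothing about arbitrary misc data.)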
10542 */
10543 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
10544 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
10545 continue;
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070010546 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
10547 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
10548 /* Ex: old explored (safe) state has STACK_SPILL in
Randy Dunlapb8c1a302020-08-06 20:31:41 -070010549 * this stack slot, but current has STACK_MISC ->
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070010550 * these verifier states are not equivalent,
10551 * return false to continue verification of this path
10552 */
10553 return false;
Martin KaFai Lau27113c52021-09-21 17:49:34 -070010554 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070010555 continue;
Martin KaFai Lau27113c52021-09-21 17:49:34 -070010556 if (!is_spilled_reg(&old->stack[spi]))
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070010557 continue;
Daniel Borkmanne042aa52021-07-16 09:18:21 +000010558 if (!regsafe(env, &old->stack[spi].spilled_ptr,
10559 &cur->stack[spi].spilled_ptr, idmap))
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070010560 /* when explored and current stack slots are both storing
10561 * spilled registers, check that the stored pointer types
10562 * are the same as well.
10563 * Ex: explored safe path could have stored
10564 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
10565 * but current path has stored:
10566 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
10567 * such verifier states are not equivalent.
10568 * return false to continue verification of this path
10569 */
10570 return false;
10571 }
10572 return true;
10573}
10574
Joe Stringerfd978bf72018-10-02 13:35:35 -070010575static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
10576{
10577 if (old->acquired_refs != cur->acquired_refs)
10578 return false;
10579 return !memcmp(old->refs, cur->refs,
10580 sizeof(*old->refs) * old->acquired_refs);
10581}
10582
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010583/* compare two verifier states
10584 *
10585 * all states stored in state_list are known to be valid, since
10586 * verifier reached 'bpf_exit' instruction through them
10587 *
10588 * this function is called when the verifier explores different branches of
10589 * execution popped from the state stack. If it sees an old state that has
10590 * more strict register state and more strict stack state then this execution
10591 * branch doesn't need to be explored further, since verifier already
10592 * concluded that more strict state leads to valid finish.
10593 *
10594 * Therefore two states are equivalent if register state is more conservative
10595 * and explored stack state is more conservative than the current one.
10596 * Example:
10597 * explored current
10598 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
10599 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
10600 *
10601 * In other words if current stack state (one being explored) has more
10602 * valid slots than old one that already passed validation, it means
10603 * the verifier can stop exploring and conclude that current state is valid too
10604 *
10605 * Similarly with registers. If explored state has register type as invalid
10606 * whereas register type in current state is meaningful, it means that
10607 * the current state will reach 'bpf_exit' instruction safely
10608 */
Lorenz Bauerc9e73e32021-04-29 14:46:56 +010010609static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010610 struct bpf_func_state *cur)
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010611{
10612 int i;
10613
Lorenz Bauerc9e73e32021-04-29 14:46:56 +010010614 memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
10615 for (i = 0; i < MAX_BPF_REG; i++)
Daniel Borkmanne042aa52021-07-16 09:18:21 +000010616 if (!regsafe(env, &old->regs[i], &cur->regs[i],
10617 env->idmap_scratch))
Lorenz Bauerc9e73e32021-04-29 14:46:56 +010010618 return false;
10619
Daniel Borkmanne042aa52021-07-16 09:18:21 +000010620 if (!stacksafe(env, old, cur, env->idmap_scratch))
Alexei Starovoitov1a0dc1a2016-05-05 19:49:09 -070010621 return false;
Edward Creef1174f72017-08-07 15:26:19 +010010622
Joe Stringerfd978bf72018-10-02 13:35:35 -070010623 if (!refsafe(old, cur))
Lorenz Bauerc9e73e32021-04-29 14:46:56 +010010624 return false;
10625
10626 return true;
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010627}
10628
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010629static bool states_equal(struct bpf_verifier_env *env,
10630 struct bpf_verifier_state *old,
10631 struct bpf_verifier_state *cur)
Edward Creedc503a82017-08-15 20:34:35 +010010632{
Edward Creedc503a82017-08-15 20:34:35 +010010633 int i;
10634
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010635 if (old->curframe != cur->curframe)
10636 return false;
10637
Daniel Borkmann979d63d2019-01-03 00:58:34 +010010638 /* Verification state from speculative execution simulation
10639 * must never prune a non-speculative execution one.
10640 */
10641 if (old->speculative && !cur->speculative)
10642 return false;
10643
Alexei Starovoitovd83525c2019-01-31 15:40:04 -080010644 if (old->active_spin_lock != cur->active_spin_lock)
10645 return false;
10646
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010647 /* for states to be equal callsites have to be the same
10648 * and all frame states need to be equivalent
10649 */
10650 for (i = 0; i <= old->curframe; i++) {
10651 if (old->frame[i]->callsite != cur->frame[i]->callsite)
10652 return false;
Lorenz Bauerc9e73e32021-04-29 14:46:56 +010010653 if (!func_states_equal(env, old->frame[i], cur->frame[i]))
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010654 return false;
10655 }
10656 return true;
10657}
10658
Jiong Wang5327ed32019-05-24 23:25:12 +010010659/* Return 0 if no propagation happened. Return a negative error code if an
10660 * error happened. Otherwise, return the propagated bit.
10661 */
Jiong Wang55e7f3b2019-04-12 22:59:36 +010010662static int propagate_liveness_reg(struct bpf_verifier_env *env,
10663 struct bpf_reg_state *reg,
10664 struct bpf_reg_state *parent_reg)
10665{
Jiong Wang5327ed32019-05-24 23:25:12 +010010666 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
10667 u8 flag = reg->live & REG_LIVE_READ;
Jiong Wang55e7f3b2019-04-12 22:59:36 +010010668 int err;
10669
Jiong Wang5327ed32019-05-24 23:25:12 +010010670 /* When we get here, the read flags of PARENT_REG or REG could be any of
10671 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
10672 * for propagation if PARENT_REG already has the strongest REG_LIVE_READ64.
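	 * For example, if PARENT_REG only has REG_LIVE_READ32 but REG has
	 * REG_LIVE_READ64, the stronger 64-bit read flag is propagated to
	 * the parent and returned.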
10673 */
10674 if (parent_flag == REG_LIVE_READ64 ||
10675 /* Or if there is no read flag from REG. */
10676 !flag ||
10677 /* Or if the read flag from REG is the same as PARENT_REG. */
10678 parent_flag == flag)
Jiong Wang55e7f3b2019-04-12 22:59:36 +010010679 return 0;
10680
Jiong Wang5327ed32019-05-24 23:25:12 +010010681 err = mark_reg_read(env, reg, parent_reg, flag);
Jiong Wang55e7f3b2019-04-12 22:59:36 +010010682 if (err)
10683 return err;
10684
Jiong Wang5327ed32019-05-24 23:25:12 +010010685 return flag;
Jiong Wang55e7f3b2019-04-12 22:59:36 +010010686}
10687
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010688/* A write screens off any subsequent reads; but write marks come from the
10689 * straight-line code between a state and its parent. When we arrive at an
10690 * equivalent state (jump target or such) we didn't arrive by the straight-line
10691 * code, so read marks in the state must propagate to the parent regardless
10692 * of the state's write marks. That's what 'parent == state->parent' comparison
Edward Cree679c7822018-08-22 20:02:19 +010010693 * in mark_reg_read() is for.
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010694 */
10695static int propagate_liveness(struct bpf_verifier_env *env,
10696 const struct bpf_verifier_state *vstate,
10697 struct bpf_verifier_state *vparent)
10698{
Jiong Wang3f8cafa2019-04-12 22:59:35 +010010699 struct bpf_reg_state *state_reg, *parent_reg;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010700 struct bpf_func_state *state, *parent;
Jiong Wang3f8cafa2019-04-12 22:59:35 +010010701 int i, frame, err = 0;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010702
10703 if (vparent->curframe != vstate->curframe) {
10704 WARN(1, "propagate_live: parent frame %d current frame %d\n",
10705 vparent->curframe, vstate->curframe);
10706 return -EFAULT;
10707 }
Edward Creedc503a82017-08-15 20:34:35 +010010708 /* Propagate read liveness of registers... */
10709 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
Jakub Kicinski83d16312019-03-21 14:34:36 -070010710 for (frame = 0; frame <= vstate->curframe; frame++) {
Jiong Wang3f8cafa2019-04-12 22:59:35 +010010711 parent = vparent->frame[frame];
10712 state = vstate->frame[frame];
10713 parent_reg = parent->regs;
10714 state_reg = state->regs;
Jakub Kicinski83d16312019-03-21 14:34:36 -070010715 /* We don't need to worry about FP liveness, it's read-only */
10716 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
Jiong Wang55e7f3b2019-04-12 22:59:36 +010010717 err = propagate_liveness_reg(env, &state_reg[i],
10718 &parent_reg[i]);
Jiong Wang5327ed32019-05-24 23:25:12 +010010719 if (err < 0)
Jiong Wang3f8cafa2019-04-12 22:59:35 +010010720 return err;
Jiong Wang5327ed32019-05-24 23:25:12 +010010721 if (err == REG_LIVE_READ64)
10722 mark_insn_zext(env, &parent_reg[i]);
Edward Creedc503a82017-08-15 20:34:35 +010010723 }
Edward Creedc503a82017-08-15 20:34:35 +010010724
Jiong Wang1b04aee2019-04-12 22:59:34 +010010725 /* Propagate stack slots. */
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010726 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
10727 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
Jiong Wang3f8cafa2019-04-12 22:59:35 +010010728 parent_reg = &parent->stack[i].spilled_ptr;
10729 state_reg = &state->stack[i].spilled_ptr;
Jiong Wang55e7f3b2019-04-12 22:59:36 +010010730 err = propagate_liveness_reg(env, state_reg,
10731 parent_reg);
Jiong Wang5327ed32019-05-24 23:25:12 +010010732 if (err < 0)
Jiong Wang3f8cafa2019-04-12 22:59:35 +010010733 return err;
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010734 }
Edward Creedc503a82017-08-15 20:34:35 +010010735 }
Jiong Wang5327ed32019-05-24 23:25:12 +010010736 return 0;
Edward Creedc503a82017-08-15 20:34:35 +010010737}
10738
Alexei Starovoitova3ce6852019-06-28 09:24:09 -070010739/* find precise scalars in the previous equivalent state and
10740 * propagate them into the current state
10741 */
10742static int propagate_precision(struct bpf_verifier_env *env,
10743 const struct bpf_verifier_state *old)
10744{
10745 struct bpf_reg_state *state_reg;
10746 struct bpf_func_state *state;
10747 int i, err = 0;
10748
10749 state = old->frame[old->curframe];
10750 state_reg = state->regs;
10751 for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
10752 if (state_reg->type != SCALAR_VALUE ||
10753 !state_reg->precise)
10754 continue;
10755 if (env->log.level & BPF_LOG_LEVEL2)
10756 verbose(env, "propagating r%d\n", i);
10757 err = mark_chain_precision(env, i);
10758 if (err < 0)
10759 return err;
10760 }
10761
10762 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
Martin KaFai Lau27113c52021-09-21 17:49:34 -070010763 if (!is_spilled_reg(&state->stack[i]))
Alexei Starovoitova3ce6852019-06-28 09:24:09 -070010764 continue;
10765 state_reg = &state->stack[i].spilled_ptr;
10766 if (state_reg->type != SCALAR_VALUE ||
10767 !state_reg->precise)
10768 continue;
10769 if (env->log.level & BPF_LOG_LEVEL2)
10770 verbose(env, "propagating fp%d\n",
10771 (-i - 1) * BPF_REG_SIZE);
10772 err = mark_chain_precision_stack(env, i);
10773 if (err < 0)
10774 return err;
10775 }
10776 return 0;
10777}
10778
Alexei Starovoitov25897262019-06-15 12:12:20 -070010779static bool states_maybe_looping(struct bpf_verifier_state *old,
10780 struct bpf_verifier_state *cur)
10781{
10782 struct bpf_func_state *fold, *fcur;
10783 int i, fr = cur->curframe;
10784
10785 if (old->curframe != fr)
10786 return false;
10787
10788 fold = old->frame[fr];
10789 fcur = cur->frame[fr];
10790 for (i = 0; i < MAX_BPF_REG; i++)
10791 if (memcmp(&fold->regs[i], &fcur->regs[i],
10792 offsetof(struct bpf_reg_state, parent)))
10793 return false;
10794 return true;
10795}
10796
10797
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +010010798static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010799{
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +010010800 struct bpf_verifier_state_list *new_sl;
Alexei Starovoitov9f4686c2019-04-01 21:27:41 -070010801 struct bpf_verifier_state_list *sl, **pprev;
Edward Cree679c7822018-08-22 20:02:19 +010010802 struct bpf_verifier_state *cur = env->cur_state, *new;
Alexei Starovoitovceefbc92018-12-03 22:46:06 -080010803 int i, j, err, states_cnt = 0;
Alexei Starovoitov10d274e2019-08-22 22:52:12 -070010804 bool add_new_state = env->test_state_freq ? true : false;
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010805
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -070010806 cur->last_insn_idx = env->prev_insn_idx;
Alexei Starovoitova8f500a2019-05-21 20:17:06 -070010807 if (!env->insn_aux_data[insn_idx].prune_point)
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010808 /* this 'insn_idx' instruction wasn't marked, so we will not
10809 * be doing state search here
10810 */
10811 return 0;
10812
Alexei Starovoitov25897262019-06-15 12:12:20 -070010813 /* bpf progs typically have a pruning point every 4 instructions
10814 * http://vger.kernel.org/bpfconf2019.html#session-1
10815 * Do not add new state for future pruning if the verifier hasn't seen
10816 * at least 2 jumps and at least 8 instructions.
10817 * This heuristic helps decrease 'total_states' and 'peak_states' metrics.
10818 * In tests that amounts to up to a 50% reduction in total verifier
10819 * memory consumption and 20% verifier time speedup.
10820 */
10821 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
10822 env->insn_processed - env->prev_insn_processed >= 8)
10823 add_new_state = true;
10824
Alexei Starovoitova8f500a2019-05-21 20:17:06 -070010825 pprev = explored_state(env, insn_idx);
10826 sl = *pprev;
10827
Alexei Starovoitov9242b5f2018-12-13 11:42:34 -080010828 clean_live_states(env, insn_idx, cur);
10829
Alexei Starovoitova8f500a2019-05-21 20:17:06 -070010830 while (sl) {
Alexei Starovoitovdc2a4eb2019-05-21 20:17:07 -070010831 states_cnt++;
10832 if (sl->state.insn_idx != insn_idx)
10833 goto next;
Alexei Starovoitovbfc6bb72021-07-14 17:54:14 -070010834
Alexei Starovoitov25897262019-06-15 12:12:20 -070010835 if (sl->state.branches) {
Alexei Starovoitovbfc6bb72021-07-14 17:54:14 -070010836 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
10837
10838 if (frame->in_async_callback_fn &&
10839 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
10840 /* Different async_entry_cnt means that the verifier is
10841 * processing another entry into async callback.
10842 * Seeing the same state is not an indication of infinite
10843 * loop or infinite recursion.
10844 * But finding the same state doesn't mean that it's safe
10845 * to stop processing the current state. The previous state
10846 * hasn't yet reached bpf_exit, since state.branches > 0.
10847 * Checking in_async_callback_fn alone is not enough either,
10848 * since the verifier still needs to catch infinite loops
10849 * inside async callbacks.
10850 */
10851 } else if (states_maybe_looping(&sl->state, cur) &&
10852 states_equal(env, &sl->state, cur)) {
Alexei Starovoitov25897262019-06-15 12:12:20 -070010853 verbose_linfo(env, insn_idx, "; ");
10854 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
10855 return -EINVAL;
10856 }
10857 /* if the verifier is processing a loop, avoid adding new state
10858 * too often, since different loop iterations have distinct
10859 * states and may not help future pruning.
10860 * This threshold shouldn't be too low to make sure that
10861 * a loop with large bound will be rejected quickly.
10862 * The most abusive loop will be:
10863 * r1 += 1
10864 * if r1 < 1000000 goto pc-2
10865 * 1M insn_processed limit / 100 == 10k peak states.
10866 * This threshold shouldn't be too high either, since states
10867 * at the end of the loop are likely to be useful in pruning.
10868 */
10869 if (env->jmps_processed - env->prev_jmps_processed < 20 &&
10870 env->insn_processed - env->prev_insn_processed < 100)
10871 add_new_state = false;
10872 goto miss;
10873 }
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070010874 if (states_equal(env, &sl->state, cur)) {
Alexei Starovoitov9f4686c2019-04-01 21:27:41 -070010875 sl->hit_cnt++;
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010876 /* reached equivalent register/stack state,
Edward Creedc503a82017-08-15 20:34:35 +010010877 * prune the search.
10878 * Registers read by the continuation are read by us.
Edward Cree8e9cd9c2017-08-23 15:11:21 +010010879 * If we have any write marks in env->cur_state, they
10880 * will prevent corresponding reads in the continuation
10881 * from reaching our parent (an explored_state). Our
10882 * own state will get the read marks recorded, but
10883 * they'll be immediately forgotten as we're pruning
10884 * this state and will pop a new one.
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010885 */
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010886 err = propagate_liveness(env, &sl->state, cur);
Alexei Starovoitova3ce6852019-06-28 09:24:09 -070010887
10888 /* if previous state reached the exit with precision and
10889 * current state is equivalent to it (except precision marks),
10890 * the precision needs to be propagated back into
10891 * the current state.
10892 */
10893 err = err ? : push_jmp_history(env, cur);
10894 err = err ? : propagate_precision(env, &sl->state);
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010895 if (err)
10896 return err;
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010897 return 1;
Edward Creedc503a82017-08-15 20:34:35 +010010898 }
Alexei Starovoitov25897262019-06-15 12:12:20 -070010899miss:
10900 /* when a new state is not going to be added, do not increase the miss count.
10901 * Otherwise several loop iterations will remove the state
10902 * recorded earlier. The goal of these heuristics is to have
10903 * states from some iterations of the loop (some in the beginning
10904 * and some at the end) to help pruning.
10905 */
10906 if (add_new_state)
10907 sl->miss_cnt++;
Alexei Starovoitov9f4686c2019-04-01 21:27:41 -070010908 /* heuristic to determine whether this state is beneficial
10909 * to keep checking from a state equivalence point of view.
10910 * Higher numbers increase max_states_per_insn and verification time,
10911 * but do not meaningfully decrease insn_processed.
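	 * For example, with the formula below a state that has never been
	 * hit is dropped on its 4th miss, while a state with 10 hits
	 * survives up to 33 misses.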
10912 */
10913 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
10914 /* the state is unlikely to be useful. Remove it to
10915 * speed up verification
10916 */
10917 *pprev = sl->next;
10918 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
Alexei Starovoitov25897262019-06-15 12:12:20 -070010919 u32 br = sl->state.branches;
10920
10921 WARN_ONCE(br,
10922 "BUG live_done but branches_to_explore %d\n",
10923 br);
Alexei Starovoitov9f4686c2019-04-01 21:27:41 -070010924 free_verifier_state(&sl->state, false);
10925 kfree(sl);
10926 env->peak_states--;
10927 } else {
10928 /* cannot free this state, since parentage chain may
10929 * walk it later. Add it to the free_list instead to
10930 * be freed at the end of verification
10931 */
10932 sl->next = env->free_list;
10933 env->free_list = sl;
10934 }
10935 sl = *pprev;
10936 continue;
10937 }
Alexei Starovoitovdc2a4eb2019-05-21 20:17:07 -070010938next:
Alexei Starovoitov9f4686c2019-04-01 21:27:41 -070010939 pprev = &sl->next;
10940 sl = *pprev;
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010941 }
10942
Alexei Starovoitov06ee7112019-04-01 21:27:40 -070010943 if (env->max_states_per_insn < states_cnt)
10944 env->max_states_per_insn = states_cnt;
10945
Alexei Starovoitov2c78ee82020-05-13 16:03:54 -070010946 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -070010947 return push_jmp_history(env, cur);
Alexei Starovoitovceefbc92018-12-03 22:46:06 -080010948
Alexei Starovoitov25897262019-06-15 12:12:20 -070010949 if (!add_new_state)
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -070010950 return push_jmp_history(env, cur);
Alexei Starovoitov25897262019-06-15 12:12:20 -070010951
10952 /* There were no equivalent states, remember the current one.
10953 * Technically the current state is not proven to be safe yet,
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010954 * but it will either reach the outermost bpf_exit (which means it's safe)
Alexei Starovoitov25897262019-06-15 12:12:20 -070010955 * or it will be rejected. When there are no loops the verifier won't be
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080010956 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
Alexei Starovoitov25897262019-06-15 12:12:20 -070010957 * again on the way to bpf_exit.
10958 * When looping the sl->state.branches will be > 0 and this state
10959 * will not be considered for equivalence until branches == 0.
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010960 */
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070010961 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010962 if (!new_sl)
10963 return -ENOMEM;
Alexei Starovoitov06ee7112019-04-01 21:27:40 -070010964 env->total_states++;
10965 env->peak_states++;
Alexei Starovoitov25897262019-06-15 12:12:20 -070010966 env->prev_jmps_processed = env->jmps_processed;
10967 env->prev_insn_processed = env->insn_processed;
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070010968
10969 /* add new state to the head of linked list */
Edward Cree679c7822018-08-22 20:02:19 +010010970 new = &new_sl->state;
10971 err = copy_verifier_state(new, cur);
Alexei Starovoitov1969db42017-11-01 00:08:04 -070010972 if (err) {
Edward Cree679c7822018-08-22 20:02:19 +010010973 free_verifier_state(new, false);
Alexei Starovoitov1969db42017-11-01 00:08:04 -070010974 kfree(new_sl);
10975 return err;
10976 }
Alexei Starovoitovdc2a4eb2019-05-21 20:17:07 -070010977 new->insn_idx = insn_idx;
Alexei Starovoitov25897262019-06-15 12:12:20 -070010978 WARN_ONCE(new->branches != 1,
10979 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -070010980
Alexei Starovoitov25897262019-06-15 12:12:20 -070010981 cur->parent = new;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -070010982 cur->first_insn_idx = insn_idx;
10983 clear_jmp_history(cur);
Alexei Starovoitov5d839022019-05-21 20:17:05 -070010984 new_sl->next = *explored_state(env, insn_idx);
10985 *explored_state(env, insn_idx) = new_sl;
Jakub Kicinski7640ead2018-12-12 16:29:07 -080010986 /* connect new state to parentage chain. Current frame needs all
10987 * registers connected. Only r6 - r9 of the callers are alive (pushed
10988 * to the stack implicitly by JITs) so in callers' frames connect just
10989 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
10990 * the state of the call instruction (with WRITTEN set), and r0 comes
10991 * from callee with its full parentage chain, anyway.
10992 */
Edward Cree8e9cd9c2017-08-23 15:11:21 +010010993 /* clear write marks in current state: the writes we did are not writes
10994 * our child did, so they don't screen off its reads from us.
10995 * (There are no read marks in current state, because reads always mark
10996 * their parent and current state never has children yet. Only
10997 * explored_states can get read marks.)
10998 */
Alexei Starovoitoveea1c222019-06-15 12:12:21 -070010999 for (j = 0; j <= cur->curframe; j++) {
11000 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
11001 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
11002 for (i = 0; i < BPF_REG_FP; i++)
11003 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
11004 }
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080011005
11006 /* all stack frames are accessible from callee, clear them all */
11007 for (j = 0; j <= cur->curframe; j++) {
11008 struct bpf_func_state *frame = cur->frame[j];
Edward Cree679c7822018-08-22 20:02:19 +010011009 struct bpf_func_state *newframe = new->frame[j];
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080011010
Edward Cree679c7822018-08-22 20:02:19 +010011011 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
Alexei Starovoitovcc2b14d2017-12-14 17:55:08 -080011012 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
Edward Cree679c7822018-08-22 20:02:19 +010011013 frame->stack[i].spilled_ptr.parent =
11014 &newframe->stack[i].spilled_ptr;
11015 }
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080011016 }
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070011017 return 0;
11018}
11019
Joe Stringerc64b7982018-10-02 13:35:33 -070011020/* Return true if it's OK to have the same insn return a different type. */
11021static bool reg_type_mismatch_ok(enum bpf_reg_type type)
11022{
11023 switch (type) {
11024 case PTR_TO_CTX:
11025 case PTR_TO_SOCKET:
11026 case PTR_TO_SOCKET_OR_NULL:
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -080011027 case PTR_TO_SOCK_COMMON:
11028 case PTR_TO_SOCK_COMMON_OR_NULL:
Martin KaFai Lau655a51e2019-02-09 23:22:24 -080011029 case PTR_TO_TCP_SOCK:
11030 case PTR_TO_TCP_SOCK_OR_NULL:
Jonathan Lemonfada7fd2019-06-06 13:59:40 -070011031 case PTR_TO_XDP_SOCK:
Alexei Starovoitov2a027592019-10-15 20:25:02 -070011032 case PTR_TO_BTF_ID:
Yonghong Songb121b342020-05-09 10:59:12 -070011033 case PTR_TO_BTF_ID_OR_NULL:
Joe Stringerc64b7982018-10-02 13:35:33 -070011034 return false;
11035 default:
11036 return true;
11037 }
11038}
11039
11040/* If an instruction was previously used with particular pointer types, then we
11041 * need to be careful to avoid cases such as the one below, where it may be ok
11042 * for one branch to access the pointer, but not ok for the other branch:
11043 *
11044 * R1 = sock_ptr
11045 * goto X;
11046 * ...
11047 * R1 = some_other_valid_ptr;
11048 * goto X;
11049 * ...
11050 * R2 = *(u32 *)(R1 + 0);
11051 */
11052static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
11053{
11054 return src != prev && (!reg_type_mismatch_ok(src) ||
11055 !reg_type_mismatch_ok(prev));
11056}
11057
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +010011058static int do_check(struct bpf_verifier_env *env)
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011059{
Andrii Nakryiko6f8a57c2020-04-23 12:58:50 -070011060 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080011061 struct bpf_verifier_state *state = env->cur_state;
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011062 struct bpf_insn *insns = env->prog->insnsi;
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070011063 struct bpf_reg_state *regs;
Alexei Starovoitov06ee7112019-04-01 21:27:40 -070011064 int insn_cnt = env->prog->len;
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011065 bool do_print_state = false;
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -070011066 int prev_insn_idx = -1;
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011067
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011068 for (;;) {
11069 struct bpf_insn *insn;
11070 u8 class;
11071 int err;
11072
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -070011073 env->prev_insn_idx = prev_insn_idx;
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011074 if (env->insn_idx >= insn_cnt) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011075 verbose(env, "invalid insn idx %d insn_cnt %d\n",
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011076 env->insn_idx, insn_cnt);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011077 return -EFAULT;
11078 }
11079
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011080 insn = &insns[env->insn_idx];
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011081 class = BPF_CLASS(insn->code);
11082
Alexei Starovoitov06ee7112019-04-01 21:27:40 -070011083 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011084 verbose(env,
11085 "BPF program is too large. Processed %d insn\n",
Alexei Starovoitov06ee7112019-04-01 21:27:40 -070011086 env->insn_processed);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011087 return -E2BIG;
11088 }
11089
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011090 err = is_state_visited(env, env->insn_idx);
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070011091 if (err < 0)
11092 return err;
11093 if (err == 1) {
11094 /* found equivalent state, can prune the search */
Alexei Starovoitov06ee7112019-04-01 21:27:40 -070011095 if (env->log.level & BPF_LOG_LEVEL) {
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070011096 if (do_print_state)
Daniel Borkmann979d63d2019-01-03 00:58:34 +010011097 verbose(env, "\nfrom %d to %d%s: safe\n",
11098 env->prev_insn_idx, env->insn_idx,
11099 env->cur_state->speculative ?
11100 " (speculative execution)" : "");
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070011101 else
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011102 verbose(env, "%d: safe\n", env->insn_idx);
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070011103 }
11104 goto process_bpf_exit;
11105 }
11106
Alexei Starovoitovc3494802018-12-03 22:46:04 -080011107 if (signal_pending(current))
11108 return -EAGAIN;
11109
Daniel Borkmann3c2ce602017-05-18 03:00:06 +020011110 if (need_resched())
11111 cond_resched();
11112
Alexei Starovoitov06ee7112019-04-01 21:27:40 -070011113 if (env->log.level & BPF_LOG_LEVEL2 ||
11114 (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
11115 if (env->log.level & BPF_LOG_LEVEL2)
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011116 verbose(env, "%d:", env->insn_idx);
David S. Millerc5fc9692017-05-10 11:25:17 -070011117 else
Daniel Borkmann979d63d2019-01-03 00:58:34 +010011118 verbose(env, "\nfrom %d to %d%s:",
11119 env->prev_insn_idx, env->insn_idx,
11120 env->cur_state->speculative ?
11121 " (speculative execution)" : "");
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080011122 print_verifier_state(env, state->frame[state->curframe]);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011123 do_print_state = false;
11124 }
11125
Alexei Starovoitov06ee7112019-04-01 21:27:40 -070011126 if (env->log.level & BPF_LOG_LEVEL) {
Daniel Borkmann7105e822017-12-20 13:42:57 +010011127 const struct bpf_insn_cbs cbs = {
Martin KaFai Laue6ac2452021-03-24 18:51:42 -070011128 .cb_call = disasm_kfunc_name,
Daniel Borkmann7105e822017-12-20 13:42:57 +010011129 .cb_print = verbose,
Jiri Olsaabe08842018-03-23 11:41:28 +010011130 .private_data = env,
Daniel Borkmann7105e822017-12-20 13:42:57 +010011131 };
11132
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011133 verbose_linfo(env, env->insn_idx, "; ");
11134 verbose(env, "%d: ", env->insn_idx);
Jiri Olsaabe08842018-03-23 11:41:28 +010011135 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011136 }
11137
Jakub Kicinskicae19272017-12-27 18:39:05 -080011138 if (bpf_prog_is_dev_bound(env->prog->aux)) {
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011139 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
11140 env->prev_insn_idx);
Jakub Kicinskicae19272017-12-27 18:39:05 -080011141 if (err)
11142 return err;
11143 }
Jakub Kicinski13a27df2016-09-21 11:43:58 +010011144
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070011145 regs = cur_regs(env);
Daniel Borkmannfe9a5ca2021-05-28 13:47:27 +000011146 sanitize_mark_insn_seen(env);
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -070011147 prev_insn_idx = env->insn_idx;
Joe Stringerfd978bf72018-10-02 13:35:35 -070011148
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011149 if (class == BPF_ALU || class == BPF_ALU64) {
Alexei Starovoitov1be7f752015-10-07 22:23:21 -070011150 err = check_alu_op(env, insn);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011151 if (err)
11152 return err;
11153
11154 } else if (class == BPF_LDX) {
Jakub Kicinski3df126f2016-09-21 11:43:56 +010011155 enum bpf_reg_type *prev_src_type, src_reg_type;
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070011156
11157 /* check for reserved fields is already done */
11158
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011159 /* check src operand */
Edward Creedc503a82017-08-15 20:34:35 +010011160 err = check_reg_arg(env, insn->src_reg, SRC_OP);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011161 if (err)
11162 return err;
11163
Edward Creedc503a82017-08-15 20:34:35 +010011164 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011165 if (err)
11166 return err;
11167
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -070011168 src_reg_type = regs[insn->src_reg].type;
11169
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011170 /* check that memory (src_reg + off) is readable,
11171 * the state of dst_reg will be updated by this func
11172 */
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011173 err = check_mem_access(env, env->insn_idx, insn->src_reg,
11174 insn->off, BPF_SIZE(insn->code),
11175 BPF_READ, insn->dst_reg, false);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011176 if (err)
11177 return err;
11178
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011179 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
Jakub Kicinski3df126f2016-09-21 11:43:56 +010011180
11181 if (*prev_src_type == NOT_INIT) {
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070011182 /* saw a valid insn
11183 * dst_reg = *(u32 *)(src_reg + off)
Jakub Kicinski3df126f2016-09-21 11:43:56 +010011184 * save type to validate intersecting paths
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070011185 */
Jakub Kicinski3df126f2016-09-21 11:43:56 +010011186 *prev_src_type = src_reg_type;
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070011187
Joe Stringerc64b7982018-10-02 13:35:33 -070011188 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070011189 /* An abusive program is trying to use the same insn
11190 * dst_reg = *(u32*) (src_reg + off)
11191 * with different pointer types:
11192 * src_reg == ctx in one branch and
11193 * src_reg == stack|map in some other branch.
11194 * Reject it.
11195 */
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011196 verbose(env, "same insn cannot be used with different pointers\n");
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070011197 return -EINVAL;
11198 }
11199
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011200 } else if (class == BPF_STX) {
Jakub Kicinski3df126f2016-09-21 11:43:56 +010011201 enum bpf_reg_type *prev_dst_type, dst_reg_type;
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070011202
Brendan Jackman91c960b2021-01-14 18:17:44 +000011203 if (BPF_MODE(insn->code) == BPF_ATOMIC) {
11204 err = check_atomic(env, env->insn_idx, insn);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011205 if (err)
11206 return err;
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011207 env->insn_idx++;
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011208 continue;
11209 }
11210
Brendan Jackman5ca419f2021-01-14 18:17:46 +000011211 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
11212 verbose(env, "BPF_STX uses reserved fields\n");
11213 return -EINVAL;
11214 }
11215
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011216 /* check src1 operand */
Edward Creedc503a82017-08-15 20:34:35 +010011217 err = check_reg_arg(env, insn->src_reg, SRC_OP);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011218 if (err)
11219 return err;
11220 /* check src2 operand */
Edward Creedc503a82017-08-15 20:34:35 +010011221 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011222 if (err)
11223 return err;
11224
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070011225 dst_reg_type = regs[insn->dst_reg].type;
11226
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011227 /* check that memory (dst_reg + off) is writeable */
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011228 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
11229 insn->off, BPF_SIZE(insn->code),
11230 BPF_WRITE, insn->src_reg, false);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011231 if (err)
11232 return err;
11233
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011234 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
Jakub Kicinski3df126f2016-09-21 11:43:56 +010011235
11236 if (*prev_dst_type == NOT_INIT) {
11237 *prev_dst_type = dst_reg_type;
Joe Stringerc64b7982018-10-02 13:35:33 -070011238 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011239 verbose(env, "same insn cannot be used with different pointers\n");
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070011240 return -EINVAL;
11241 }
11242
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011243 } else if (class == BPF_ST) {
11244 if (BPF_MODE(insn->code) != BPF_MEM ||
11245 insn->src_reg != BPF_REG_0) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011246 verbose(env, "BPF_ST uses reserved fields\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011247 return -EINVAL;
11248 }
11249			/* check dst operand; it is read (SRC_OP) as the store address */
Edward Creedc503a82017-08-15 20:34:35 +010011250 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011251 if (err)
11252 return err;
11253
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +010011254 if (is_ctx_reg(env, insn->dst_reg)) {
Joe Stringer9d2be442018-10-02 13:35:31 -070011255 verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
Daniel Borkmann2a159c62018-10-21 02:09:24 +020011256 insn->dst_reg,
11257 reg_type_str[reg_state(env, insn->dst_reg)->type]);
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +010011258 return -EACCES;
11259 }
11260
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011261 /* check that memory (dst_reg + off) is writeable */
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011262 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
11263 insn->off, BPF_SIZE(insn->code),
11264 BPF_WRITE, -1, false);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011265 if (err)
11266 return err;
11267
Jiong Wang092ed092019-01-26 12:26:01 -050011268 } else if (class == BPF_JMP || class == BPF_JMP32) {
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011269 u8 opcode = BPF_OP(insn->code);
11270
Alexei Starovoitov25897262019-06-15 12:12:20 -070011271 env->jmps_processed++;
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011272 if (opcode == BPF_CALL) {
11273 if (BPF_SRC(insn->code) != BPF_K ||
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +053011274 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
11275 && insn->off != 0) ||
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080011276 (insn->src_reg != BPF_REG_0 &&
Martin KaFai Laue6ac2452021-03-24 18:51:42 -070011277 insn->src_reg != BPF_PSEUDO_CALL &&
11278 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
Jiong Wang092ed092019-01-26 12:26:01 -050011279 insn->dst_reg != BPF_REG_0 ||
11280 class == BPF_JMP32) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011281 verbose(env, "BPF_CALL uses reserved fields\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011282 return -EINVAL;
11283 }
11284
Alexei Starovoitovd83525c2019-01-31 15:40:04 -080011285 if (env->cur_state->active_spin_lock &&
11286 (insn->src_reg == BPF_PSEUDO_CALL ||
11287 insn->imm != BPF_FUNC_spin_unlock)) {
11288 verbose(env, "function calls are not allowed while holding a lock\n");
11289 return -EINVAL;
11290 }
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080011291 if (insn->src_reg == BPF_PSEUDO_CALL)
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011292 err = check_func_call(env, insn, &env->insn_idx);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -070011293 else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
11294 err = check_kfunc_call(env, insn);
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080011295 else
Yonghong Song69c087b2021-02-26 12:49:25 -080011296 err = check_helper_call(env, insn, &env->insn_idx);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011297 if (err)
11298 return err;
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011299 } else if (opcode == BPF_JA) {
11300 if (BPF_SRC(insn->code) != BPF_K ||
11301 insn->imm != 0 ||
11302 insn->src_reg != BPF_REG_0 ||
Jiong Wang092ed092019-01-26 12:26:01 -050011303 insn->dst_reg != BPF_REG_0 ||
11304 class == BPF_JMP32) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011305 verbose(env, "BPF_JA uses reserved fields\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011306 return -EINVAL;
11307 }
11308
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011309 env->insn_idx += insn->off + 1;
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011310 continue;
11311
11312 } else if (opcode == BPF_EXIT) {
11313 if (BPF_SRC(insn->code) != BPF_K ||
11314 insn->imm != 0 ||
11315 insn->src_reg != BPF_REG_0 ||
Jiong Wang092ed092019-01-26 12:26:01 -050011316 insn->dst_reg != BPF_REG_0 ||
11317 class == BPF_JMP32) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011318 verbose(env, "BPF_EXIT uses reserved fields\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011319 return -EINVAL;
11320 }
11321
Alexei Starovoitovd83525c2019-01-31 15:40:04 -080011322 if (env->cur_state->active_spin_lock) {
11323 verbose(env, "bpf_spin_unlock is missing\n");
11324 return -EINVAL;
11325 }
11326
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080011327 if (state->curframe) {
11328 /* exit from nested function */
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011329 err = prepare_func_exit(env, &env->insn_idx);
Alexei Starovoitovf4d7e402017-12-14 17:55:06 -080011330 if (err)
11331 return err;
11332 do_print_state = true;
11333 continue;
11334 }
11335
Joe Stringerfd978bf72018-10-02 13:35:35 -070011336 err = check_reference_leak(env);
11337 if (err)
11338 return err;
11339
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -070011340 err = check_return_code(env);
11341 if (err)
11342 return err;
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070011343process_bpf_exit:
Alexei Starovoitov25897262019-06-15 12:12:20 -070011344 update_branch_counts(env, env->cur_state);
Alexei Starovoitovb5dc0162019-06-15 12:12:25 -070011345 err = pop_stack(env, &prev_insn_idx,
Andrii Nakryiko6f8a57c2020-04-23 12:58:50 -070011346 &env->insn_idx, pop_log);
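			/* pop_stack() returns -ENOENT once no branched-out
			 * states remain to explore; that ends the main loop
			 * via the break below.
			 */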
Alexei Starovoitov638f5b92017-10-31 18:16:05 -070011347 if (err < 0) {
11348 if (err != -ENOENT)
11349 return err;
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011350 break;
11351 } else {
11352 do_print_state = true;
11353 continue;
11354 }
11355 } else {
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011356 err = check_cond_jmp_op(env, insn, &env->insn_idx);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011357 if (err)
11358 return err;
11359 }
11360 } else if (class == BPF_LD) {
11361 u8 mode = BPF_MODE(insn->code);
11362
11363 if (mode == BPF_ABS || mode == BPF_IND) {
Alexei Starovoitovddd872b2014-12-01 15:06:34 -080011364 err = check_ld_abs(env, insn);
11365 if (err)
11366 return err;
11367
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011368 } else if (mode == BPF_IMM) {
11369 err = check_ld_imm(env, insn);
11370 if (err)
11371 return err;
11372
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011373 env->insn_idx++;
Daniel Borkmannfe9a5ca2021-05-28 13:47:27 +000011374 sanitize_mark_insn_seen(env);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011375 } else {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011376 verbose(env, "invalid BPF_LD mode\n");
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011377 return -EINVAL;
11378 }
11379 } else {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011380 verbose(env, "unknown insn class %d\n", class);
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011381 return -EINVAL;
11382 }
11383
Daniel Borkmannc08435e2019-01-03 00:58:27 +010011384 env->insn_idx++;
Alexei Starovoitov17a52672014-09-26 00:17:06 -070011385 }
11386
11387 return 0;
11388}
11389
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011390static int find_btf_percpu_datasec(struct btf *btf)
11391{
11392 const struct btf_type *t;
11393 const char *tname;
11394 int i, n;
11395
11396 /*
11397	 * vmlinux and each kernel module have their own ".data..percpu"
11398	 * DATASEC in BTF. So in the module case, skip the vmlinux BTF types
11399	 * and look only at the module's own BTF types.
11400 */
11401 n = btf_nr_types(btf);
11402 if (btf_is_module(btf))
11403 i = btf_nr_types(btf_vmlinux);
11404 else
11405 i = 1;
11406
11407	for (; i < n; i++) {
11408 t = btf_type_by_id(btf, i);
11409 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
11410 continue;
11411
11412 tname = btf_name_by_offset(btf, t->name_off);
11413 if (!strcmp(tname, ".data..percpu"))
11414 return i;
11415 }
11416
11417 return -ENOENT;
11418}
11419
Hao Luo4976b712020-09-29 16:50:44 -070011420/* replace pseudo btf_id with kernel symbol address */
11421static int check_pseudo_btf_id(struct bpf_verifier_env *env,
11422 struct bpf_insn *insn,
11423 struct bpf_insn_aux_data *aux)
11424{
Hao Luoeaa6bcb2020-09-29 16:50:47 -070011425 const struct btf_var_secinfo *vsi;
11426 const struct btf_type *datasec;
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011427 struct btf_mod_pair *btf_mod;
Hao Luo4976b712020-09-29 16:50:44 -070011428 const struct btf_type *t;
11429 const char *sym_name;
Hao Luoeaa6bcb2020-09-29 16:50:47 -070011430 bool percpu = false;
Kaixu Xiaf16e6312020-11-11 13:03:46 +080011431 u32 type, id = insn->imm;
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011432 struct btf *btf;
Kaixu Xiaf16e6312020-11-11 13:03:46 +080011433 s32 datasec_id;
Hao Luo4976b712020-09-29 16:50:44 -070011434 u64 addr;
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011435 int i, btf_fd, err;
Hao Luo4976b712020-09-29 16:50:44 -070011436
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011437 btf_fd = insn[1].imm;
11438 if (btf_fd) {
11439 btf = btf_get_by_fd(btf_fd);
11440 if (IS_ERR(btf)) {
11441 verbose(env, "invalid module BTF object FD specified.\n");
11442 return -EINVAL;
11443 }
11444 } else {
11445 if (!btf_vmlinux) {
11446 verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
11447 return -EINVAL;
11448 }
11449 btf = btf_vmlinux;
11450 btf_get(btf);
Hao Luo4976b712020-09-29 16:50:44 -070011451 }
11452
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011453 t = btf_type_by_id(btf, id);
Hao Luo4976b712020-09-29 16:50:44 -070011454 if (!t) {
11455 verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011456 err = -ENOENT;
11457 goto err_put;
Hao Luo4976b712020-09-29 16:50:44 -070011458 }
11459
11460 if (!btf_type_is_var(t)) {
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011461 verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
11462 err = -EINVAL;
11463 goto err_put;
Hao Luo4976b712020-09-29 16:50:44 -070011464 }
11465
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011466 sym_name = btf_name_by_offset(btf, t->name_off);
Hao Luo4976b712020-09-29 16:50:44 -070011467 addr = kallsyms_lookup_name(sym_name);
11468 if (!addr) {
11469 verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
11470 sym_name);
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011471 err = -ENOENT;
11472 goto err_put;
Hao Luo4976b712020-09-29 16:50:44 -070011473 }
11474
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011475 datasec_id = find_btf_percpu_datasec(btf);
Hao Luoeaa6bcb2020-09-29 16:50:47 -070011476 if (datasec_id > 0) {
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011477 datasec = btf_type_by_id(btf, datasec_id);
Hao Luoeaa6bcb2020-09-29 16:50:47 -070011478 for_each_vsi(i, datasec, vsi) {
11479 if (vsi->type == id) {
11480 percpu = true;
11481 break;
11482 }
11483 }
11484 }
11485
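	/* patch the ld_imm64 pair with the resolved symbol address:
	 * insn[0].imm takes the low 32 bits, insn[1].imm the high 32 bits
	 */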
Hao Luo4976b712020-09-29 16:50:44 -070011486 insn[0].imm = (u32)addr;
11487 insn[1].imm = addr >> 32;
11488
11489 type = t->type;
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011490 t = btf_type_skip_modifiers(btf, type, NULL);
Hao Luoeaa6bcb2020-09-29 16:50:47 -070011491 if (percpu) {
11492 aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID;
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011493 aux->btf_var.btf = btf;
Hao Luoeaa6bcb2020-09-29 16:50:47 -070011494 aux->btf_var.btf_id = type;
11495 } else if (!btf_type_is_struct(t)) {
Hao Luo4976b712020-09-29 16:50:44 -070011496 const struct btf_type *ret;
11497 const char *tname;
11498 u32 tsize;
11499
11500 /* resolve the type size of ksym. */
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011501 ret = btf_resolve_size(btf, t, &tsize);
Hao Luo4976b712020-09-29 16:50:44 -070011502 if (IS_ERR(ret)) {
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011503 tname = btf_name_by_offset(btf, t->name_off);
Hao Luo4976b712020-09-29 16:50:44 -070011504 verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
11505 tname, PTR_ERR(ret));
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011506 err = -EINVAL;
11507 goto err_put;
Hao Luo4976b712020-09-29 16:50:44 -070011508 }
11509 aux->btf_var.reg_type = PTR_TO_MEM;
11510 aux->btf_var.mem_size = tsize;
11511 } else {
11512 aux->btf_var.reg_type = PTR_TO_BTF_ID;
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011513 aux->btf_var.btf = btf;
Hao Luo4976b712020-09-29 16:50:44 -070011514 aux->btf_var.btf_id = type;
11515 }
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011516
11517 /* check whether we recorded this BTF (and maybe module) already */
11518 for (i = 0; i < env->used_btf_cnt; i++) {
11519 if (env->used_btfs[i].btf == btf) {
11520 btf_put(btf);
11521 return 0;
11522 }
11523 }
11524
11525 if (env->used_btf_cnt >= MAX_USED_BTFS) {
11526 err = -E2BIG;
11527 goto err_put;
11528 }
11529
11530 btf_mod = &env->used_btfs[env->used_btf_cnt];
11531 btf_mod->btf = btf;
11532 btf_mod->module = NULL;
11533
11534 /* if we reference variables from kernel module, bump its refcount */
11535 if (btf_is_module(btf)) {
11536 btf_mod->module = btf_try_get_module(btf);
11537 if (!btf_mod->module) {
11538 err = -ENXIO;
11539 goto err_put;
11540 }
11541 }
11542
11543 env->used_btf_cnt++;
11544
Hao Luo4976b712020-09-29 16:50:44 -070011545 return 0;
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011546err_put:
11547 btf_put(btf);
11548 return err;
Hao Luo4976b712020-09-29 16:50:44 -070011549}
11550
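/* For the purpose of this check, a map counts as preallocated unless it is
 * one of the listed hash types created with BPF_F_NO_PREALLOC.
 */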
Martin KaFai Lau56f668d2017-03-22 10:00:33 -070011551static int check_map_prealloc(struct bpf_map *map)
11552{
11553 return (map->map_type != BPF_MAP_TYPE_HASH &&
Martin KaFai Laubcc6b1b2017-03-22 10:00:34 -070011554 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
11555 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
Martin KaFai Lau56f668d2017-03-22 10:00:33 -070011556 !(map->map_flags & BPF_F_NO_PREALLOC);
11557}
11558
Alexei Starovoitovd83525c2019-01-31 15:40:04 -080011559static bool is_tracing_prog_type(enum bpf_prog_type type)
11560{
11561 switch (type) {
11562 case BPF_PROG_TYPE_KPROBE:
11563 case BPF_PROG_TYPE_TRACEPOINT:
11564 case BPF_PROG_TYPE_PERF_EVENT:
11565 case BPF_PROG_TYPE_RAW_TRACEPOINT:
11566 return true;
11567 default:
11568 return false;
11569 }
11570}
11571
Thomas Gleixner94dacdb2020-02-24 15:01:32 +010011572static bool is_preallocated_map(struct bpf_map *map)
11573{
11574 if (!check_map_prealloc(map))
11575 return false;
11576 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
11577 return false;
11578 return true;
11579}
11580
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011581static int check_map_prog_compatibility(struct bpf_verifier_env *env,
11582 struct bpf_map *map,
Alexei Starovoitovfdc15d32016-09-01 18:37:23 -070011583					struct bpf_prog *prog)
11584{
Udip Pant7e407812020-08-25 16:20:00 -070011586 enum bpf_prog_type prog_type = resolve_prog_type(prog);
Thomas Gleixner94dacdb2020-02-24 15:01:32 +010011587 /*
11588 * Validate that trace type programs use preallocated hash maps.
11589 *
11590 * For programs attached to PERF events this is mandatory as the
11591 * perf NMI can hit any arbitrary code sequence.
11592 *
11593	 * All other trace types using non-preallocated hash maps are unsafe as
11594	 * well because tracepoints or kprobes can be inside locked regions
11595 * of the memory allocator or at a place where a recursion into the
11596 * memory allocator would see inconsistent state.
11597 *
Thomas Gleixner2ed905c2020-02-24 15:01:33 +010011598 * On RT enabled kernels run-time allocation of all trace type
11599 * programs is strictly prohibited due to lock type constraints. On
11600 * !RT kernels it is allowed for backwards compatibility reasons for
11601 * now, but warnings are emitted so developers are made aware of
11602 * the unsafety and can fix their programs before this is enforced.
Martin KaFai Lau56f668d2017-03-22 10:00:33 -070011603 */
Udip Pant7e407812020-08-25 16:20:00 -070011604 if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
11605 if (prog_type == BPF_PROG_TYPE_PERF_EVENT) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011606 verbose(env, "perf_event programs can only use preallocated hash map\n");
Martin KaFai Lau56f668d2017-03-22 10:00:33 -070011607 return -EINVAL;
11608 }
Thomas Gleixner2ed905c2020-02-24 15:01:33 +010011609 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
11610 verbose(env, "trace type programs can only use preallocated hash map\n");
11611 return -EINVAL;
11612 }
Thomas Gleixner94dacdb2020-02-24 15:01:32 +010011613 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
11614 verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
Alexei Starovoitovfdc15d32016-09-01 18:37:23 -070011615 }
Jakub Kicinskia3884572018-01-11 20:29:09 -080011616
KP Singh9e7a4d92020-11-06 10:37:39 +000011617 if (map_value_has_spin_lock(map)) {
11618 if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
11619 verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
11620 return -EINVAL;
11621 }
11622
11623 if (is_tracing_prog_type(prog_type)) {
11624 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
11625 return -EINVAL;
11626 }
11627
11628 if (prog->aux->sleepable) {
11629 verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
11630 return -EINVAL;
11631 }
Alexei Starovoitovd83525c2019-01-31 15:40:04 -080011632 }
11633
Jakub Kicinskia3884572018-01-11 20:29:09 -080011634 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
Jakub Kicinski09728262018-07-17 10:53:23 -070011635 !bpf_offload_prog_map_match(prog, map)) {
Jakub Kicinskia3884572018-01-11 20:29:09 -080011636 verbose(env, "offload device mismatch between prog and map\n");
11637 return -EINVAL;
11638 }
11639
Martin KaFai Lau85d33df2020-01-08 16:35:05 -080011640 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
11641 verbose(env, "bpf_struct_ops map cannot be used in prog\n");
11642 return -EINVAL;
11643 }
11644
Alexei Starovoitov1e6c62a2020-08-27 15:01:11 -070011645 if (prog->aux->sleepable)
11646 switch (map->map_type) {
11647 case BPF_MAP_TYPE_HASH:
11648 case BPF_MAP_TYPE_LRU_HASH:
11649 case BPF_MAP_TYPE_ARRAY:
Alexei Starovoitov638e4b82021-02-09 19:36:33 -080011650 case BPF_MAP_TYPE_PERCPU_HASH:
11651 case BPF_MAP_TYPE_PERCPU_ARRAY:
11652 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
11653 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
11654 case BPF_MAP_TYPE_HASH_OF_MAPS:
Alexei Starovoitov1e6c62a2020-08-27 15:01:11 -070011655 if (!is_preallocated_map(map)) {
11656 verbose(env,
Alexei Starovoitov638e4b82021-02-09 19:36:33 -080011657 "Sleepable programs can only use preallocated maps\n");
Alexei Starovoitov1e6c62a2020-08-27 15:01:11 -070011658 return -EINVAL;
11659 }
11660 break;
KP Singhba90c2c2021-02-04 19:36:21 +000011661 case BPF_MAP_TYPE_RINGBUF:
11662 break;
Alexei Starovoitov1e6c62a2020-08-27 15:01:11 -070011663 default:
11664 verbose(env,
KP Singhba90c2c2021-02-04 19:36:21 +000011665 "Sleepable programs can only use array, hash, and ringbuf maps\n");
Alexei Starovoitov1e6c62a2020-08-27 15:01:11 -070011666 return -EINVAL;
11667 }
11668
Alexei Starovoitovfdc15d32016-09-01 18:37:23 -070011669 return 0;
11670}
11671
Roman Gushchinb741f162018-09-28 14:45:43 +000011672static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
11673{
11674 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
11675 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
11676}
11677
Hao Luo4976b712020-09-29 16:50:44 -070011678/* find and rewrite pseudo imm in ld_imm64 instructions:
11679 *
11680 * 1. if it accesses map FD, replace it with actual map pointer.
11681 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
11682 *
11683 * NOTE: btf_vmlinux is required for converting pseudo btf_id.
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011684 */
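/* Illustrative layout (added for clarity): a pseudo ld_imm64 occupies two
 * struct bpf_insn slots, e.g. for a map reference:
 *
 *   insn[0]: BPF_LD | BPF_IMM | BPF_DW, src_reg = BPF_PSEUDO_MAP_FD,
 *            imm = map fd
 *   insn[1]: code/dst_reg/src_reg/off = 0, imm = 0 (or a value offset
 *            for BPF_PSEUDO_MAP_VALUE)
 *
 * The loop below rewrites the pair so that insn[0].imm/insn[1].imm carry
 * the low/high 32 bits of the resolved kernel address.
 */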
Hao Luo4976b712020-09-29 16:50:44 -070011685static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011686{
11687 struct bpf_insn *insn = env->prog->insnsi;
11688 int insn_cnt = env->prog->len;
Alexei Starovoitovfdc15d32016-09-01 18:37:23 -070011689 int i, j, err;
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011690
Daniel Borkmannf1f77142017-01-13 23:38:15 +010011691 err = bpf_prog_calc_tag(env->prog);
Daniel Borkmannaafe6ae2016-12-18 01:52:57 +010011692 if (err)
11693 return err;
11694
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011695 for (i = 0; i < insn_cnt; i++, insn++) {
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070011696 if (BPF_CLASS(insn->code) == BPF_LDX &&
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070011697 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011698 verbose(env, "BPF_LDX uses reserved fields\n");
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070011699 return -EINVAL;
11700 }
11701
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011702 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +020011703 struct bpf_insn_aux_data *aux;
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011704 struct bpf_map *map;
11705 struct fd f;
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +020011706 u64 addr;
Alexei Starovoitov387544b2021-05-13 17:36:10 -070011707 u32 fd;
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011708
11709 if (i == insn_cnt - 1 || insn[1].code != 0 ||
11710 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
11711 insn[1].off != 0) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011712 verbose(env, "invalid bpf_ld_imm64 insn\n");
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011713 return -EINVAL;
11714 }
11715
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +020011716 if (insn[0].src_reg == 0)
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011717 /* valid generic load 64-bit imm */
11718 goto next_insn;
11719
Hao Luo4976b712020-09-29 16:50:44 -070011720 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
11721 aux = &env->insn_aux_data[i];
11722 err = check_pseudo_btf_id(env, insn, aux);
11723 if (err)
11724 return err;
11725 goto next_insn;
11726 }
11727
Yonghong Song69c087b2021-02-26 12:49:25 -080011728 if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
11729 aux = &env->insn_aux_data[i];
11730 aux->ptr_type = PTR_TO_FUNC;
11731 goto next_insn;
11732 }
11733
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +020011734 /* In final convert_pseudo_ld_imm64() step, this is
11735 * converted into regular 64-bit imm load insn.
11736 */
Alexei Starovoitov387544b2021-05-13 17:36:10 -070011737 switch (insn[0].src_reg) {
11738 case BPF_PSEUDO_MAP_VALUE:
11739 case BPF_PSEUDO_MAP_IDX_VALUE:
11740 break;
11741 case BPF_PSEUDO_MAP_FD:
11742 case BPF_PSEUDO_MAP_IDX:
11743 if (insn[1].imm == 0)
11744 break;
11745 fallthrough;
11746 default:
11747 verbose(env, "unrecognized bpf_ld_imm64 insn\n");
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011748 return -EINVAL;
11749 }
11750
Alexei Starovoitov387544b2021-05-13 17:36:10 -070011751 switch (insn[0].src_reg) {
11752 case BPF_PSEUDO_MAP_IDX_VALUE:
11753 case BPF_PSEUDO_MAP_IDX:
11754 if (bpfptr_is_null(env->fd_array)) {
11755 verbose(env, "fd_idx without fd_array is invalid\n");
11756 return -EPROTO;
11757 }
11758 if (copy_from_bpfptr_offset(&fd, env->fd_array,
11759 insn[0].imm * sizeof(fd),
11760 sizeof(fd)))
11761 return -EFAULT;
11762 break;
11763 default:
11764 fd = insn[0].imm;
11765 break;
11766 }
11767
11768 f = fdget(fd);
Daniel Borkmannc2101292015-10-29 14:58:07 +010011769 map = __bpf_map_get(f);
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011770 if (IS_ERR(map)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011771 verbose(env, "fd %d is not pointing to valid bpf_map\n",
Daniel Borkmann20182392019-03-04 21:08:53 +010011772 insn[0].imm);
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011773 return PTR_ERR(map);
11774 }
11775
Jakub Kicinski61bd5212017-10-09 10:30:11 -070011776 err = check_map_prog_compatibility(env, map, env->prog);
Alexei Starovoitovfdc15d32016-09-01 18:37:23 -070011777 if (err) {
11778 fdput(f);
11779 return err;
11780 }
11781
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +020011782 aux = &env->insn_aux_data[i];
Alexei Starovoitov387544b2021-05-13 17:36:10 -070011783 if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
11784 insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +020011785 addr = (unsigned long)map;
11786 } else {
11787 u32 off = insn[1].imm;
11788
11789 if (off >= BPF_MAX_VAR_OFF) {
11790 verbose(env, "direct value offset of %u is not allowed\n", off);
11791 fdput(f);
11792 return -EINVAL;
11793 }
11794
11795 if (!map->ops->map_direct_value_addr) {
11796 verbose(env, "no direct value access support for this map type\n");
11797 fdput(f);
11798 return -EINVAL;
11799 }
11800
11801 err = map->ops->map_direct_value_addr(map, &addr, off);
11802 if (err) {
11803 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
11804 map->value_size, off);
11805 fdput(f);
11806 return err;
11807 }
11808
11809 aux->map_off = off;
11810 addr += off;
11811 }
11812
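			/* write the resolved address (map pointer or direct
			 * value address) back into the ld_imm64 pair */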
11813 insn[0].imm = (u32)addr;
11814 insn[1].imm = addr >> 32;
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011815
11816 /* check whether we recorded this map already */
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +020011817 for (j = 0; j < env->used_map_cnt; j++) {
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011818 if (env->used_maps[j] == map) {
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +020011819 aux->map_index = j;
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011820 fdput(f);
11821 goto next_insn;
11822 }
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +020011823 }
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011824
11825 if (env->used_map_cnt >= MAX_USED_MAPS) {
11826 fdput(f);
11827 return -E2BIG;
11828 }
11829
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011830			/* hold the map. If the program is rejected by the verifier,
11831			 * the map will be released by release_maps() or it
11832			 * will be used by the valid program until it's unloaded
Jakub Kicinskiab7f5bf2018-05-03 18:37:17 -070011833			 * and all maps are released in free_used_maps().
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011834			 */
Andrii Nakryiko1e0bd5a2019-11-17 09:28:02 -080011835 bpf_map_inc(map);
Daniel Borkmannd8eca5b2019-04-09 23:20:03 +020011836
11837 aux->map_index = env->used_map_cnt;
Alexei Starovoitov92117d82016-04-27 18:56:20 -070011838 env->used_maps[env->used_map_cnt++] = map;
11839
Roman Gushchinb741f162018-09-28 14:45:43 +000011840 if (bpf_map_is_cgroup_storage(map) &&
Daniel Borkmanne4730422019-12-17 13:28:16 +010011841 bpf_cgroup_storage_assign(env->prog->aux, map)) {
Roman Gushchinb741f162018-09-28 14:45:43 +000011842 verbose(env, "only one cgroup storage of each type is allowed\n");
Roman Gushchinde9cbba2018-08-02 14:27:18 -070011843 fdput(f);
11844 return -EBUSY;
11845 }
11846
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011847 fdput(f);
11848next_insn:
11849 insn++;
11850 i++;
Daniel Borkmann5e581da2018-01-26 23:33:38 +010011851 continue;
11852 }
11853
11854 /* Basic sanity check before we invest more work here. */
11855 if (!bpf_opcode_in_insntable(insn->code)) {
11856 verbose(env, "unknown opcode %02x\n", insn->code);
11857 return -EINVAL;
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011858 }
11859 }
11860
11861	/* now all pseudo BPF_LD_IMM64 instructions load a valid
11862	 * 'struct bpf_map *' into a register instead of a user map_fd.
11863	 * These pointers will be used later by the verifier to validate map access.
11864 */
11865 return 0;
11866}
11867
11868/* drop refcnt of maps used by the rejected program */
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +010011869static void release_maps(struct bpf_verifier_env *env)
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011870{
Daniel Borkmanna2ea0742019-12-16 17:49:00 +010011871 __bpf_free_used_maps(env->prog->aux, env->used_maps,
11872 env->used_map_cnt);
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011873}
11874
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080011875/* drop refcnt of BTF objects used by the rejected program */
11876static void release_btfs(struct bpf_verifier_env *env)
11877{
11878 __bpf_free_used_btfs(env->prog->aux, env->used_btfs,
11879 env->used_btf_cnt);
11880}
11881
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011882/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +010011883static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011884{
11885 struct bpf_insn *insn = env->prog->insnsi;
11886 int insn_cnt = env->prog->len;
11887 int i;
11888
Yonghong Song69c087b2021-02-26 12:49:25 -080011889 for (i = 0; i < insn_cnt; i++, insn++) {
11890 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
11891 continue;
11892 if (insn->src_reg == BPF_PSEUDO_FUNC)
11893 continue;
11894 insn->src_reg = 0;
11895 }
Alexei Starovoitov0246e642014-09-26 00:17:04 -070011896}
11897
Alexei Starovoitov80419022017-03-15 18:26:41 -070011898/* single env->prog->insni[off] instruction was replaced with the range
11899 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
11900 * [0, off) and [off, end) to new locations, so the patched range stays zero
11901 */
He Fengqing75f0fc72021-07-14 10:18:15 +000011902static void adjust_insn_aux_data(struct bpf_verifier_env *env,
11903 struct bpf_insn_aux_data *new_data,
11904 struct bpf_prog *new_prog, u32 off, u32 cnt)
Alexei Starovoitov80419022017-03-15 18:26:41 -070011905{
He Fengqing75f0fc72021-07-14 10:18:15 +000011906 struct bpf_insn_aux_data *old_data = env->insn_aux_data;
Jiong Wangb325fbc2019-05-24 23:25:13 +010011907 struct bpf_insn *insn = new_prog->insnsi;
Daniel Borkmannd203b0f2021-05-28 13:03:30 +000011908 u32 old_seen = old_data[off].seen;
Jiong Wangb325fbc2019-05-24 23:25:13 +010011909 u32 prog_len;
Alexei Starovoitovc1311872017-11-22 16:42:05 -080011910 int i;
Alexei Starovoitov80419022017-03-15 18:26:41 -070011911
Jiong Wangb325fbc2019-05-24 23:25:13 +010011912 /* aux info at OFF always needs adjustment, no matter fast path
11913 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the
11914 * original insn at old prog.
11915 */
11916 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
11917
Alexei Starovoitov80419022017-03-15 18:26:41 -070011918 if (cnt == 1)
He Fengqing75f0fc72021-07-14 10:18:15 +000011919 return;
Jiong Wangb325fbc2019-05-24 23:25:13 +010011920 prog_len = new_prog->len;
He Fengqing75f0fc72021-07-14 10:18:15 +000011921
Alexei Starovoitov80419022017-03-15 18:26:41 -070011922 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
11923 memcpy(new_data + off + cnt - 1, old_data + off,
11924 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
Jiong Wangb325fbc2019-05-24 23:25:13 +010011925 for (i = off; i < off + cnt - 1; i++) {
Daniel Borkmannd203b0f2021-05-28 13:03:30 +000011926 /* Expand insni[off]'s seen count to the patched range. */
11927 new_data[i].seen = old_seen;
Jiong Wangb325fbc2019-05-24 23:25:13 +010011928 new_data[i].zext_dst = insn_has_def32(env, insn + i);
11929 }
Alexei Starovoitov80419022017-03-15 18:26:41 -070011930 env->insn_aux_data = new_data;
11931 vfree(old_data);
Alexei Starovoitov80419022017-03-15 18:26:41 -070011932}
11933
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -080011934static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
11935{
11936 int i;
11937
11938 if (len == 1)
11939 return;
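	/* replacing one insn at 'off' with 'len' insns shifts every later
	 * insn by (len - 1); move the affected subprog starts along with it
	 */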
Jiong Wang4cb3d992018-05-02 16:17:19 -040011940 /* NOTE: fake 'exit' subprog should be updated as well. */
11941 for (i = 0; i <= env->subprog_cnt; i++) {
Edward Creeafd59422018-11-16 12:00:07 +000011942 if (env->subprog_info[i].start <= off)
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -080011943 continue;
Jiong Wang9c8105b2018-05-02 16:17:18 -040011944 env->subprog_info[i].start += len - 1;
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -080011945 }
11946}
11947
John Fastabend7506d212021-06-16 15:55:00 -070011948static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
Maciej Fijalkowskia748c692020-09-16 23:10:05 +020011949{
11950 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
11951 int i, sz = prog->aux->size_poke_tab;
11952 struct bpf_jit_poke_descriptor *desc;
11953
11954 for (i = 0; i < sz; i++) {
11955 desc = &tab[i];
John Fastabend7506d212021-06-16 15:55:00 -070011956 if (desc->insn_idx <= off)
11957 continue;
Maciej Fijalkowskia748c692020-09-16 23:10:05 +020011958 desc->insn_idx += len - 1;
11959 }
11960}
11961
Alexei Starovoitov80419022017-03-15 18:26:41 -070011962static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
11963 const struct bpf_insn *patch, u32 len)
11964{
11965 struct bpf_prog *new_prog;
He Fengqing75f0fc72021-07-14 10:18:15 +000011966 struct bpf_insn_aux_data *new_data = NULL;
11967
11968 if (len > 1) {
11969 new_data = vzalloc(array_size(env->prog->len + len - 1,
11970 sizeof(struct bpf_insn_aux_data)));
11971 if (!new_data)
11972 return NULL;
11973 }
Alexei Starovoitov80419022017-03-15 18:26:41 -070011974
11975 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
Alexei Starovoitov4f733792019-04-01 21:27:44 -070011976 if (IS_ERR(new_prog)) {
11977 if (PTR_ERR(new_prog) == -ERANGE)
11978 verbose(env,
11979 "insn %d cannot be patched due to 16-bit range\n",
11980 env->insn_aux_data[off].orig_idx);
He Fengqing75f0fc72021-07-14 10:18:15 +000011981 vfree(new_data);
Alexei Starovoitov80419022017-03-15 18:26:41 -070011982 return NULL;
Alexei Starovoitov4f733792019-04-01 21:27:44 -070011983 }
He Fengqing75f0fc72021-07-14 10:18:15 +000011984 adjust_insn_aux_data(env, new_data, new_prog, off, len);
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -080011985 adjust_subprog_starts(env, off, len);
John Fastabend7506d212021-06-16 15:55:00 -070011986 adjust_poke_descs(new_prog, off, len);
Alexei Starovoitov80419022017-03-15 18:26:41 -070011987 return new_prog;
11988}
11989
Jakub Kicinski52875a02019-01-22 22:45:20 -080011990static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
11991 u32 off, u32 cnt)
11992{
11993 int i, j;
11994
11995 /* find first prog starting at or after off (first to remove) */
11996 for (i = 0; i < env->subprog_cnt; i++)
11997 if (env->subprog_info[i].start >= off)
11998 break;
11999 /* find first prog starting at or after off + cnt (first to stay) */
12000 for (j = i; j < env->subprog_cnt; j++)
12001 if (env->subprog_info[j].start >= off + cnt)
12002 break;
12003 /* if j doesn't start exactly at off + cnt, we are just removing
12004 * the front of previous prog
12005 */
12006 if (env->subprog_info[j].start != off + cnt)
12007 j--;
12008
12009 if (j > i) {
12010 struct bpf_prog_aux *aux = env->prog->aux;
12011 int move;
12012
12013 /* move fake 'exit' subprog as well */
12014 move = env->subprog_cnt + 1 - j;
12015
12016 memmove(env->subprog_info + i,
12017 env->subprog_info + j,
12018 sizeof(*env->subprog_info) * move);
12019 env->subprog_cnt -= j - i;
12020
12021 /* remove func_info */
12022 if (aux->func_info) {
12023 move = aux->func_info_cnt - j;
12024
12025 memmove(aux->func_info + i,
12026 aux->func_info + j,
12027 sizeof(*aux->func_info) * move);
12028 aux->func_info_cnt -= j - i;
12029 /* func_info->insn_off is set after all code rewrites,
12030 * in adjust_btf_func() - no need to adjust
12031 */
12032 }
12033 } else {
12034 /* convert i from "first prog to remove" to "first to adjust" */
12035 if (env->subprog_info[i].start == off)
12036 i++;
12037 }
12038
12039 /* update fake 'exit' subprog as well */
12040 for (; i <= env->subprog_cnt; i++)
12041 env->subprog_info[i].start -= cnt;
12042
12043 return 0;
12044}
12045
12046static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
12047 u32 cnt)
12048{
12049 struct bpf_prog *prog = env->prog;
12050 u32 i, l_off, l_cnt, nr_linfo;
12051 struct bpf_line_info *linfo;
12052
12053 nr_linfo = prog->aux->nr_linfo;
12054 if (!nr_linfo)
12055 return 0;
12056
12057 linfo = prog->aux->linfo;
12058
12059 /* find first line info to remove, count lines to be removed */
12060 for (i = 0; i < nr_linfo; i++)
12061 if (linfo[i].insn_off >= off)
12062 break;
12063
12064 l_off = i;
12065 l_cnt = 0;
12066 for (; i < nr_linfo; i++)
12067 if (linfo[i].insn_off < off + cnt)
12068 l_cnt++;
12069 else
12070 break;
12071
12072	/* If the first live insn doesn't match the first live linfo, it needs to
12073	 * "inherit" the last removed linfo. prog is already modified, so
12074	 * prog->len == off means there are no live instructions after (the tail
12075	 * of the program was removed).
12075 */
12076 if (prog->len != off && l_cnt &&
12077 (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
12078 l_cnt--;
12079 linfo[--i].insn_off = off + cnt;
12080 }
12081
12082 /* remove the line info which refer to the removed instructions */
12083 if (l_cnt) {
12084 memmove(linfo + l_off, linfo + i,
12085 sizeof(*linfo) * (nr_linfo - i));
12086
12087 prog->aux->nr_linfo -= l_cnt;
12088 nr_linfo = prog->aux->nr_linfo;
12089 }
12090
12091 /* pull all linfo[i].insn_off >= off + cnt in by cnt */
12092 for (i = l_off; i < nr_linfo; i++)
12093 linfo[i].insn_off -= cnt;
12094
12095 /* fix up all subprogs (incl. 'exit') which start >= off */
12096 for (i = 0; i <= env->subprog_cnt; i++)
12097 if (env->subprog_info[i].linfo_idx > l_off) {
12098 /* program may have started in the removed region but
12099 * may not be fully removed
12100 */
12101 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
12102 env->subprog_info[i].linfo_idx -= l_cnt;
12103 else
12104 env->subprog_info[i].linfo_idx = l_off;
12105 }
12106
12107 return 0;
12108}
12109
12110static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
12111{
12112 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
12113 unsigned int orig_prog_len = env->prog->len;
12114 int err;
12115
Jakub Kicinski08ca90a2019-01-22 22:45:24 -080012116 if (bpf_prog_is_dev_bound(env->prog->aux))
12117 bpf_prog_offload_remove_insns(env, off, cnt);
12118
Jakub Kicinski52875a02019-01-22 22:45:20 -080012119 err = bpf_remove_insns(env->prog, off, cnt);
12120 if (err)
12121 return err;
12122
12123 err = adjust_subprog_starts_after_remove(env, off, cnt);
12124 if (err)
12125 return err;
12126
12127 err = bpf_adj_linfo_after_remove(env, off, cnt);
12128 if (err)
12129 return err;
12130
12131 memmove(aux_data + off, aux_data + off + cnt,
12132 sizeof(*aux_data) * (orig_prog_len - off - cnt));
12133
12134 return 0;
12135}
12136
Daniel Borkmann2a5418a2018-01-26 23:33:37 +010012137/* The verifier does more data flow analysis than llvm and will not
12138 * explore branches that are dead at run time. Malicious programs can
12139 * have dead code too. Therefore replace all dead at-run-time code
12140 * with 'ja -1'.
12141 *
12142 * Just nops are not optimal, e.g. if they would sit at the end of the
12143 * program and through another bug we would manage to jump there, then
12144 * we'd execute beyond program memory otherwise. Returning exception
12145 * code also wouldn't work since we can have subprogs where the dead
12146 * code could be located.
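 *
 * Note: with off = -1 a BPF_JA jumps to itself (insn_idx += off + 1), so
 * any stray entry into patched dead code spins in place instead of
 * escaping into other instructions.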
Alexei Starovoitovc1311872017-11-22 16:42:05 -080012147 */
12148static void sanitize_dead_code(struct bpf_verifier_env *env)
12149{
12150 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
Daniel Borkmann2a5418a2018-01-26 23:33:37 +010012151 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
Alexei Starovoitovc1311872017-11-22 16:42:05 -080012152 struct bpf_insn *insn = env->prog->insnsi;
12153 const int insn_cnt = env->prog->len;
12154 int i;
12155
12156 for (i = 0; i < insn_cnt; i++) {
12157 if (aux_data[i].seen)
12158 continue;
Daniel Borkmann2a5418a2018-01-26 23:33:37 +010012159 memcpy(insn + i, &trap, sizeof(trap));
Ilya Leoshkevich45c709f2021-08-12 17:18:10 +020012160 aux_data[i].zext_dst = false;
Alexei Starovoitovc1311872017-11-22 16:42:05 -080012161 }
12162}
12163
Jakub Kicinskie2ae4ca2019-01-22 22:45:19 -080012164static bool insn_is_cond_jump(u8 code)
12165{
12166 u8 op;
12167
Jiong Wang092ed092019-01-26 12:26:01 -050012168 if (BPF_CLASS(code) == BPF_JMP32)
12169 return true;
12170
Jakub Kicinskie2ae4ca2019-01-22 22:45:19 -080012171 if (BPF_CLASS(code) != BPF_JMP)
12172 return false;
12173
12174 op = BPF_OP(code);
12175 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
12176}
12177
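/* Sketch (added for illustration): for a conditional jump where one side was
 * never marked 'seen' by the walker,
 *
 *   if r0 == 0 goto +3   // fallthrough dead -> rewritten to: goto +3
 *   if r0 == 0 goto +3   // jump target dead -> rewritten to: goto +0
 *
 * i.e. the branch is hard-wired to an unconditional ja so the dead side can
 * be reclaimed by opt_remove_dead_code()/opt_remove_nops().
 */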
12178static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
12179{
12180 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
12181 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
12182 struct bpf_insn *insn = env->prog->insnsi;
12183 const int insn_cnt = env->prog->len;
12184 int i;
12185
12186 for (i = 0; i < insn_cnt; i++, insn++) {
12187 if (!insn_is_cond_jump(insn->code))
12188 continue;
12189
12190 if (!aux_data[i + 1].seen)
12191 ja.off = insn->off;
12192 else if (!aux_data[i + 1 + insn->off].seen)
12193 ja.off = 0;
12194 else
12195 continue;
12196
Jakub Kicinski08ca90a2019-01-22 22:45:24 -080012197 if (bpf_prog_is_dev_bound(env->prog->aux))
12198 bpf_prog_offload_replace_insn(env, i, &ja);
12199
Jakub Kicinskie2ae4ca2019-01-22 22:45:19 -080012200 memcpy(insn, &ja, sizeof(ja));
12201 }
12202}
12203
Jakub Kicinski52875a02019-01-22 22:45:20 -080012204static int opt_remove_dead_code(struct bpf_verifier_env *env)
12205{
12206 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
12207 int insn_cnt = env->prog->len;
12208 int i, err;
12209
12210 for (i = 0; i < insn_cnt; i++) {
12211 int j;
12212
12213 j = 0;
12214 while (i + j < insn_cnt && !aux_data[i + j].seen)
12215 j++;
12216 if (!j)
12217 continue;
12218
12219 err = verifier_remove_insns(env, i, j);
12220 if (err)
12221 return err;
12222 insn_cnt = env->prog->len;
12223 }
12224
12225 return 0;
12226}
12227
Jakub Kicinskia1b14ab2019-01-22 22:45:21 -080012228static int opt_remove_nops(struct bpf_verifier_env *env)
12229{
12230 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
12231 struct bpf_insn *insn = env->prog->insnsi;
12232 int insn_cnt = env->prog->len;
12233 int i, err;
12234
12235 for (i = 0; i < insn_cnt; i++) {
12236 if (memcmp(&insn[i], &ja, sizeof(ja)))
12237 continue;
12238
12239 err = verifier_remove_insns(env, i, 1);
12240 if (err)
12241 return err;
12242 insn_cnt--;
12243 i--;
12244 }
12245
12246 return 0;
12247}
12248
Jiong Wangd6c23082019-05-24 23:25:18 +010012249static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
12250 const union bpf_attr *attr)
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012251{
Jiong Wangd6c23082019-05-24 23:25:18 +010012252 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012253 struct bpf_insn_aux_data *aux = env->insn_aux_data;
Jiong Wangd6c23082019-05-24 23:25:18 +010012254 int i, patch_len, delta = 0, len = env->prog->len;
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012255 struct bpf_insn *insns = env->prog->insnsi;
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012256 struct bpf_prog *new_prog;
Jiong Wangd6c23082019-05-24 23:25:18 +010012257 bool rnd_hi32;
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012258
Jiong Wangd6c23082019-05-24 23:25:18 +010012259 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012260 zext_patch[1] = BPF_ZEXT_REG(0);
Jiong Wangd6c23082019-05-24 23:25:18 +010012261 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
12262 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
12263 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
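	/* The two patch buffers, noted here for clarity:
	 *   zext_patch:     insn; w<dst> = w<dst>            (explicit zext)
	 *   rnd_hi32_patch: insn; AX = imm_rnd; AX <<= 32; dst |= AX
	 * The latter poisons the upper 32 bits of a 32-bit-defined register
	 * so tests catch code that wrongly assumes those bits are zero.
	 */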
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012264 for (i = 0; i < len; i++) {
12265 int adj_idx = i + delta;
12266 struct bpf_insn insn;
Ilya Leoshkevich83a28812021-03-01 16:40:19 +010012267 int load_reg;
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012268
Jiong Wangd6c23082019-05-24 23:25:18 +010012269 insn = insns[adj_idx];
Ilya Leoshkevich83a28812021-03-01 16:40:19 +010012270 load_reg = insn_def_regno(&insn);
Jiong Wangd6c23082019-05-24 23:25:18 +010012271 if (!aux[adj_idx].zext_dst) {
12272 u8 code, class;
12273 u32 imm_rnd;
12274
12275 if (!rnd_hi32)
12276 continue;
12277
12278 code = insn.code;
12279 class = BPF_CLASS(code);
Ilya Leoshkevich83a28812021-03-01 16:40:19 +010012280 if (load_reg == -1)
Jiong Wangd6c23082019-05-24 23:25:18 +010012281 continue;
12282
12283 /* NOTE: arg "reg" (the fourth one) is only used for
Ilya Leoshkevich83a28812021-03-01 16:40:19 +010012284 * BPF_STX + SRC_OP, so it is safe to pass NULL
12285 * here.
Jiong Wangd6c23082019-05-24 23:25:18 +010012286 */
Ilya Leoshkevich83a28812021-03-01 16:40:19 +010012287 if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
Jiong Wangd6c23082019-05-24 23:25:18 +010012288 if (class == BPF_LD &&
12289 BPF_MODE(code) == BPF_IMM)
12290 i++;
12291 continue;
12292 }
12293
12294 /* ctx load could be transformed into wider load. */
12295 if (class == BPF_LDX &&
12296 aux[adj_idx].ptr_type == PTR_TO_CTX)
12297 continue;
12298
12299 imm_rnd = get_random_int();
12300 rnd_hi32_patch[0] = insn;
12301 rnd_hi32_patch[1].imm = imm_rnd;
Ilya Leoshkevich83a28812021-03-01 16:40:19 +010012302 rnd_hi32_patch[3].dst_reg = load_reg;
Jiong Wangd6c23082019-05-24 23:25:18 +010012303 patch = rnd_hi32_patch;
12304 patch_len = 4;
12305 goto apply_patch_buffer;
12306 }
12307
Brendan Jackman39491862021-03-04 18:56:46 -080012308		/* Add in a zero-extend instruction if a) the JIT has requested
12309 * it or b) it's a CMPXCHG.
12310 *
12311 * The latter is because: BPF_CMPXCHG always loads a value into
12312 * R0, therefore always zero-extends. However some archs'
12313 * equivalent instruction only does this load when the
12314 * comparison is successful. This detail of CMPXCHG is
12315 * orthogonal to the general zero-extension behaviour of the
12316 * CPU, so it's treated independently of bpf_jit_needs_zext.
12317 */
12318 if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012319 continue;
12320
Ilya Leoshkevich83a28812021-03-01 16:40:19 +010012321 if (WARN_ON(load_reg == -1)) {
12322 verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
12323 return -EFAULT;
Ilya Leoshkevichb2e37a72021-02-10 21:45:02 +010012324 }
12325
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012326 zext_patch[0] = insn;
Ilya Leoshkevichb2e37a72021-02-10 21:45:02 +010012327 zext_patch[1].dst_reg = load_reg;
12328 zext_patch[1].src_reg = load_reg;
Jiong Wangd6c23082019-05-24 23:25:18 +010012329 patch = zext_patch;
12330 patch_len = 2;
12331apply_patch_buffer:
12332 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012333 if (!new_prog)
12334 return -ENOMEM;
12335 env->prog = new_prog;
12336 insns = new_prog->insnsi;
12337 aux = env->insn_aux_data;
Jiong Wangd6c23082019-05-24 23:25:18 +010012338 delta += patch_len - 1;
Jiong Wanga4b1d3c2019-05-24 23:25:15 +010012339 }
12340
12341 return 0;
12342}
12343
Joe Stringerc64b7982018-10-02 13:35:33 -070012344/* convert load instructions that access fields of a context type into a
12345 * sequence of instructions that access fields of the underlying structure:
12346 * struct __sk_buff -> struct sk_buff
12347 * struct bpf_sock_ops -> struct sock
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012348 */
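/* For example (illustration, not from the original source): a read such as
 *   r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
 * is rewritten via the prog type's convert_ctx_access() into a load from
 * the real offset of the len field inside struct sk_buff.
 */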
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +010012349static int convert_ctx_accesses(struct bpf_verifier_env *env)
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012350{
Jakub Kicinski00176a32017-10-16 16:40:54 -070012351 const struct bpf_verifier_ops *ops = env->ops;
Daniel Borkmannf96da092017-07-02 02:13:27 +020012352 int i, cnt, size, ctx_field_size, delta = 0;
Jakub Kicinski3df126f2016-09-21 11:43:56 +010012353 const int insn_cnt = env->prog->len;
Daniel Borkmann36bbef52016-09-20 00:26:13 +020012354 struct bpf_insn insn_buf[16], *insn;
Andrey Ignatov46f53a62018-11-10 22:15:13 -080012355 u32 target_size, size_default, off;
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012356 struct bpf_prog *new_prog;
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070012357 enum bpf_access_type type;
Daniel Borkmannf96da092017-07-02 02:13:27 +020012358 bool is_narrower_load;
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012359
Daniel Borkmannb09928b2018-10-24 22:05:49 +020012360 if (ops->gen_prologue || env->seen_direct_write) {
12361 if (!ops->gen_prologue) {
12362 verbose(env, "bpf verifier is misconfigured\n");
12363 return -EINVAL;
12364 }
Daniel Borkmann36bbef52016-09-20 00:26:13 +020012365 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
12366 env->prog);
12367 if (cnt >= ARRAY_SIZE(insn_buf)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070012368 verbose(env, "bpf verifier is misconfigured\n");
Daniel Borkmann36bbef52016-09-20 00:26:13 +020012369 return -EINVAL;
12370 } else if (cnt) {
Alexei Starovoitov80419022017-03-15 18:26:41 -070012371 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
Daniel Borkmann36bbef52016-09-20 00:26:13 +020012372 if (!new_prog)
12373 return -ENOMEM;
Alexei Starovoitov80419022017-03-15 18:26:41 -070012374
Daniel Borkmann36bbef52016-09-20 00:26:13 +020012375 env->prog = new_prog;
Jakub Kicinski3df126f2016-09-21 11:43:56 +010012376 delta += cnt - 1;
Daniel Borkmann36bbef52016-09-20 00:26:13 +020012377 }
12378 }
12379
Joe Stringerc64b7982018-10-02 13:35:33 -070012380 if (bpf_prog_is_dev_bound(env->prog->aux))
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012381 return 0;
12382
Jakub Kicinski3df126f2016-09-21 11:43:56 +010012383 insn = env->prog->insnsi + delta;
Daniel Borkmann36bbef52016-09-20 00:26:13 +020012384
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012385 for (i = 0; i < insn_cnt; i++, insn++) {
Joe Stringerc64b7982018-10-02 13:35:33 -070012386 bpf_convert_ctx_access_t convert_ctx_access;
Daniel Borkmann2039f262021-07-13 08:18:31 +000012387 bool ctx_access;
Joe Stringerc64b7982018-10-02 13:35:33 -070012388
Daniel Borkmann62c79892017-01-12 11:51:33 +010012389 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
12390 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
12391 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
Daniel Borkmann2039f262021-07-13 08:18:31 +000012392 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070012393 type = BPF_READ;
Daniel Borkmann2039f262021-07-13 08:18:31 +000012394 ctx_access = true;
12395 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
12396 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
12397 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
12398 insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
12399 insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
12400 insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
12401 insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
12402 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070012403 type = BPF_WRITE;
Daniel Borkmann2039f262021-07-13 08:18:31 +000012404 ctx_access = BPF_CLASS(insn->code) == BPF_STX;
12405 } else {
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012406 continue;
Daniel Borkmann2039f262021-07-13 08:18:31 +000012407 }
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012408
Alexei Starovoitovaf86ca42018-05-15 09:27:05 -070012409 if (type == BPF_WRITE &&
Daniel Borkmann2039f262021-07-13 08:18:31 +000012410 env->insn_aux_data[i + delta].sanitize_stack_spill) {
Alexei Starovoitovaf86ca42018-05-15 09:27:05 -070012411 struct bpf_insn patch[] = {
Alexei Starovoitovaf86ca42018-05-15 09:27:05 -070012412 *insn,
Daniel Borkmann2039f262021-07-13 08:18:31 +000012413 BPF_ST_NOSPEC(),
Alexei Starovoitovaf86ca42018-05-15 09:27:05 -070012414 };
12415
12416 cnt = ARRAY_SIZE(patch);
12417 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
12418 if (!new_prog)
12419 return -ENOMEM;
12420
12421 delta += cnt - 1;
12422 env->prog = new_prog;
12423 insn = new_prog->insnsi + i + delta;
12424 continue;
12425 }
12426
Daniel Borkmann2039f262021-07-13 08:18:31 +000012427 if (!ctx_access)
12428 continue;
12429
Joe Stringerc64b7982018-10-02 13:35:33 -070012430 switch (env->insn_aux_data[i + delta].ptr_type) {
12431 case PTR_TO_CTX:
12432 if (!ops->convert_ctx_access)
12433 continue;
12434 convert_ctx_access = ops->convert_ctx_access;
12435 break;
12436 case PTR_TO_SOCKET:
Martin KaFai Lau46f8bc92019-02-09 23:22:20 -080012437 case PTR_TO_SOCK_COMMON:
Joe Stringerc64b7982018-10-02 13:35:33 -070012438 convert_ctx_access = bpf_sock_convert_ctx_access;
12439 break;
Martin KaFai Lau655a51e2019-02-09 23:22:24 -080012440 case PTR_TO_TCP_SOCK:
12441 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
12442 break;
Jonathan Lemonfada7fd2019-06-06 13:59:40 -070012443 case PTR_TO_XDP_SOCK:
12444 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
12445 break;
Alexei Starovoitov2a027592019-10-15 20:25:02 -070012446 case PTR_TO_BTF_ID:
Martin KaFai Lau27ae79972020-01-08 16:35:03 -080012447 if (type == BPF_READ) {
12448 insn->code = BPF_LDX | BPF_PROBE_MEM |
12449 BPF_SIZE((insn)->code);
12450 env->prog->aux->num_exentries++;
Udip Pant7e407812020-08-25 16:20:00 -070012451 } else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
Alexei Starovoitov2a027592019-10-15 20:25:02 -070012452 verbose(env, "Writes through BTF pointers are not allowed\n");
12453 return -EINVAL;
12454 }
Alexei Starovoitov2a027592019-10-15 20:25:02 -070012455 continue;
Joe Stringerc64b7982018-10-02 13:35:33 -070012456 default:
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012457 continue;
Joe Stringerc64b7982018-10-02 13:35:33 -070012458 }
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012459
Yonghong Song31fd8582017-06-13 15:52:13 -070012460 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
Daniel Borkmannf96da092017-07-02 02:13:27 +020012461 size = BPF_LDST_BYTES(insn);
Yonghong Song31fd8582017-06-13 15:52:13 -070012462
12463 /* If the read access is a narrower load of the field,
12464		 * convert to a 4/8-byte load, to minimize program type specific
12465		 * convert_ctx_access changes. If the conversion is successful,
12466 * we will apply proper mask to the result.
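		 * For example (illustration): a 1-byte read of a 4-byte field
		 * becomes a 4-byte load at the aligned offset, followed by
		 *   rX >>= shift;  // only if the byte is not the lowest one
		 *   rX &= 0xff;    // mask down to the requested width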
12467 */
Daniel Borkmannf96da092017-07-02 02:13:27 +020012468 is_narrower_load = size < ctx_field_size;
Andrey Ignatov46f53a62018-11-10 22:15:13 -080012469 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
12470 off = insn->off;
Yonghong Song31fd8582017-06-13 15:52:13 -070012471 if (is_narrower_load) {
Daniel Borkmannf96da092017-07-02 02:13:27 +020012472 u8 size_code;
Yonghong Song31fd8582017-06-13 15:52:13 -070012473
Daniel Borkmannf96da092017-07-02 02:13:27 +020012474 if (type == BPF_WRITE) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070012475 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
Daniel Borkmannf96da092017-07-02 02:13:27 +020012476 return -EINVAL;
12477 }
12478
12479 size_code = BPF_H;
Yonghong Song31fd8582017-06-13 15:52:13 -070012480 if (ctx_field_size == 4)
12481 size_code = BPF_W;
12482 else if (ctx_field_size == 8)
12483 size_code = BPF_DW;
Daniel Borkmannf96da092017-07-02 02:13:27 +020012484
Daniel Borkmannbc231052018-06-02 23:06:39 +020012485 insn->off = off & ~(size_default - 1);
Yonghong Song31fd8582017-06-13 15:52:13 -070012486 insn->code = BPF_LDX | BPF_MEM | size_code;
12487 }
Daniel Borkmannf96da092017-07-02 02:13:27 +020012488
12489 target_size = 0;
Joe Stringerc64b7982018-10-02 13:35:33 -070012490 cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
12491 &target_size);
Daniel Borkmannf96da092017-07-02 02:13:27 +020012492 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
12493 (ctx_field_size && !target_size)) {
Jakub Kicinski61bd5212017-10-09 10:30:11 -070012494 verbose(env, "bpf verifier is misconfigured\n");
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012495 return -EINVAL;
12496 }
Daniel Borkmannf96da092017-07-02 02:13:27 +020012497
12498 if (is_narrower_load && size < target_size) {
Ilya Leoshkevichd895a0f2019-08-16 12:53:00 +020012499 u8 shift = bpf_ctx_narrow_access_offset(
12500 off, size, size_default) * 8;
Andrey Ignatovd7af7e42021-08-20 09:39:35 -070012501 if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
12502 verbose(env, "bpf verifier narrow ctx load misconfigured\n");
12503 return -EINVAL;
12504 }
Andrey Ignatov46f53a62018-11-10 22:15:13 -080012505 if (ctx_field_size <= 4) {
12506 if (shift)
12507 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
12508 insn->dst_reg,
12509 shift);
Yonghong Song31fd8582017-06-13 15:52:13 -070012510 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
Daniel Borkmannf96da092017-07-02 02:13:27 +020012511 (1 << size * 8) - 1);
Andrey Ignatov46f53a62018-11-10 22:15:13 -080012512 } else {
12513 if (shift)
12514 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
12515 insn->dst_reg,
12516 shift);
Yonghong Song31fd8582017-06-13 15:52:13 -070012517 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
Krzesimir Nowake2f7fc02019-05-08 18:08:58 +020012518 (1ULL << size * 8) - 1);
Andrey Ignatov46f53a62018-11-10 22:15:13 -080012519 }
Yonghong Song31fd8582017-06-13 15:52:13 -070012520 }
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012521
Alexei Starovoitov80419022017-03-15 18:26:41 -070012522 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012523 if (!new_prog)
12524 return -ENOMEM;
12525
Jakub Kicinski3df126f2016-09-21 11:43:56 +010012526 delta += cnt - 1;
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012527
12528 /* keep walking new program and skip insns we just inserted */
12529 env->prog = new_prog;
Jakub Kicinski3df126f2016-09-21 11:43:56 +010012530 insn = new_prog->insnsi + i + delta;
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070012531 }
12532
12533 return 0;
12534}
12535
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012536static int jit_subprogs(struct bpf_verifier_env *env)
12537{
12538 struct bpf_prog *prog = env->prog, **func, *tmp;
12539 int i, j, subprog_start, subprog_end = 0, len, subprog;
Maciej Fijalkowskia748c692020-09-16 23:10:05 +020012540 struct bpf_map *map_ptr;
Daniel Borkmann7105e822017-12-20 13:42:57 +010012541 struct bpf_insn *insn;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012542 void *old_bpf_func;
Yonghong Songc4c0bdc2020-06-23 17:10:54 -070012543 int err, num_exentries;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012544
Jiong Wangf910cef2018-05-02 16:17:17 -040012545 if (env->subprog_cnt <= 1)
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012546 return 0;
12547
Daniel Borkmann7105e822017-12-20 13:42:57 +010012548 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
Martin KaFai Lau3990ed42021-11-05 18:40:14 -070012549 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
Yonghong Song69c087b2021-02-26 12:49:25 -080012550 continue;
Yonghong Song69c087b2021-02-26 12:49:25 -080012551
Daniel Borkmannc7a89782018-07-12 21:44:28 +020012552		/* Upon error here we cannot fall back to the interpreter but
12553 * need a hard reject of the program. Thus -EFAULT is
12554 * propagated in any case.
12555 */
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012556 subprog = find_subprog(env, i + insn->imm + 1);
12557 if (subprog < 0) {
12558 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
12559 i + insn->imm + 1);
12560 return -EFAULT;
12561 }
12562 /* temporarily remember subprog id inside insn instead of
12563 * aux_data, since next loop will split up all insns into funcs
12564 */
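		/* Sketch of the state this first pass leaves behind: a
		 * bpf-to-bpf call insn that entered with {imm = insn delta,
		 * off = 0} leaves with {imm = 1, off = subprog index}, while
		 * the original imm survives in insn_aux_data[].call_imm for
		 * the interpreter fallback path below.
		 */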
Jiong Wangf910cef2018-05-02 16:17:17 -040012565 insn->off = subprog;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012566 /* remember original imm in case JIT fails and fallback
12567 * to interpreter will be needed
12568 */
12569 env->insn_aux_data[i].call_imm = insn->imm;
12570 /* point imm to __bpf_call_base+1 from JITs point of view */
12571 insn->imm = 1;
Martin KaFai Lau3990ed42021-11-05 18:40:14 -070012572 if (bpf_pseudo_func(insn))
12573 /* jit (e.g. x86_64) may emit fewer instructions
12574 * if it learns a u32 imm is the same as a u64 imm.
12575 * Force a non zero here.
12576 */
12577 insn[1].imm = 1;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012578 }
12579
Martin KaFai Lauc454a462018-12-07 16:42:25 -080012580 err = bpf_prog_alloc_jited_linfo(prog);
12581 if (err)
12582 goto out_undo_insn;
12583
12584 err = -ENOMEM;
Kees Cook6396bb22018-06-12 14:03:40 -070012585 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012586 if (!func)
Daniel Borkmannc7a89782018-07-12 21:44:28 +020012587 goto out_undo_insn;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012588
Jiong Wangf910cef2018-05-02 16:17:17 -040012589 for (i = 0; i < env->subprog_cnt; i++) {
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012590 subprog_start = subprog_end;
Jiong Wang4cb3d992018-05-02 16:17:19 -040012591 subprog_end = env->subprog_info[i + 1].start;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012592
12593 len = subprog_end - subprog_start;
Andrii Nakryikofb7dd8b2021-08-15 00:05:54 -070012594 /* bpf_prog_run() doesn't call subprogs directly,
Alexei Starovoitov492ecee2019-02-25 14:28:39 -080012595 * hence main prog stats include the runtime of subprogs.
12596		 * subprogs don't have IDs and are not reachable via prog_get_next_id;
Alexei Starovoitov700d4792021-02-09 19:36:26 -080012597 * func[i]->stats will never be accessed and stays NULL
Alexei Starovoitov492ecee2019-02-25 14:28:39 -080012598 */
12599 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012600 if (!func[i])
12601 goto out_free;
12602 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
12603 len * sizeof(struct bpf_insn));
Daniel Borkmann4f74d802017-12-20 13:42:56 +010012604 func[i]->type = prog->type;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012605 func[i]->len = len;
Daniel Borkmann4f74d802017-12-20 13:42:56 +010012606 if (bpf_prog_calc_tag(func[i]))
12607 goto out_free;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012608 func[i]->is_func = 1;
Yonghong Songba64e7d2018-11-24 23:20:44 -080012609 func[i]->aux->func_idx = i;
John Fastabendf263a812021-07-07 15:38:47 -070012610 /* Below members will be freed only at prog->aux */
Yonghong Songba64e7d2018-11-24 23:20:44 -080012611 func[i]->aux->btf = prog->aux->btf;
12612 func[i]->aux->func_info = prog->aux->func_info;
John Fastabendf263a812021-07-07 15:38:47 -070012613 func[i]->aux->poke_tab = prog->aux->poke_tab;
12614 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
Yonghong Songba64e7d2018-11-24 23:20:44 -080012615
Maciej Fijalkowskia748c692020-09-16 23:10:05 +020012616 for (j = 0; j < prog->aux->size_poke_tab; j++) {
John Fastabendf263a812021-07-07 15:38:47 -070012617 struct bpf_jit_poke_descriptor *poke;
Maciej Fijalkowskia748c692020-09-16 23:10:05 +020012618
John Fastabendf263a812021-07-07 15:38:47 -070012619 poke = &prog->aux->poke_tab[j];
12620 if (poke->insn_idx < subprog_end &&
12621 poke->insn_idx >= subprog_start)
12622 poke->aux = func[i]->aux;
Maciej Fijalkowskia748c692020-09-16 23:10:05 +020012623 }
12624
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012625 /* Use bpf_prog_F_tag to indicate functions in stack traces.
12626 * Long term would need debug info to populate names
12627 */
12628 func[i]->aux->name[0] = 'F';
Jiong Wang9c8105b2018-05-02 16:17:18 -040012629 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012630 func[i]->jit_requested = 1;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -070012631 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +053012632 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
Martin KaFai Lauc454a462018-12-07 16:42:25 -080012633 func[i]->aux->linfo = prog->aux->linfo;
12634 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
12635 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
12636 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
Yonghong Songc4c0bdc2020-06-23 17:10:54 -070012637 num_exentries = 0;
12638 insn = func[i]->insnsi;
12639 for (j = 0; j < func[i]->len; j++, insn++) {
12640 if (BPF_CLASS(insn->code) == BPF_LDX &&
12641 BPF_MODE(insn->code) == BPF_PROBE_MEM)
12642 num_exentries++;
12643 }
12644 func[i]->aux->num_exentries = num_exentries;
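		/* Each BPF_PROBE_MEM load may fault at runtime; arch JITs
		 * emit one exception-table entry per such load, which is
		 * why they are counted per subprog above.
		 */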
Maciej Fijalkowskiebf7d1f2020-09-16 23:10:08 +020012645 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012646 func[i] = bpf_int_jit_compile(func[i]);
12647 if (!func[i]->jited) {
12648 err = -ENOTSUPP;
12649 goto out_free;
12650 }
12651 cond_resched();
12652 }
Maciej Fijalkowskia748c692020-09-16 23:10:05 +020012653
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012654 /* at this point all bpf functions were successfully JITed
12655 * now populate all bpf_calls with correct addresses and
12656 * run last pass of JIT
12657 */
Jiong Wangf910cef2018-05-02 16:17:17 -040012658 for (i = 0; i < env->subprog_cnt; i++) {
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012659 insn = func[i]->insnsi;
12660 for (j = 0; j < func[i]->len; j++, insn++) {
Yonghong Song69c087b2021-02-26 12:49:25 -080012661 if (bpf_pseudo_func(insn)) {
Martin KaFai Lau3990ed42021-11-05 18:40:14 -070012662 subprog = insn->off;
Yonghong Song69c087b2021-02-26 12:49:25 -080012663 insn[0].imm = (u32)(long)func[subprog]->bpf_func;
12664 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
12665 continue;
12666 }
Yonghong Song23a2d702021-02-04 15:48:27 -080012667 if (!bpf_pseudo_call(insn))
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012668 continue;
12669 subprog = insn->off;
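			/* BPF_CALL_IMM() encodes the callee as an offset
			 * relative to __bpf_call_base (roughly
			 * (void *)addr - (void *)__bpf_call_base), i.e. the
			 * relative form that call insns carry in imm.
			 */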
Kees Cook3d717fa2021-09-28 16:09:45 -070012670 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012671 }
Sandipan Das2162fed2018-05-24 12:26:45 +053012672
12673 /* we use the aux data to keep a list of the start addresses
12674 * of the JITed images for each function in the program
12675 *
12676 * for some architectures, such as powerpc64, the imm field
12677 * might not be large enough to hold the offset of the start
12678 * address of the callee's JITed image from __bpf_call_base
12679 *
12680 * in such cases, we can lookup the start address of a callee
12681 * by using its subprog id, available from the off field of
12682 * the call instruction, as an index for this list
12683 */
12684 func[i]->aux->func = func;
12685 func[i]->aux->func_cnt = env->subprog_cnt;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012686 }
Jiong Wangf910cef2018-05-02 16:17:17 -040012687 for (i = 0; i < env->subprog_cnt; i++) {
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012688 old_bpf_func = func[i]->bpf_func;
12689 tmp = bpf_int_jit_compile(func[i]);
12690 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
12691 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
Daniel Borkmannc7a89782018-07-12 21:44:28 +020012692 err = -ENOTSUPP;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012693 goto out_free;
12694 }
12695 cond_resched();
12696 }
12697
12698 /* finally lock prog and jit images for all functions and
12699	 * populate kallsyms
12700 */
Jiong Wangf910cef2018-05-02 16:17:17 -040012701 for (i = 0; i < env->subprog_cnt; i++) {
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012702 bpf_prog_lock_ro(func[i]);
12703 bpf_prog_kallsyms_add(func[i]);
12704 }
Daniel Borkmann7105e822017-12-20 13:42:57 +010012705
12706	/* Last step: make the now-unused interpreter insns from the main
12707	 * prog consistent for later dump requests, so they look the
12708	 * same as if they had only ever been interpreted.
12709 */
12710 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
Yonghong Song69c087b2021-02-26 12:49:25 -080012711 if (bpf_pseudo_func(insn)) {
12712 insn[0].imm = env->insn_aux_data[i].call_imm;
Martin KaFai Lau3990ed42021-11-05 18:40:14 -070012713 insn[1].imm = insn->off;
12714 insn->off = 0;
Yonghong Song69c087b2021-02-26 12:49:25 -080012715 continue;
12716 }
Yonghong Song23a2d702021-02-04 15:48:27 -080012717 if (!bpf_pseudo_call(insn))
Daniel Borkmann7105e822017-12-20 13:42:57 +010012718 continue;
12719 insn->off = env->insn_aux_data[i].call_imm;
12720 subprog = find_subprog(env, i + insn->off + 1);
Sandipan Dasdbecd732018-05-24 12:26:48 +053012721 insn->imm = subprog;
Daniel Borkmann7105e822017-12-20 13:42:57 +010012722 }
12723
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012724 prog->jited = 1;
12725 prog->bpf_func = func[0]->bpf_func;
12726 prog->aux->func = func;
Jiong Wangf910cef2018-05-02 16:17:17 -040012727 prog->aux->func_cnt = env->subprog_cnt;
Martin KaFai Laue16301f2021-03-24 18:51:30 -070012728 bpf_prog_jit_attempt_done(prog);
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012729 return 0;
12730out_free:
John Fastabendf263a812021-07-07 15:38:47 -070012731 /* We failed JIT'ing, so at this point we need to unregister poke
12732	 * descriptors from subprogs, so that the kernel is not attempting to
12733	 * patch them anymore as we're freeing the subprog JIT memory.
12734 */
12735 for (i = 0; i < prog->aux->size_poke_tab; i++) {
12736 map_ptr = prog->aux->poke_tab[i].tail_call.map;
12737 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
12738 }
12739	/* At this point we're guaranteed that the poke descriptors are not
12740	 * live anymore. We can just unlink each subprog's descriptor table
12741	 * as it's released with the main prog.
12742 */
Maciej Fijalkowskia748c692020-09-16 23:10:05 +020012743 for (i = 0; i < env->subprog_cnt; i++) {
12744 if (!func[i])
12745 continue;
John Fastabendf263a812021-07-07 15:38:47 -070012746 func[i]->aux->poke_tab = NULL;
Maciej Fijalkowskia748c692020-09-16 23:10:05 +020012747 bpf_jit_free(func[i]);
12748 }
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012749 kfree(func);
Daniel Borkmannc7a89782018-07-12 21:44:28 +020012750out_undo_insn:
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012751 /* cleanup main prog to be interpreted */
12752 prog->jit_requested = 0;
12753 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
Yonghong Song23a2d702021-02-04 15:48:27 -080012754 if (!bpf_pseudo_call(insn))
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012755 continue;
12756 insn->off = 0;
12757 insn->imm = env->insn_aux_data[i].call_imm;
12758 }
Martin KaFai Laue16301f2021-03-24 18:51:30 -070012759 bpf_prog_jit_attempt_done(prog);
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012760 return err;
12761}
12762
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -080012763static int fixup_call_args(struct bpf_verifier_env *env)
12764{
David S. Miller19d28fb2018-01-11 21:27:54 -050012765#ifndef CONFIG_BPF_JIT_ALWAYS_ON
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -080012766 struct bpf_prog *prog = env->prog;
12767 struct bpf_insn *insn = prog->insnsi;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -070012768 bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -080012769 int i, depth;
David S. Miller19d28fb2018-01-11 21:27:54 -050012770#endif
Quentin Monnete4052d02018-10-07 12:56:58 +010012771 int err = 0;
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -080012772
Quentin Monnete4052d02018-10-07 12:56:58 +010012773 if (env->prog->jit_requested &&
12774 !bpf_prog_is_dev_bound(env->prog->aux)) {
David S. Miller19d28fb2018-01-11 21:27:54 -050012775 err = jit_subprogs(env);
12776 if (err == 0)
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -080012777 return 0;
Daniel Borkmannc7a89782018-07-12 21:44:28 +020012778 if (err == -EFAULT)
12779 return err;
David S. Miller19d28fb2018-01-11 21:27:54 -050012780 }
12781#ifndef CONFIG_BPF_JIT_ALWAYS_ON
Martin KaFai Laue6ac2452021-03-24 18:51:42 -070012782 if (has_kfunc_call) {
12783		verbose(env, "calling kernel functions is not allowed in non-JITed programs\n");
12784 return -EINVAL;
12785 }
Maciej Fijalkowskie4119012020-09-16 23:10:09 +020012786 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
12787 /* When JIT fails the progs with bpf2bpf calls and tail_calls
12788 * have to be rejected, since interpreter doesn't support them yet.
12789 */
12790 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
12791 return -EINVAL;
12792 }
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -080012793 for (i = 0; i < prog->len; i++, insn++) {
Yonghong Song69c087b2021-02-26 12:49:25 -080012794 if (bpf_pseudo_func(insn)) {
12795 /* When JIT fails the progs with callback calls
12796 * have to be rejected, since interpreter doesn't support them yet.
12797 */
12798 verbose(env, "callbacks are not allowed in non-JITed programs\n");
12799 return -EINVAL;
12800 }
12801
Yonghong Song23a2d702021-02-04 15:48:27 -080012802 if (!bpf_pseudo_call(insn))
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -080012803 continue;
12804 depth = get_callee_stack_depth(env, insn, i);
12805 if (depth < 0)
12806 return depth;
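		/* For the interpreter-only fallback, bpf_patch_call_args()
		 * (a sketch of its behavior) rewrites the pseudo call into
		 * BPF_CALL_ARGS and selects an interpreter entry point sized
		 * for the callee's stack depth, so bpf-to-bpf calls can run
		 * without a JIT.
		 */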
12807 bpf_patch_call_args(insn, depth);
12808 }
David S. Miller19d28fb2018-01-11 21:27:54 -050012809 err = 0;
12810#endif
12811 return err;
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -080012812}
12813
Martin KaFai Laue6ac2452021-03-24 18:51:42 -070012814static int fixup_kfunc_call(struct bpf_verifier_env *env,
12815 struct bpf_insn *insn)
12816{
12817 const struct bpf_kfunc_desc *desc;
12818
Kumar Kartikeya Dwivedia5d82722021-10-02 06:47:50 +053012819 if (!insn->imm) {
12820 verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
12821 return -EINVAL;
12822 }
12823
Martin KaFai Laue6ac2452021-03-24 18:51:42 -070012824 /* insn->imm has the btf func_id. Replace it with
12825	 * an address (relative to __bpf_call_base).
12826 */
Kumar Kartikeya Dwivedi23576722021-10-02 06:47:49 +053012827 desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
Martin KaFai Laue6ac2452021-03-24 18:51:42 -070012828 if (!desc) {
12829 verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
12830 insn->imm);
12831 return -EFAULT;
12832 }
12833
12834 insn->imm = desc->imm;
12835
12836 return 0;
12837}
12838
Brendan Jackmane6ac5932021-02-17 10:45:09 +000012839/* Do various post-verification rewrites in a single program pass.
12840 * These rewrites simplify JIT and interpreter implementations.
Alexei Starovoitove245c5c62017-03-15 18:26:39 -070012841 */
Brendan Jackmane6ac5932021-02-17 10:45:09 +000012842static int do_misc_fixups(struct bpf_verifier_env *env)
Alexei Starovoitove245c5c62017-03-15 18:26:39 -070012843{
Alexei Starovoitov79741b32017-03-15 18:26:40 -070012844 struct bpf_prog *prog = env->prog;
Daniel Borkmannd2e4c1e2019-11-22 21:07:59 +010012845 bool expect_blinding = bpf_jit_blinding_enabled(prog);
Jiri Olsa9b99edc2021-07-14 11:43:55 +020012846 enum bpf_prog_type prog_type = resolve_prog_type(prog);
Alexei Starovoitov79741b32017-03-15 18:26:40 -070012847 struct bpf_insn *insn = prog->insnsi;
Alexei Starovoitove245c5c62017-03-15 18:26:39 -070012848 const struct bpf_func_proto *fn;
Alexei Starovoitov79741b32017-03-15 18:26:40 -070012849 const int insn_cnt = prog->len;
Daniel Borkmann09772d92018-06-02 23:06:35 +020012850 const struct bpf_map_ops *ops;
Daniel Borkmannc93552c2018-05-24 02:32:53 +020012851 struct bpf_insn_aux_data *aux;
Alexei Starovoitov81ed18a2017-03-15 18:26:42 -070012852 struct bpf_insn insn_buf[16];
12853 struct bpf_prog *new_prog;
12854 struct bpf_map *map_ptr;
Daniel Borkmannd2e4c1e2019-11-22 21:07:59 +010012855 int i, ret, cnt, delta = 0;
Alexei Starovoitove245c5c62017-03-15 18:26:39 -070012856
Alexei Starovoitov79741b32017-03-15 18:26:40 -070012857 for (i = 0; i < insn_cnt; i++, insn++) {
Brendan Jackmane6ac5932021-02-17 10:45:09 +000012858 /* Make divide-by-zero exceptions impossible. */
Daniel Borkmannf6b1b3b2018-01-26 23:33:39 +010012859 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
12860 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
12861 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
Alexei Starovoitov68fda452018-01-12 18:59:52 -080012862 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
Daniel Borkmannf6b1b3b2018-01-26 23:33:39 +010012863 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
Daniel Borkmanne88b2c62021-02-09 18:46:10 +000012864 bool isdiv = BPF_OP(insn->code) == BPF_DIV;
12865 struct bpf_insn *patchlet;
12866 struct bpf_insn chk_and_div[] = {
Daniel Borkmann9b00f1b2021-02-10 14:14:42 +010012867 /* [R,W]x div 0 -> 0 */
Daniel Borkmanne88b2c62021-02-09 18:46:10 +000012868 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
12869 BPF_JNE | BPF_K, insn->src_reg,
12870 0, 2, 0),
Daniel Borkmannf6b1b3b2018-01-26 23:33:39 +010012871 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
12872 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12873 *insn,
12874 };
Daniel Borkmanne88b2c62021-02-09 18:46:10 +000012875 struct bpf_insn chk_and_mod[] = {
Daniel Borkmann9b00f1b2021-02-10 14:14:42 +010012876 /* [R,W]x mod 0 -> [R,W]x */
Daniel Borkmanne88b2c62021-02-09 18:46:10 +000012877 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
12878 BPF_JEQ | BPF_K, insn->src_reg,
Daniel Borkmann9b00f1b2021-02-10 14:14:42 +010012879 0, 1 + (is64 ? 0 : 1), 0),
Daniel Borkmannf6b1b3b2018-01-26 23:33:39 +010012880 *insn,
Daniel Borkmann9b00f1b2021-02-10 14:14:42 +010012881 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12882 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
Daniel Borkmannf6b1b3b2018-01-26 23:33:39 +010012883 };
Daniel Borkmannf6b1b3b2018-01-26 23:33:39 +010012884
Daniel Borkmanne88b2c62021-02-09 18:46:10 +000012885 patchlet = isdiv ? chk_and_div : chk_and_mod;
12886 cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
Daniel Borkmann9b00f1b2021-02-10 14:14:42 +010012887 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
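			/* Resulting eBPF semantics, for illustration:
			 * Rx / 0 and Rx % 0 no longer trap; div-by-zero
			 * yields 0 and mod-by-zero leaves Rx unchanged
			 * (the 32-bit mod variant keeps a trailing MOV32
			 * so the destination stays zero-extended).
			 */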
Daniel Borkmannf6b1b3b2018-01-26 23:33:39 +010012888
12889 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
Alexei Starovoitov68fda452018-01-12 18:59:52 -080012890 if (!new_prog)
12891 return -ENOMEM;
12892
12893 delta += cnt - 1;
12894 env->prog = prog = new_prog;
12895 insn = new_prog->insnsi + i + delta;
12896 continue;
12897 }
12898
Brendan Jackmane6ac5932021-02-17 10:45:09 +000012899 /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
Daniel Borkmanne0cea7c2018-05-04 01:08:14 +020012900 if (BPF_CLASS(insn->code) == BPF_LD &&
12901 (BPF_MODE(insn->code) == BPF_ABS ||
12902 BPF_MODE(insn->code) == BPF_IND)) {
12903 cnt = env->ops->gen_ld_abs(insn, insn_buf);
12904 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
12905 verbose(env, "bpf verifier is misconfigured\n");
12906 return -EINVAL;
12907 }
12908
12909 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
12910 if (!new_prog)
12911 return -ENOMEM;
12912
12913 delta += cnt - 1;
12914 env->prog = prog = new_prog;
12915 insn = new_prog->insnsi + i + delta;
12916 continue;
12917 }
12918
Brendan Jackmane6ac5932021-02-17 10:45:09 +000012919 /* Rewrite pointer arithmetic to mitigate speculation attacks. */
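		/* Sketch of the masking applied below in the non-immediate
		 * case: with limit = aux->alu_limit and off_reg holding the
		 * variable offset,
		 *   AX = limit; AX -= off_reg; AX |= off_reg;
		 *   AX = -AX; AX s>>= 63; AX &= off_reg;
		 * leaves AX == off_reg when 0 <= off_reg <= limit and
		 * AX == 0 otherwise, so the patched ALU op uses AX as a
		 * speculation-safe operand.
		 */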
Daniel Borkmann979d63d2019-01-03 00:58:34 +010012920 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
12921 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
12922 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
12923 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
Daniel Borkmann979d63d2019-01-03 00:58:34 +010012924 struct bpf_insn *patch = &insn_buf[0];
Daniel Borkmann801c6052021-04-29 15:19:37 +000012925 bool issrc, isneg, isimm;
Daniel Borkmann979d63d2019-01-03 00:58:34 +010012926 u32 off_reg;
12927
12928 aux = &env->insn_aux_data[i + delta];
Daniel Borkmann3612af72019-03-01 22:05:29 +010012929 if (!aux->alu_state ||
12930 aux->alu_state == BPF_ALU_NON_POINTER)
Daniel Borkmann979d63d2019-01-03 00:58:34 +010012931 continue;
12932
12933 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
12934 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
12935 BPF_ALU_SANITIZE_SRC;
Daniel Borkmann801c6052021-04-29 15:19:37 +000012936 isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
Daniel Borkmann979d63d2019-01-03 00:58:34 +010012937
12938 off_reg = issrc ? insn->src_reg : insn->dst_reg;
Daniel Borkmann801c6052021-04-29 15:19:37 +000012939 if (isimm) {
12940 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
12941 } else {
12942 if (isneg)
12943 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
12944 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
12945 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
12946 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
12947 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
12948 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
12949 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
12950 }
Daniel Borkmannb9b34dd2021-04-30 16:21:46 +020012951 if (!issrc)
12952 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
12953 insn->src_reg = BPF_REG_AX;
Daniel Borkmann979d63d2019-01-03 00:58:34 +010012954 if (isneg)
12955 insn->code = insn->code == code_add ?
12956 code_sub : code_add;
12957 *patch++ = *insn;
Daniel Borkmann801c6052021-04-29 15:19:37 +000012958 if (issrc && isneg && !isimm)
Daniel Borkmann979d63d2019-01-03 00:58:34 +010012959 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
12960 cnt = patch - insn_buf;
12961
12962 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
12963 if (!new_prog)
12964 return -ENOMEM;
12965
12966 delta += cnt - 1;
12967 env->prog = prog = new_prog;
12968 insn = new_prog->insnsi + i + delta;
12969 continue;
12970 }
12971
Alexei Starovoitov79741b32017-03-15 18:26:40 -070012972 if (insn->code != (BPF_JMP | BPF_CALL))
12973 continue;
Alexei Starovoitovcc8b0b92017-12-14 17:55:05 -080012974 if (insn->src_reg == BPF_PSEUDO_CALL)
12975 continue;
Martin KaFai Laue6ac2452021-03-24 18:51:42 -070012976 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
12977 ret = fixup_kfunc_call(env, insn);
12978 if (ret)
12979 return ret;
12980 continue;
12981 }
Alexei Starovoitove245c5c62017-03-15 18:26:39 -070012982
Alexei Starovoitov79741b32017-03-15 18:26:40 -070012983 if (insn->imm == BPF_FUNC_get_route_realm)
12984 prog->dst_needed = 1;
12985 if (insn->imm == BPF_FUNC_get_prandom_u32)
12986 bpf_user_rnd_init_once();
Josef Bacik9802d862017-12-11 11:36:48 -050012987 if (insn->imm == BPF_FUNC_override_return)
12988 prog->kprobe_override = 1;
Alexei Starovoitov79741b32017-03-15 18:26:40 -070012989 if (insn->imm == BPF_FUNC_tail_call) {
David S. Miller7b9f6da2017-04-20 10:35:33 -040012990 /* If we tail call into other programs, we
12991 * cannot make any assumptions since they can
12992 * be replaced dynamically during runtime in
12993 * the program array.
12994 */
12995 prog->cb_access = 1;
Maciej Fijalkowskie4119012020-09-16 23:10:09 +020012996 if (!allow_tail_call_in_subprogs(env))
12997 prog->aux->stack_depth = MAX_BPF_STACK;
12998 prog->aux->max_pkt_offset = MAX_PACKET_OFF;
David S. Miller7b9f6da2017-04-20 10:35:33 -040012999
Alexei Starovoitov79741b32017-03-15 18:26:40 -070013000			/* mark bpf_tail_call as a different opcode to avoid a
Zhen Lei8fb33b62021-05-25 10:56:59 +080013001			 * conditional branch in the interpreter for every normal
Alexei Starovoitov79741b32017-03-15 18:26:40 -070013002			 * call and to prevent accidental JITing by a JIT compiler
13003			 * that doesn't support bpf_tail_call yet
Alexei Starovoitove245c5c62017-03-15 18:26:39 -070013004 */
Alexei Starovoitov79741b32017-03-15 18:26:40 -070013005 insn->imm = 0;
Alexei Starovoitov71189fa2017-05-30 13:31:27 -070013006 insn->code = BPF_JMP | BPF_TAIL_CALL;
Alexei Starovoitovb2157392018-01-07 17:33:02 -080013007
Daniel Borkmannc93552c2018-05-24 02:32:53 +020013008 aux = &env->insn_aux_data[i + delta];
Alexei Starovoitov2c78ee82020-05-13 16:03:54 -070013009 if (env->bpf_capable && !expect_blinding &&
Daniel Borkmanncc52d912019-12-19 22:19:50 +010013010 prog->jit_requested &&
Daniel Borkmannd2e4c1e2019-11-22 21:07:59 +010013011 !bpf_map_key_poisoned(aux) &&
13012 !bpf_map_ptr_poisoned(aux) &&
13013 !bpf_map_ptr_unpriv(aux)) {
13014 struct bpf_jit_poke_descriptor desc = {
13015 .reason = BPF_POKE_REASON_TAIL_CALL,
13016 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
13017 .tail_call.key = bpf_map_key_immediate(aux),
Maciej Fijalkowskia748c692020-09-16 23:10:05 +020013018 .insn_idx = i + delta,
Daniel Borkmannd2e4c1e2019-11-22 21:07:59 +010013019 };
13020
13021 ret = bpf_jit_add_poke_descriptor(prog, &desc);
13022 if (ret < 0) {
13023 verbose(env, "adding tail call poke descriptor failed\n");
13024 return ret;
13025 }
13026
13027 insn->imm = ret + 1;
13028 continue;
13029 }
13030
Daniel Borkmannc93552c2018-05-24 02:32:53 +020013031 if (!bpf_map_ptr_unpriv(aux))
13032 continue;
13033
Alexei Starovoitovb2157392018-01-07 17:33:02 -080013034 /* instead of changing every JIT dealing with tail_call
13035 * emit two extra insns:
13036 * if (index >= max_entries) goto out;
13037 * index &= array->index_mask;
13038 * to avoid out-of-bounds cpu speculation
13039 */
Daniel Borkmannc93552c2018-05-24 02:32:53 +020013040 if (bpf_map_ptr_poisoned(aux)) {
Colin Ian King40950342018-01-10 09:20:54 +000013041 verbose(env, "tail_call abusing map_ptr\n");
Alexei Starovoitovb2157392018-01-07 17:33:02 -080013042 return -EINVAL;
13043 }
Daniel Borkmannc93552c2018-05-24 02:32:53 +020013044
Daniel Borkmannd2e4c1e2019-11-22 21:07:59 +010013045 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
Alexei Starovoitovb2157392018-01-07 17:33:02 -080013046 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
13047 map_ptr->max_entries, 2);
13048 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
13049 container_of(map_ptr,
13050 struct bpf_array,
13051 map)->index_mask);
13052 insn_buf[2] = *insn;
13053 cnt = 3;
13054 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13055 if (!new_prog)
13056 return -ENOMEM;
13057
13058 delta += cnt - 1;
13059 env->prog = prog = new_prog;
13060 insn = new_prog->insnsi + i + delta;
Alexei Starovoitov79741b32017-03-15 18:26:40 -070013061 continue;
Alexei Starovoitove245c5c62017-03-15 18:26:39 -070013062 }
Alexei Starovoitove245c5c62017-03-15 18:26:39 -070013063
Alexei Starovoitovb00628b2021-07-14 17:54:09 -070013064 if (insn->imm == BPF_FUNC_timer_set_callback) {
13065 /* The verifier will process callback_fn as many times as necessary
13066 * with different maps and the register states prepared by
13067 * set_timer_callback_state will be accurate.
13068 *
13069 * The following use case is valid:
13070 * map1 is shared by prog1, prog2, prog3.
13071 * prog1 calls bpf_timer_init for some map1 elements
13072 * prog2 calls bpf_timer_set_callback for some map1 elements.
13073 * Those that were not bpf_timer_init-ed will return -EINVAL.
13074 * prog3 calls bpf_timer_start for some map1 elements.
13075 * Those that were not both bpf_timer_init-ed and
13076 * bpf_timer_set_callback-ed will return -EINVAL.
13077 */
13078 struct bpf_insn ld_addrs[2] = {
13079 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
13080 };
13081
13082 insn_buf[0] = ld_addrs[0];
13083 insn_buf[1] = ld_addrs[1];
13084 insn_buf[2] = *insn;
13085 cnt = 3;
13086
13087 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13088 if (!new_prog)
13089 return -ENOMEM;
13090
13091 delta += cnt - 1;
13092 env->prog = prog = new_prog;
13093 insn = new_prog->insnsi + i + delta;
13094 goto patch_call_imm;
13095 }
13096
Daniel Borkmann89c63072017-08-19 03:12:45 +020013097 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
Daniel Borkmann09772d92018-06-02 23:06:35 +020013098 * and other inlining handlers are currently limited to 64 bit
13099 * only.
Daniel Borkmann89c63072017-08-19 03:12:45 +020013100 */
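		/* E.g. for arrays, ops->map_gen_lookup() (see
		 * array_map_gen_lookup()) may expand the helper call into a
		 * few insns that bounds-check the key and compute
		 * &array->value[index * round_up(value_size, 8)] inline,
		 * skipping the function call entirely.
		 */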
Alexei Starovoitov60b58afc2017-12-14 17:55:14 -080013101 if (prog->jit_requested && BITS_PER_LONG == 64 &&
Daniel Borkmann09772d92018-06-02 23:06:35 +020013102 (insn->imm == BPF_FUNC_map_lookup_elem ||
13103 insn->imm == BPF_FUNC_map_update_elem ||
Daniel Borkmann84430d42018-10-21 02:09:27 +020013104 insn->imm == BPF_FUNC_map_delete_elem ||
13105 insn->imm == BPF_FUNC_map_push_elem ||
13106 insn->imm == BPF_FUNC_map_pop_elem ||
Björn Töpele6a47502021-03-08 12:29:06 +010013107 insn->imm == BPF_FUNC_map_peek_elem ||
Andrey Ignatov0640c772021-10-05 17:18:38 -070013108 insn->imm == BPF_FUNC_redirect_map ||
13109 insn->imm == BPF_FUNC_for_each_map_elem)) {
Daniel Borkmannc93552c2018-05-24 02:32:53 +020013110 aux = &env->insn_aux_data[i + delta];
13111 if (bpf_map_ptr_poisoned(aux))
13112 goto patch_call_imm;
13113
Daniel Borkmannd2e4c1e2019-11-22 21:07:59 +010013114 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
Daniel Borkmann09772d92018-06-02 23:06:35 +020013115 ops = map_ptr->ops;
13116 if (insn->imm == BPF_FUNC_map_lookup_elem &&
13117 ops->map_gen_lookup) {
13118 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
Daniel Borkmann4a8f87e2020-10-11 01:40:03 +020013119 if (cnt == -EOPNOTSUPP)
13120 goto patch_map_ops_generic;
13121 if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
Daniel Borkmann09772d92018-06-02 23:06:35 +020013122 verbose(env, "bpf verifier is misconfigured\n");
13123 return -EINVAL;
13124 }
Alexei Starovoitov81ed18a2017-03-15 18:26:42 -070013125
Daniel Borkmann09772d92018-06-02 23:06:35 +020013126 new_prog = bpf_patch_insn_data(env, i + delta,
13127 insn_buf, cnt);
13128 if (!new_prog)
13129 return -ENOMEM;
13130
13131 delta += cnt - 1;
13132 env->prog = prog = new_prog;
13133 insn = new_prog->insnsi + i + delta;
13134 continue;
Alexei Starovoitov81ed18a2017-03-15 18:26:42 -070013135 }
13136
Daniel Borkmann09772d92018-06-02 23:06:35 +020013137 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
13138 (void *(*)(struct bpf_map *map, void *key))NULL));
13139 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
13140 (int (*)(struct bpf_map *map, void *key))NULL));
13141 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
13142 (int (*)(struct bpf_map *map, void *key, void *value,
13143 u64 flags))NULL));
Daniel Borkmann84430d42018-10-21 02:09:27 +020013144 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
13145 (int (*)(struct bpf_map *map, void *value,
13146 u64 flags))NULL));
13147 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
13148 (int (*)(struct bpf_map *map, void *value))NULL));
13149 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
13150 (int (*)(struct bpf_map *map, void *value))NULL));
Björn Töpele6a47502021-03-08 12:29:06 +010013151 BUILD_BUG_ON(!__same_type(ops->map_redirect,
13152 (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL));
Andrey Ignatov0640c772021-10-05 17:18:38 -070013153 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
13154 (int (*)(struct bpf_map *map,
13155 bpf_callback_t callback_fn,
13156 void *callback_ctx,
13157 u64 flags))NULL));
Björn Töpele6a47502021-03-08 12:29:06 +010013158
Daniel Borkmann4a8f87e2020-10-11 01:40:03 +020013159patch_map_ops_generic:
Daniel Borkmann09772d92018-06-02 23:06:35 +020013160 switch (insn->imm) {
13161 case BPF_FUNC_map_lookup_elem:
Kees Cook3d717fa2021-09-28 16:09:45 -070013162 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
Daniel Borkmann09772d92018-06-02 23:06:35 +020013163 continue;
13164 case BPF_FUNC_map_update_elem:
Kees Cook3d717fa2021-09-28 16:09:45 -070013165 insn->imm = BPF_CALL_IMM(ops->map_update_elem);
Daniel Borkmann09772d92018-06-02 23:06:35 +020013166 continue;
13167 case BPF_FUNC_map_delete_elem:
Kees Cook3d717fa2021-09-28 16:09:45 -070013168 insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
Daniel Borkmann09772d92018-06-02 23:06:35 +020013169 continue;
Daniel Borkmann84430d42018-10-21 02:09:27 +020013170 case BPF_FUNC_map_push_elem:
Kees Cook3d717fa2021-09-28 16:09:45 -070013171 insn->imm = BPF_CALL_IMM(ops->map_push_elem);
Daniel Borkmann84430d42018-10-21 02:09:27 +020013172 continue;
13173 case BPF_FUNC_map_pop_elem:
Kees Cook3d717fa2021-09-28 16:09:45 -070013174 insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
Daniel Borkmann84430d42018-10-21 02:09:27 +020013175 continue;
13176 case BPF_FUNC_map_peek_elem:
Kees Cook3d717fa2021-09-28 16:09:45 -070013177 insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
Daniel Borkmann84430d42018-10-21 02:09:27 +020013178 continue;
Björn Töpele6a47502021-03-08 12:29:06 +010013179 case BPF_FUNC_redirect_map:
Kees Cook3d717fa2021-09-28 16:09:45 -070013180 insn->imm = BPF_CALL_IMM(ops->map_redirect);
Björn Töpele6a47502021-03-08 12:29:06 +010013181 continue;
Andrey Ignatov0640c772021-10-05 17:18:38 -070013182 case BPF_FUNC_for_each_map_elem:
13183 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +010013184 continue;
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070013185 }
Jakub Kicinski58e2af8b2016-09-21 11:43:57 +010013186
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070013187 goto patch_call_imm;
13188 }
13189
Brendan Jackmane6ac5932021-02-17 10:45:09 +000013190 /* Implement bpf_jiffies64 inline. */
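		/* Sketch of the resulting sequence:
		 *   r0 = &jiffies   (BPF_LD_IMM64, two insns)
		 *   r0 = *(u64 *)(r0 + 0)
		 * i.e. the 64-bit jiffies counter is read directly.
		 */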
Martin KaFai Lau5576b992020-01-22 15:36:46 -080013191 if (prog->jit_requested && BITS_PER_LONG == 64 &&
13192 insn->imm == BPF_FUNC_jiffies64) {
13193 struct bpf_insn ld_jiffies_addr[2] = {
13194 BPF_LD_IMM64(BPF_REG_0,
13195 (unsigned long)&jiffies),
13196 };
13197
13198 insn_buf[0] = ld_jiffies_addr[0];
13199 insn_buf[1] = ld_jiffies_addr[1];
13200 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
13201 BPF_REG_0, 0);
13202 cnt = 3;
13203
13204 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
13205 cnt);
13206 if (!new_prog)
13207 return -ENOMEM;
13208
13209 delta += cnt - 1;
13210 env->prog = prog = new_prog;
13211 insn = new_prog->insnsi + i + delta;
13212 continue;
13213 }
13214
Jiri Olsa9b99edc2021-07-14 11:43:55 +020013215 /* Implement bpf_get_func_ip inline. */
13216 if (prog_type == BPF_PROG_TYPE_TRACING &&
13217 insn->imm == BPF_FUNC_get_func_ip) {
13218 /* Load IP address from ctx - 8 */
13219 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
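			/* This single-insn rewrite assumes the BPF trampoline
			 * stashed the traced function's IP just below ctx
			 * (the BPF_TRAMP_F_IP_ARG case).
			 */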
13220
13221 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
13222 if (!new_prog)
13223 return -ENOMEM;
13224
13225 env->prog = prog = new_prog;
13226 insn = new_prog->insnsi + i + delta;
13227 continue;
13228 }
13229
Alexei Starovoitovcbd35702014-09-26 00:17:03 -070013230patch_call_imm:
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070013231 fn = env->ops->get_func_proto(insn->imm, env->prog);
13232		/* all functions that have a prototype and that the verifier
13233		 * allowed programs to call must be real in-kernel functions
13234 */
13235 if (!fn->func) {
13236 verbose(env,
13237 "kernel subsystem misconfigured func %s#%d\n",
13238 func_id_name(insn->imm), insn->imm);
13239 return -EFAULT;
13240 }
13241 insn->imm = fn->func - __bpf_call_base;
13242 }
13243
Daniel Borkmannd2e4c1e2019-11-22 21:07:59 +010013244 /* Since poke tab is now finalized, publish aux to tracker. */
13245 for (i = 0; i < prog->aux->size_poke_tab; i++) {
13246 map_ptr = prog->aux->poke_tab[i].tail_call.map;
13247 if (!map_ptr->ops->map_poke_track ||
13248 !map_ptr->ops->map_poke_untrack ||
13249 !map_ptr->ops->map_poke_run) {
13250 verbose(env, "bpf verifier is misconfigured\n");
13251 return -EINVAL;
13252 }
13253
13254 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
13255 if (ret < 0) {
13256 verbose(env, "tracking tail call prog failed\n");
13257 return ret;
13258 }
13259 }
13260
Martin KaFai Laue6ac2452021-03-24 18:51:42 -070013261 sort_kfunc_descs_by_imm(env->prog);
13262
Alexei Starovoitov51580e72014-09-26 00:17:02 -070013263 return 0;
13264}
13265
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070013266static void free_states(struct bpf_verifier_env *env)
13267{
13268 struct bpf_verifier_state_list *sl, *sln;
13269 int i;
13270
Alexei Starovoitov9f4686c2019-04-01 21:27:41 -070013271 sl = env->free_list;
13272 while (sl) {
13273 sln = sl->next;
13274 free_verifier_state(&sl->state, false);
13275 kfree(sl);
13276 sl = sln;
13277 }
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080013278 env->free_list = NULL;
Alexei Starovoitov9f4686c2019-04-01 21:27:41 -070013279
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070013280 if (!env->explored_states)
13281 return;
13282
Alexei Starovoitovdc2a4eb2019-05-21 20:17:07 -070013283 for (i = 0; i < state_htab_size(env); i++) {
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070013284 sl = env->explored_states[i];
13285
Alexei Starovoitova8f500a2019-05-21 20:17:06 -070013286 while (sl) {
13287 sln = sl->next;
13288 free_verifier_state(&sl->state, false);
13289 kfree(sl);
13290 sl = sln;
13291 }
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080013292 env->explored_states[i] = NULL;
13293 }
13294}
13295
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080013296static int do_check_common(struct bpf_verifier_env *env, int subprog)
13297{
Andrii Nakryiko6f8a57c2020-04-23 12:58:50 -070013298 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080013299 struct bpf_verifier_state *state;
13300 struct bpf_reg_state *regs;
13301 int ret, i;
13302
13303 env->prev_linfo = NULL;
13304 env->pass_cnt++;
13305
13306 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
13307 if (!state)
13308 return -ENOMEM;
13309 state->curframe = 0;
13310 state->speculative = false;
13311 state->branches = 1;
13312 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
13313 if (!state->frame[0]) {
13314 kfree(state);
13315 return -ENOMEM;
13316 }
13317 env->cur_state = state;
13318 init_func_state(env, state->frame[0],
13319 BPF_MAIN_FUNC /* callsite */,
13320 0 /* frameno */,
13321 subprog);
13322
13323 regs = state->frame[state->curframe]->regs;
Alexei Starovoitovbe8704f2020-01-20 16:53:46 -080013324 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080013325 ret = btf_prepare_func_args(env, subprog, regs);
13326 if (ret)
13327 goto out;
13328 for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
13329 if (regs[i].type == PTR_TO_CTX)
13330 mark_reg_known_zero(env, regs, i);
13331 else if (regs[i].type == SCALAR_VALUE)
13332 mark_reg_unknown(env, regs, i);
Dmitrii Banshchikove5069b9c2021-02-13 00:56:41 +040013333 else if (regs[i].type == PTR_TO_MEM_OR_NULL) {
13334 const u32 mem_size = regs[i].mem_size;
13335
13336 mark_reg_known_zero(env, regs, i);
13337 regs[i].mem_size = mem_size;
13338 regs[i].id = ++env->id_gen;
13339 }
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080013340 }
13341 } else {
13342 /* 1st arg to a function */
13343 regs[BPF_REG_1].type = PTR_TO_CTX;
13344 mark_reg_known_zero(env, regs, BPF_REG_1);
Martin KaFai Lau34747c42021-03-24 18:51:36 -070013345 ret = btf_check_subprog_arg_match(env, subprog, regs);
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080013346 if (ret == -EFAULT)
13347 /* unlikely verifier bug. abort.
13348 * ret == 0 and ret < 0 are sadly acceptable for
13349			 * the main() function due to backward compatibility.
13350			 * E.g. a socket filter program may be written as:
13351 * int bpf_prog(struct pt_regs *ctx)
13352 * and never dereference that ctx in the program.
13353 * 'struct pt_regs' is a type mismatch for socket
13354 * filter that should be using 'struct __sk_buff'.
13355 */
13356 goto out;
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070013357 }
13358
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080013359 ret = do_check(env);
13360out:
Alexei Starovoitovf59bbfc2020-01-21 18:41:38 -080013361 /* check for NULL is necessary, since cur_state can be freed inside
13362 * do_check() under memory pressure.
13363 */
13364 if (env->cur_state) {
13365 free_verifier_state(env->cur_state, true);
13366 env->cur_state = NULL;
13367 }
Andrii Nakryiko6f8a57c2020-04-23 12:58:50 -070013368 while (!pop_stack(env, NULL, NULL, false));
13369 if (!ret && pop_log)
13370 bpf_vlog_reset(&env->log, 0);
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080013371 free_states(env);
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080013372 return ret;
Alexei Starovoitovf1bca822014-09-29 18:50:01 -070013373}
13374
Alexei Starovoitov51c39bb2020-01-09 22:41:20 -080013375/* Verify all global functions in a BPF program one by one based on their BTF.
13376 * All global functions must pass verification. Otherwise the whole program is rejected.
13377 * Consider:
13378 * int bar(int);
13379 * int foo(int f)
13380 * {
13381 * return bar(f);
13382 * }
13383 * int bar(int b)
13384 * {
13385 * ...
13386 * }
13387 * foo() will be verified first for R1=any_scalar_value. During verification it
13388 * will be assumed that bar() already verified successfully and call to bar()
13389 * from foo() will be checked for type match only. Later bar() will be verified
13390 * independently to check that it's safe for R1=any_scalar_value.
13391 */
13392static int do_check_subprogs(struct bpf_verifier_env *env)
13393{
13394 struct bpf_prog_aux *aux = env->prog->aux;
13395 int i, ret;
13396
13397 if (!aux->func_info)
13398 return 0;
13399
13400 for (i = 1; i < env->subprog_cnt; i++) {
13401 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
13402 continue;
13403 env->insn_idx = env->subprog_info[i].start;
13404 WARN_ON_ONCE(env->insn_idx == 0);
13405 ret = do_check_common(env, i);
13406 if (ret) {
13407 return ret;
13408 } else if (env->log.level & BPF_LOG_LEVEL) {
13409 verbose(env,
13410 "Func#%d is safe for any args that match its prototype\n",
13411 i);
13412 }
13413 }
13414 return 0;
13415}
13416
13417static int do_check_main(struct bpf_verifier_env *env)
13418{
13419 int ret;
13420
13421 env->insn_idx = 0;
13422 ret = do_check_common(env, 0);
13423 if (!ret)
13424 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
13425 return ret;
13426}
13427
13428
Alexei Starovoitov06ee7112019-04-01 21:27:40 -070013429static void print_verification_stats(struct bpf_verifier_env *env)
13430{
13431 int i;
13432
13433 if (env->log.level & BPF_LOG_STATS) {
13434 verbose(env, "verification time %lld usec\n",
13435 div_u64(env->verification_time, 1000));
13436 verbose(env, "stack depth ");
13437 for (i = 0; i < env->subprog_cnt; i++) {
13438 u32 depth = env->subprog_info[i].stack_depth;
13439
13440 verbose(env, "%d", depth);
13441 if (i + 1 < env->subprog_cnt)
13442 verbose(env, "+");
13443 }
13444 verbose(env, "\n");
13445 }
13446 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
13447 "total_states %d peak_states %d mark_read %d\n",
13448 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
13449 env->max_states_per_insn, env->total_states,
13450 env->peak_states, env->longest_mark_read_walk);
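	/* Illustrative output with BPF_LOG_STATS set, following the
	 * format strings above (values are hypothetical):
	 *   verification time 1200 usec
	 *   stack depth 64+32
	 *   processed 8000 insns (limit 1000000) max_states_per_insn 4
	 *     total_states 120 peak_states 120 mark_read 27
	 */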
Alexei Starovoitov51580e72014-09-26 00:17:02 -070013451}
13452
Martin KaFai Lau27ae79972020-01-08 16:35:03 -080013453static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
13454{
13455 const struct btf_type *t, *func_proto;
13456 const struct bpf_struct_ops *st_ops;
13457 const struct btf_member *member;
13458 struct bpf_prog *prog = env->prog;
13459 u32 btf_id, member_idx;
13460 const char *mname;
13461
Toke Høiland-Jørgensen12aa8a92021-03-26 11:03:13 +010013462 if (!prog->gpl_compatible) {
13463 verbose(env, "struct ops programs must have a GPL compatible license\n");
13464 return -EINVAL;
13465 }
13466
Martin KaFai Lau27ae79972020-01-08 16:35:03 -080013467 btf_id = prog->aux->attach_btf_id;
13468 st_ops = bpf_struct_ops_find(btf_id);
13469 if (!st_ops) {
13470 verbose(env, "attach_btf_id %u is not a supported struct\n",
13471 btf_id);
13472 return -ENOTSUPP;
13473 }
13474
13475 t = st_ops->type;
13476 member_idx = prog->expected_attach_type;
13477 if (member_idx >= btf_type_vlen(t)) {
13478 verbose(env, "attach to invalid member idx %u of struct %s\n",
13479 member_idx, st_ops->name);
13480 return -EINVAL;
13481 }
13482
13483 member = &btf_type_member(t)[member_idx];
13484 mname = btf_name_by_offset(btf_vmlinux, member->name_off);
13485 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
13486 NULL);
13487 if (!func_proto) {
13488 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
13489 mname, member_idx, st_ops->name);
13490 return -EINVAL;
13491 }
13492
13493 if (st_ops->check_member) {
13494 int err = st_ops->check_member(t, member);
13495
13496 if (err) {
13497 verbose(env, "attach to unsupported member %s of struct %s\n",
13498 mname, st_ops->name);
13499 return err;
13500 }
13501 }
13502
13503 prog->aux->attach_func_proto = func_proto;
13504 prog->aux->attach_func_name = mname;
13505 env->ops = st_ops->verifier_ops;
13506
13507 return 0;
13508}
KP Singh6ba43b72020-03-04 20:18:50 +010013509#define SECURITY_PREFIX "security_"
13510
Toke Høiland-Jørgensenf7b12b62020-09-25 23:25:02 +020013511static int check_attach_modify_return(unsigned long addr, const char *func_name)
KP Singh6ba43b72020-03-04 20:18:50 +010013512{
KP Singh69191752020-03-05 21:49:55 +010013513 if (within_error_injection_list(addr) ||
Toke Høiland-Jørgensenf7b12b62020-09-25 23:25:02 +020013514 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
KP Singh6ba43b72020-03-04 20:18:50 +010013515 return 0;
KP Singh6ba43b72020-03-04 20:18:50 +010013516
KP Singh6ba43b72020-03-04 20:18:50 +010013517 return -EINVAL;
13518}
Martin KaFai Lau27ae79972020-01-08 16:35:03 -080013519
Alexei Starovoitov1e6c62a2020-08-27 15:01:11 -070013520/* list of non-sleepable functions that are otherwise on
13521 * the ALLOW_ERROR_INJECTION list
13522 */
13523BTF_SET_START(btf_non_sleepable_error_inject)
13524/* Three functions below can be called from sleepable and non-sleepable context.
13525 * Assume non-sleepable from bpf safety point of view.
13526 */
Matthew Wilcox (Oracle)9dd3d062020-12-08 08:56:28 -050013527BTF_ID(func, __filemap_add_folio)
Alexei Starovoitov1e6c62a2020-08-27 15:01:11 -070013528BTF_ID(func, should_fail_alloc_page)
13529BTF_ID(func, should_failslab)
13530BTF_SET_END(btf_non_sleepable_error_inject)
13531
13532static int check_non_sleepable_error_inject(u32 btf_id)
13533{
13534 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
13535}
13536
Toke Høiland-Jørgensenf7b12b62020-09-25 23:25:02 +020013537int bpf_check_attach_target(struct bpf_verifier_log *log,
13538 const struct bpf_prog *prog,
13539 const struct bpf_prog *tgt_prog,
13540 u32 btf_id,
13541 struct bpf_attach_target_info *tgt_info)
Martin KaFai Lau38207292019-10-24 17:18:11 -070013542{
Alexei Starovoitovbe8704f2020-01-20 16:53:46 -080013543 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
Alexei Starovoitovf1b95092019-10-30 15:32:11 -070013544 const char prefix[] = "btf_trace_";
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013545 int ret = 0, subprog = -1, i;
Martin KaFai Lau38207292019-10-24 17:18:11 -070013546 const struct btf_type *t;
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013547 bool conservative = true;
Martin KaFai Lau38207292019-10-24 17:18:11 -070013548 const char *tname;
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013549 struct btf *btf;
Toke Høiland-Jørgensenf7b12b62020-09-25 23:25:02 +020013550 long addr = 0;
Martin KaFai Lau38207292019-10-24 17:18:11 -070013551
Alexei Starovoitovf1b95092019-10-30 15:32:11 -070013552 if (!btf_id) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013553 bpf_log(log, "Tracing programs must provide btf_id\n");
Alexei Starovoitovf1b95092019-10-30 15:32:11 -070013554 return -EINVAL;
13555 }
Andrii Nakryiko22dc4a02020-12-03 12:46:29 -080013556 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013557 if (!btf) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013558 bpf_log(log,
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013559 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
13560 return -EINVAL;
13561 }
13562 t = btf_type_by_id(btf, btf_id);
Alexei Starovoitovf1b95092019-10-30 15:32:11 -070013563 if (!t) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013564 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
Alexei Starovoitovf1b95092019-10-30 15:32:11 -070013565 return -EINVAL;
13566 }
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013567 tname = btf_name_by_offset(btf, t->name_off);
Alexei Starovoitovf1b95092019-10-30 15:32:11 -070013568 if (!tname) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013569 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
Alexei Starovoitovf1b95092019-10-30 15:32:11 -070013570 return -EINVAL;
13571 }
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013572 if (tgt_prog) {
13573 struct bpf_prog_aux *aux = tgt_prog->aux;
13574
13575 for (i = 0; i < aux->func_info_cnt; i++)
13576 if (aux->func_info[i].type_id == btf_id) {
13577 subprog = i;
13578 break;
13579 }
13580 if (subprog == -1) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013581 bpf_log(log, "Subprog %s doesn't exist\n", tname);
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013582 return -EINVAL;
13583 }
13584 conservative = aux->func_info_aux[subprog].unreliable;
Alexei Starovoitovbe8704f2020-01-20 16:53:46 -080013585 if (prog_extension) {
13586 if (conservative) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013587 bpf_log(log,
Alexei Starovoitovbe8704f2020-01-20 16:53:46 -080013588 "Cannot replace static functions\n");
13589 return -EINVAL;
13590 }
13591 if (!prog->jit_requested) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013592 bpf_log(log,
Alexei Starovoitovbe8704f2020-01-20 16:53:46 -080013593 "Extension programs should be JITed\n");
13594 return -EINVAL;
13595 }
Alexei Starovoitovbe8704f2020-01-20 16:53:46 -080013596 }
13597 if (!tgt_prog->jited) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013598			bpf_log(log, "Can only attach to JITed progs\n");
Alexei Starovoitovbe8704f2020-01-20 16:53:46 -080013599 return -EINVAL;
13600 }
13601 if (tgt_prog->type == prog->type) {
13602 /* Cannot fentry/fexit another fentry/fexit program.
13603 * Cannot attach program extension to another extension.
13604 * It's ok to attach fentry/fexit to extension program.
13605 */
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013606 bpf_log(log, "Cannot recursively attach\n");
Alexei Starovoitovbe8704f2020-01-20 16:53:46 -080013607 return -EINVAL;
13608 }
13609 if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
13610 prog_extension &&
13611 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
13612 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
13613 /* Program extensions can extend all program types
13614 * except fentry/fexit. The reason is the following.
13615 * The fentry/fexit programs are used for performance
13616 * analysis, stats and can be attached to any program
13617			 * type except themselves. When an extension program
13618			 * replaces an XDP function, it is necessary to allow
13619			 * performance analysis of all functions: both the original
13620			 * XDP program and its program extension. Hence
13621			 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
13622			 * allowed. If extending fentry/fexit were allowed, it
13623			 * would be possible to create a long call chain
13624			 * fentry->extension->fentry->extension beyond a
13625			 * reasonable stack size. Hence extending fentry is not
13626			 * allowed.
13627 */
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013628 bpf_log(log, "Cannot extend fentry/fexit\n");
Alexei Starovoitovbe8704f2020-01-20 16:53:46 -080013629 return -EINVAL;
13630 }
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013631 } else {
Alexei Starovoitovbe8704f2020-01-20 16:53:46 -080013632 if (prog_extension) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013633 bpf_log(log, "Cannot replace kernel functions\n");
Alexei Starovoitovbe8704f2020-01-20 16:53:46 -080013634 return -EINVAL;
13635 }
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013636 }
Alexei Starovoitovf1b95092019-10-30 15:32:11 -070013637
13638 switch (prog->expected_attach_type) {
13639 case BPF_TRACE_RAW_TP:
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013640 if (tgt_prog) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013641 bpf_log(log,
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013642 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
13643 return -EINVAL;
13644 }
Martin KaFai Lau38207292019-10-24 17:18:11 -070013645 if (!btf_type_is_typedef(t)) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013646 bpf_log(log, "attach_btf_id %u is not a typedef\n",
Martin KaFai Lau38207292019-10-24 17:18:11 -070013647 btf_id);
13648 return -EINVAL;
13649 }
Alexei Starovoitovf1b95092019-10-30 15:32:11 -070013650 if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013651 bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
Martin KaFai Lau38207292019-10-24 17:18:11 -070013652 btf_id, tname);
13653 return -EINVAL;
13654 }
13655 tname += sizeof(prefix) - 1;
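		/* e.g. for the sched_switch tracepoint, attach_btf_id names
		 * the typedef btf_trace_sched_switch; past the prefix tname
		 * is "sched_switch", and the lookups below resolve t to the
		 * tracepoint's function prototype.
		 */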
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013656 t = btf_type_by_id(btf, t->type);
Martin KaFai Lau38207292019-10-24 17:18:11 -070013657 if (!btf_type_is_ptr(t))
13658			/* should never happen in a valid vmlinux build */
13659 return -EINVAL;
Alexei Starovoitov5b92a282019-11-14 10:57:17 -080013660 t = btf_type_by_id(btf, t->type);
Martin KaFai Lau38207292019-10-24 17:18:11 -070013661 if (!btf_type_is_func_proto(t))
13662			/* should never happen in a valid vmlinux build */
13663 return -EINVAL;
13664
Toke Høiland-Jørgensenf7b12b62020-09-25 23:25:02 +020013665 break;
Yonghong Song15d83c42020-05-09 10:59:00 -070013666 case BPF_TRACE_ITER:
13667 if (!btf_type_is_func(t)) {
Toke Høiland-Jørgensenefc68152020-09-25 23:25:01 +020013668 bpf_log(log, "attach_btf_id %u is not a function\n",
Yonghong Song15d83c42020-05-09 10:59:00 -070013669 btf_id);
13670 return -EINVAL;
13671 }
13672 t = btf_type_by_id(btf, t->type);
13673 if (!btf_type_is_func_proto(t))
13674 return -EINVAL;
Toke Høiland-Jørgensenf7b12b62020-09-25 23:25:02 +020013675 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
13676 if (ret)
13677 return ret;
13678 break;
	default:
		if (!prog_extension)
			return -EINVAL;
		fallthrough;
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (!btf_type_is_func(t)) {
			bpf_log(log, "attach_btf_id %u is not a function\n",
				btf_id);
			return -EINVAL;
		}
		if (prog_extension &&
		    btf_check_type_match(log, prog, btf, t))
			return -EINVAL;
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_func_proto(t))
			return -EINVAL;

		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
			return -EINVAL;

		if (tgt_prog && conservative)
			t = NULL;

		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
		if (ret < 0)
			return ret;

		if (tgt_prog) {
			if (subprog == 0)
				addr = (long) tgt_prog->bpf_func;
			else
				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
		} else {
			addr = kallsyms_lookup_name(tname);
			if (!addr) {
				bpf_log(log,
					"The address of function %s cannot be found\n",
					tname);
				return -ENOENT;
			}
		}

		if (prog->aux->sleepable) {
			ret = -EINVAL;
			switch (prog->type) {
			case BPF_PROG_TYPE_TRACING:
				/* fentry/fexit/fmod_ret progs can be sleepable only if
				 * they are attached to a function marked
				 * ALLOW_ERROR_INJECTION and not on the denylist.
				 */
				if (!check_non_sleepable_error_inject(btf_id) &&
				    within_error_injection_list(addr))
					ret = 0;
				break;
			case BPF_PROG_TYPE_LSM:
				/* LSM progs check that they are attached to bpf_lsm_*() funcs.
				 * Only some of them are sleepable.
				 */
				if (bpf_lsm_is_sleepable_hook(btf_id))
					ret = 0;
				break;
			default:
				break;
			}
			if (ret) {
				bpf_log(log, "%s is not sleepable\n", tname);
				return ret;
			}
		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
			if (tgt_prog) {
				bpf_log(log, "can't modify return codes of BPF programs\n");
				return -EINVAL;
			}
			ret = check_attach_modify_return(addr, tname);
			if (ret) {
				bpf_log(log, "%s() is not modifiable\n", tname);
				return ret;
			}
		}

		break;
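		/* From userspace, a program takes the sleepable branch above
		 * when loaded with the BPF_F_SLEEPABLE prog flag. A hedged
		 * libbpf-style sketch ("fentry.s" is libbpf's sleepable
		 * section convention; the target is just an example and must
		 * be on the error-injection allowlist):
		 *
		 *	SEC("fentry.s/bpf_fentry_test1")
		 *	int BPF_PROG(sleepable_fentry, int a)
		 *	{
		 *		// free to call sleepable helpers such as
		 *		// bpf_copy_from_user()
		 *		return 0;
		 *	}
		 */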
	}
	tgt_info->tgt_addr = addr;
	tgt_info->tgt_name = tname;
	tgt_info->tgt_type = t;
	return 0;
}

BTF_SET_START(btf_id_deny)
BTF_ID_UNUSED
#ifdef CONFIG_SMP
BTF_ID(func, migrate_disable)
BTF_ID(func, migrate_enable)
#endif
#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
BTF_ID(func, rcu_read_unlock_strict)
#endif
BTF_SET_END(btf_id_deny)
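/* The set above is resolved to sorted BTF IDs at build time (resolve_btfids)
 * and searched with btf_id_set_contains() in check_attach_btf_id(). These
 * functions are denied because the trampoline machinery itself relies on
 * them, so attaching a tracing program there could recurse. Extending the
 * denylist is assumed to be just another entry before BTF_SET_END, e.g.:
 *
 *	BTF_ID(func, some_fragile_helper)	// hypothetical entry
 */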

static int check_attach_btf_id(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
	struct bpf_attach_target_info tgt_info = {};
	u32 btf_id = prog->aux->attach_btf_id;
	struct bpf_trampoline *tr;
	int ret;
	u64 key;

	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
		if (prog->aux->sleepable)
			/* attach_btf_id was already checked to be zero */
			return 0;
		verbose(env, "Syscall programs can only be sleepable\n");
		return -EINVAL;
	}
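	/* An abbreviated userspace sketch of the only combination the check
	 * above accepts for syscall programs (UAPI field names; other
	 * attributes omitted):
	 *
	 *	union bpf_attr attr = {
	 *		.prog_type  = BPF_PROG_TYPE_SYSCALL,
	 *		.prog_flags = BPF_F_SLEEPABLE,
	 *	};
	 *	// attach_btf_id is left at zero, so the early return fires
	 */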

	if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
	    prog->type != BPF_PROG_TYPE_LSM) {
		verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
		return -EINVAL;
	}

	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
		return check_struct_ops_btf_id(env);

	if (prog->type != BPF_PROG_TYPE_TRACING &&
	    prog->type != BPF_PROG_TYPE_LSM &&
	    prog->type != BPF_PROG_TYPE_EXT)
		return 0;

	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
	if (ret)
		return ret;

	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
		/* to make freplace programs equivalent to their targets, they
		 * need to inherit env->ops and expected_attach_type for the
		 * rest of the verification
		 */
		env->ops = bpf_verifier_ops[tgt_prog->type];
		prog->expected_attach_type = tgt_prog->expected_attach_type;
	}
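	/* A hedged sketch of the freplace case handled above, in libbpf
	 * conventions (the target function name is illustrative):
	 *
	 *	SEC("freplace/xdp_dispatcher_fn")
	 *	int new_impl(struct xdp_md *ctx)
	 *	{
	 *		return XDP_PASS;
	 *	}
	 *
	 * combined with bpf_program__set_attach_target(prog, tgt_fd,
	 * "xdp_dispatcher_fn") before load, so that dst_prog is set and the
	 * extension inherits the target's verifier ops and attach type here.
	 */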

	/* store info about the attachment target that will be used later */
	prog->aux->attach_func_proto = tgt_info.tgt_type;
	prog->aux->attach_func_name = tgt_info.tgt_name;

	if (tgt_prog) {
		prog->aux->saved_dst_prog_type = tgt_prog->type;
		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
	}

	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
		prog->aux->attach_btf_trace = true;
		return 0;
	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
		if (!bpf_iter_prog_supported(prog))
			return -EINVAL;
		return 0;
	}

	if (prog->type == BPF_PROG_TYPE_LSM) {
		ret = bpf_lsm_verify_prog(&env->log, prog);
		if (ret < 0)
			return ret;
	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
		   btf_id_set_contains(&btf_id_deny, btf_id)) {
		return -EINVAL;
	}

	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
	tr = bpf_trampoline_get(key, &tgt_info);
	if (!tr)
		return -ENOMEM;

	prog->aux->dst_trampoline = tr;
	return 0;
}
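/* bpf_trampoline_compute_key() above is assumed to pack the target identity
 * into a single u64, roughly:
 *
 *	key = ((u64)obj_id << 32) | btf_id;
 *
 * where obj_id is the target prog's id for freplace or the id of the BTF
 * object containing the function otherwise, so every program attaching to
 * the same kernel function shares one trampoline.
 */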

struct btf *bpf_get_btf_vmlinux(void)
{
	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
		mutex_lock(&bpf_verifier_lock);
		if (!btf_vmlinux)
			btf_vmlinux = btf_parse_vmlinux();
		mutex_unlock(&bpf_verifier_lock);
	}
	return btf_vmlinux;
}
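/* Callers treat the result as tri-state: NULL when the kernel lacks
 * CONFIG_DEBUG_INFO_BTF, an ERR_PTR() when parsing failed, or a valid btf
 * object. A minimal usage sketch:
 *
 *	struct btf *btf = bpf_get_btf_vmlinux();
 *
 *	if (IS_ERR_OR_NULL(btf))
 *		return btf ? PTR_ERR(btf) : -ENOENT;
 */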

int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
{
	u64 start_time = ktime_get_ns();
	struct bpf_verifier_env *env;
	struct bpf_verifier_log *log;
	int i, len, ret = -EINVAL;
	bool is_priv;

	/* no program is valid if there are no verifier ops */
	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
		return -EINVAL;

	/* 'struct bpf_verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;
	log = &env->log;

	len = (*prog)->len;
	env->insn_aux_data =
		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	for (i = 0; i < len; i++)
		env->insn_aux_data[i].orig_idx = i;
	env->prog = *prog;
	env->ops = bpf_verifier_ops[env->prog->type];
	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
	is_priv = bpf_capable();

	bpf_get_btf_vmlinux();

	/* grab the mutex to protect a few globals used by the verifier */
	if (!is_priv)
		mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied a buffer to store the verification trace
		 */
		log->level = attr->log_level;
		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
		log->len_total = attr->log_size;

		ret = -EINVAL;
		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
		    !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
			goto err_unlock;
	}
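	/* The three attributes validated above arrive verbatim from the
	 * BPF_PROG_LOAD command. A minimal userspace sketch (buffer size
	 * arbitrary, but it must satisfy the bounds checked above):
	 *
	 *	static char buf[1 << 16];
	 *	union bpf_attr attr = {
	 *		.log_level = 1,
	 *		.log_buf   = (__u64)(unsigned long)buf,
	 *		.log_size  = sizeof(buf),
	 *	};
	 *	// on -ENOSPC the trace did not fit and buf holds a prefix
	 */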

	if (IS_ERR(btf_vmlinux)) {
		/* Either gcc or pahole or the kernel itself is broken. */
		verbose(env, "in-kernel BTF is malformed\n");
		ret = PTR_ERR(btf_vmlinux);
		goto skip_full_check;
	}

	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;
	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
		env->strict_alignment = false;

	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
	env->allow_uninit_stack = bpf_allow_uninit_stack();
	env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
	env->bypass_spec_v1 = bpf_bypass_spec_v1();
	env->bypass_spec_v4 = bpf_bypass_spec_v4();
	env->bpf_capable = bpf_capable();
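	/* All of these knobs reduce to capability checks; a hedged summary of
	 * the helpers (the real definitions live in headers and may differ in
	 * detail):
	 *
	 *	bpf_capable()         ~ capable(CAP_BPF) || capable(CAP_SYS_ADMIN)
	 *	bpf_allow_ptr_leaks() ~ perfmon_capable()
	 *	bpf_bypass_spec_v1()  ~ perfmon_capable()
	 *	bpf_bypass_spec_v4()  ~ perfmon_capable()
	 */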

	if (is_priv)
		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;

	env->explored_states = kvcalloc(state_htab_size(env),
					sizeof(struct bpf_verifier_state_list *),
					GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = add_subprog_and_kfunc(env);
	if (ret < 0)
		goto skip_full_check;

	ret = check_subprogs(env);
	if (ret < 0)
		goto skip_full_check;

	ret = check_btf_info(env, attr, uattr);
	if (ret < 0)
		goto skip_full_check;

	ret = check_attach_btf_id(env);
	if (ret)
		goto skip_full_check;

	ret = resolve_pseudo_ldimm64(env);
	if (ret < 0)
		goto skip_full_check;

	if (bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = bpf_prog_offload_verifier_prep(env->prog);
		if (ret)
			goto skip_full_check;
	}

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	ret = do_check_subprogs(env);
	ret = ret ?: do_check_main(env);

	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
		ret = bpf_prog_offload_finalize(env);

skip_full_check:
	kvfree(env->explored_states);

	if (ret == 0)
		ret = check_max_stack_depth(env);

	/* instruction rewrites happen after this point */
	if (is_priv) {
		if (ret == 0)
			opt_hard_wire_dead_code_branches(env);
		if (ret == 0)
			ret = opt_remove_dead_code(env);
		if (ret == 0)
			ret = opt_remove_nops(env);
	} else {
		if (ret == 0)
			sanitize_dead_code(env);
	}

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (ret == 0)
		ret = do_misc_fixups(env);

	/* do the 32-bit optimization after insn patching is done, so that the
	 * patched insns are handled correctly too.
	 */
	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
								     : false;
	}
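	/* Conceptually, for a 32-bit subregister write whose high bits the
	 * JIT cannot be trusted to clear, the pass above inserts an explicit
	 * zero-extension, along the lines of (pseudo BPF asm, sketch only):
	 *
	 *	w1 = w2		// 32-bit ALU op, hi32 undefined
	 *	r1 <<= 32	// inserted
	 *	r1 >>= 32	// inserted: logical shift clears hi32
	 */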
14028
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -080014029 if (ret == 0)
14030 ret = fixup_call_args(env);
14031
Alexei Starovoitov06ee7112019-04-01 21:27:40 -070014032 env->verification_time = ktime_get_ns() - start_time;
14033 print_verification_stats(env);
Dave Marchevskyaba64c72021-10-20 00:48:17 -070014034 env->prog->aux->verified_insns = env->insn_processed;
Alexei Starovoitov06ee7112019-04-01 21:27:40 -070014035
Jakub Kicinskia2a7d572017-10-09 10:30:15 -070014036 if (log->level && bpf_verifier_log_full(log))
Alexei Starovoitovcbd35702014-09-26 00:17:03 -070014037 ret = -ENOSPC;
Jakub Kicinskia2a7d572017-10-09 10:30:15 -070014038 if (log->level && !log->ubuf) {
Alexei Starovoitovcbd35702014-09-26 00:17:03 -070014039 ret = -EFAULT;
Jakub Kicinskia2a7d572017-10-09 10:30:15 -070014040 goto err_release_maps;
Alexei Starovoitovcbd35702014-09-26 00:17:03 -070014041 }
14042
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080014043 if (ret)
14044 goto err_release_maps;
14045
14046 if (env->used_map_cnt) {
Alexei Starovoitov0246e642014-09-26 00:17:04 -070014047 /* if program passed verifier, update used_maps in bpf_prog_info */
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070014048 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
14049 sizeof(env->used_maps[0]),
14050 GFP_KERNEL);
Alexei Starovoitov0246e642014-09-26 00:17:04 -070014051
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070014052 if (!env->prog->aux->used_maps) {
Alexei Starovoitov0246e642014-09-26 00:17:04 -070014053 ret = -ENOMEM;
Jakub Kicinskia2a7d572017-10-09 10:30:15 -070014054 goto err_release_maps;
Alexei Starovoitov0246e642014-09-26 00:17:04 -070014055 }
14056
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070014057 memcpy(env->prog->aux->used_maps, env->used_maps,
Alexei Starovoitov0246e642014-09-26 00:17:04 -070014058 sizeof(env->used_maps[0]) * env->used_map_cnt);
Alexei Starovoitov9bac3d62015-03-13 11:57:42 -070014059 env->prog->aux->used_map_cnt = env->used_map_cnt;
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080014060 }
14061 if (env->used_btf_cnt) {
14062 /* if program passed verifier, update used_btfs in bpf_prog_aux */
14063 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
14064 sizeof(env->used_btfs[0]),
14065 GFP_KERNEL);
14066 if (!env->prog->aux->used_btfs) {
14067 ret = -ENOMEM;
14068 goto err_release_maps;
14069 }
Alexei Starovoitov0246e642014-09-26 00:17:04 -070014070
Andrii Nakryiko541c3ba2021-01-11 23:55:18 -080014071 memcpy(env->prog->aux->used_btfs, env->used_btfs,
14072 sizeof(env->used_btfs[0]) * env->used_btf_cnt);
14073 env->prog->aux->used_btf_cnt = env->used_btf_cnt;
14074 }
14075 if (env->used_map_cnt || env->used_btf_cnt) {
Alexei Starovoitov0246e642014-09-26 00:17:04 -070014076 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
14077 * bpf_ld_imm64 instructions
14078 */
14079 convert_pseudo_ld_imm64(env);
14080 }
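	/* A map reference enters the instruction stream as a pseudo insn,
	 * conceptually:
	 *
	 *	BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_FD, map_fd)
	 *
	 * resolve_pseudo_ldimm64() already rewrote the immediate to the
	 * map's kernel address; the call above only clears the src_reg
	 * marker so the JIT sees an ordinary 64-bit load.
	 */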

	adjust_btf_func(env);

err_release_maps:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_aux, release
		 * them now. Otherwise free_used_maps() will release them.
		 */
		release_maps(env);
	if (!env->prog->aux->used_btfs)
		release_btfs(env);

	/* extension progs temporarily inherit the attach_type of their targets
	 * for verification purposes, so set it back to zero before returning
	 */
	if (env->prog->type == BPF_PROG_TYPE_EXT)
		env->prog->expected_attach_type = 0;

	*prog = env->prog;
err_unlock:
	if (!is_priv)
		mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);
err_free_env:
	kfree(env);
	return ret;
}