/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>

#include <bpf/bpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	4
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;

struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t retval;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
};

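/* Note on the fixup_* arrays above: they list instruction indices that the
 * test runner patches at load time with the fd of a map it creates for the
 * test (e.g. for BPF_LD_MAP_FD), so tests can reference maps without
 * hard-coding descriptors.
 */
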
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = -3,
	},
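	/* Runtime semantics exercised below: eBPF defines division by zero to
	 * yield 0 in the destination register and modulo by zero to leave the
	 * destination unchanged, rather than faulting; the expected .retval
	 * values of the "by 0" tests encode exactly that.
	 */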
	{
		"DIV32 by 0, zero check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV32 by 0, zero check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV64 by 0, zero check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD32 by 0, zero check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD32 by 0, zero check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD64 by 0, zero check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV32 by 0, zero check ok, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 2),
			BPF_MOV32_IMM(BPF_REG_2, 16),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 8,
	},
	{
		"DIV32 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV32 by 0, zero check 2, cls",
		.insns = {
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 by 0, zero check, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"MOD32 by 0, zero check ok, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 3),
			BPF_MOV32_IMM(BPF_REG_2, 5),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 2,
	},
	{
		"MOD32 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD32 by 0, zero check 2, cls",
		.insns = {
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD64 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 2),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 2,
	},
	{
		"MOD64 by 0, zero check 2, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, -1),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = -1,
	},
	/* Just make sure that JITs use udiv/umod, as otherwise we would get
	 * an exception from INT_MIN/-1 overflow, just as with div by zero.
	 */
	{
		"DIV32 overflow, check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, -1),
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV32 overflow, check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 overflow, check 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, -1),
			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 overflow, check 2",
		.insns = {
			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"MOD32 overflow, check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, -1),
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = INT_MIN,
	},
	{
		"MOD32 overflow, check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = INT_MIN,
	},
	{
		"MOD64 overflow, check 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, -1),
			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD64 overflow, check 2",
		.insns = {
			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"xor32 zero extend check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_2, -1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
			BPF_MOV32_IMM(BPF_REG_0, 2),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"empty prog",
		.insns = {
		},
		.errstr = "unknown opcode 00",
		.result = REJECT,
	},
	{
		"only exit insn",
		.insns = {
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
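	/* BPF_LD_IMM64 is the only two-slot instruction: the first struct
	 * bpf_insn carries the lower 32 bits of the immediate, and the second
	 * (pseudo) insn must have opcode 0 with the upper 32 bits in its imm
	 * field. The raw-insn tests below poke at that encoding and at its
	 * reserved fields.
	 */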
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test6 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test7 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"test8 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "uses reserved fields",
		.result = REJECT,
	},
	{
		"test9 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 1, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test10 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test11 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test12 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"test13 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"arsh32 on imm",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "unknown opcode c4",
	},
	{
		"arsh32 on reg",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 5),
			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "unknown opcode cc",
	},
	{
		"arsh64 on imm",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"arsh64 on reg",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 5),
			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "not an exit",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit in all branches",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode 8d",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func unknown#1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 2 },
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"invalid fp arithmetic",
		/* If this gets ever changed, make sure JITs can deal with it. */
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 subtraction from stack pointer",
		.result = REJECT,
	},
	{
		"non-invalid fp arithmetic",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"invalid argument register",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 !read_ok",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"non-invalid argument register",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
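	/* Spill/fill: the verifier tracks register state through stack slots.
	 * A full 8-byte spill of a pointer keeps its type when filled back,
	 * while partially overwriting the slot invalidates it, as the
	 * "corrupted spill" test below demonstrates.
	 */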
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
			/* should be able to access R0 = *(R2 + 8) */
			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R0 leaks addr",
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.retval = POINTER_VALUE,
	},
	{
		"check valid spill/fill, skb mark",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = ACCEPT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.errstr = "corrupted spill",
		.result = REJECT,
	},
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode 00",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode ff",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode ff",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned stack access",
		.result = REJECT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "misaligned value access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 24 },
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.retval = -ENOENT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, napi_id)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
		},
		.fixup_map1 = { 6 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
		},
		.fixup_map1 = { 7 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff family",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, family)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip4)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip4)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_port)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_port)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"valid access __sk_buff family",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, family)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip4)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip4)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[3])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[3])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_port)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_port)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"invalid access of tc_classid for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_classid)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"invalid access of skb->mark for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"check skb->mark is not writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"check skb->tc_index is writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"check skb->priority is writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, priority)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
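	/* Direct packet access: loads and stores through the data pointer are
	 * only accepted after the program has proven, via a comparison of a
	 * derived pointer against data_end, that the accessed bytes lie
	 * within the packet.
	 */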
1545 {
1546 "direct packet read for SK_SKB",
1547 .insns = {
1548 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1549 offsetof(struct __sk_buff, data)),
1550 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1551 offsetof(struct __sk_buff, data_end)),
1552 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1553 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1554 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1555 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1556 BPF_MOV64_IMM(BPF_REG_0, 0),
1557 BPF_EXIT_INSN(),
1558 },
1559 .result = ACCEPT,
1560 .prog_type = BPF_PROG_TYPE_SK_SKB,
1561 },
1562 {
1563 "direct packet write for SK_SKB",
1564 .insns = {
1565 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1566 offsetof(struct __sk_buff, data)),
1567 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1568 offsetof(struct __sk_buff, data_end)),
1569 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1570 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1571 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1572 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1573 BPF_MOV64_IMM(BPF_REG_0, 0),
1574 BPF_EXIT_INSN(),
1575 },
1576 .result = ACCEPT,
1577 .prog_type = BPF_PROG_TYPE_SK_SKB,
1578 },
1579 {
1580 "overlapping checks for direct packet access SK_SKB",
1581 .insns = {
1582 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1583 offsetof(struct __sk_buff, data)),
1584 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1585 offsetof(struct __sk_buff, data_end)),
1586 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1588 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1591 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1592 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1593 BPF_MOV64_IMM(BPF_REG_0, 0),
1594 BPF_EXIT_INSN(),
1595 },
1596 .result = ACCEPT,
1597 .prog_type = BPF_PROG_TYPE_SK_SKB,
1598 },
1599 {
John Fastabend1acc60b2018-03-18 12:57:36 -07001600 "direct packet read for SK_MSG",
1601 .insns = {
1602 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1603 offsetof(struct sk_msg_md, data)),
1604 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1605 offsetof(struct sk_msg_md, data_end)),
1606 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1607 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1608 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1609 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1610 BPF_MOV64_IMM(BPF_REG_0, 0),
1611 BPF_EXIT_INSN(),
1612 },
1613 .result = ACCEPT,
1614 .prog_type = BPF_PROG_TYPE_SK_MSG,
1615 },
1616 {
1617 "direct packet write for SK_MSG",
1618 .insns = {
1619 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1620 offsetof(struct sk_msg_md, data)),
1621 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1622 offsetof(struct sk_msg_md, data_end)),
1623 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1625 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1626 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1627 BPF_MOV64_IMM(BPF_REG_0, 0),
1628 BPF_EXIT_INSN(),
1629 },
1630 .result = ACCEPT,
1631 .prog_type = BPF_PROG_TYPE_SK_MSG,
1632 },
1633 {
1634 "overlapping checks for direct packet access SK_MSG",
1635 .insns = {
1636 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1637 offsetof(struct sk_msg_md, data)),
1638 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1639 offsetof(struct sk_msg_md, data_end)),
1640 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1641 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1642 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1643 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1645 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1646 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1647 BPF_MOV64_IMM(BPF_REG_0, 0),
1648 BPF_EXIT_INSN(),
1649 },
1650 .result = ACCEPT,
1651 .prog_type = BPF_PROG_TYPE_SK_MSG,
1652 },
1653 {
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001654 "check skb->mark is not writeable by sockets",
1655 .insns = {
1656 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1657 offsetof(struct __sk_buff, mark)),
1658 BPF_EXIT_INSN(),
1659 },
1660 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001661 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001662 .result = REJECT,
1663 },
1664 {
1665 "check skb->tc_index is not writeable by sockets",
1666 .insns = {
1667 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1668 offsetof(struct __sk_buff, tc_index)),
1669 BPF_EXIT_INSN(),
1670 },
1671 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001672 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001673 .result = REJECT,
1674 },
1675 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001676 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001677 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001678 BPF_MOV64_IMM(BPF_REG_0, 0),
1679 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1680 offsetof(struct __sk_buff, cb[0])),
1681 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1682 offsetof(struct __sk_buff, cb[0]) + 1),
1683 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1684 offsetof(struct __sk_buff, cb[0]) + 2),
1685 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1686 offsetof(struct __sk_buff, cb[0]) + 3),
1687 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1688 offsetof(struct __sk_buff, cb[1])),
1689 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1690 offsetof(struct __sk_buff, cb[1]) + 1),
1691 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1692 offsetof(struct __sk_buff, cb[1]) + 2),
1693 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1694 offsetof(struct __sk_buff, cb[1]) + 3),
1695 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1696 offsetof(struct __sk_buff, cb[2])),
1697 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1698 offsetof(struct __sk_buff, cb[2]) + 1),
1699 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1700 offsetof(struct __sk_buff, cb[2]) + 2),
1701 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1702 offsetof(struct __sk_buff, cb[2]) + 3),
1703 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1704 offsetof(struct __sk_buff, cb[3])),
1705 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1706 offsetof(struct __sk_buff, cb[3]) + 1),
1707 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1708 offsetof(struct __sk_buff, cb[3]) + 2),
1709 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1710 offsetof(struct __sk_buff, cb[3]) + 3),
1711 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1712 offsetof(struct __sk_buff, cb[4])),
1713 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1714 offsetof(struct __sk_buff, cb[4]) + 1),
1715 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1716 offsetof(struct __sk_buff, cb[4]) + 2),
1717 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1718 offsetof(struct __sk_buff, cb[4]) + 3),
1719 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1720 offsetof(struct __sk_buff, cb[0])),
1721 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1722 offsetof(struct __sk_buff, cb[0]) + 1),
1723 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1724 offsetof(struct __sk_buff, cb[0]) + 2),
1725 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1726 offsetof(struct __sk_buff, cb[0]) + 3),
1727 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1728 offsetof(struct __sk_buff, cb[1])),
1729 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1730 offsetof(struct __sk_buff, cb[1]) + 1),
1731 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1732 offsetof(struct __sk_buff, cb[1]) + 2),
1733 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1734 offsetof(struct __sk_buff, cb[1]) + 3),
1735 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1736 offsetof(struct __sk_buff, cb[2])),
1737 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1738 offsetof(struct __sk_buff, cb[2]) + 1),
1739 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1740 offsetof(struct __sk_buff, cb[2]) + 2),
1741 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1742 offsetof(struct __sk_buff, cb[2]) + 3),
1743 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1744 offsetof(struct __sk_buff, cb[3])),
1745 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1746 offsetof(struct __sk_buff, cb[3]) + 1),
1747 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1748 offsetof(struct __sk_buff, cb[3]) + 2),
1749 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1750 offsetof(struct __sk_buff, cb[3]) + 3),
1751 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1752 offsetof(struct __sk_buff, cb[4])),
1753 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1754 offsetof(struct __sk_buff, cb[4]) + 1),
1755 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1756 offsetof(struct __sk_buff, cb[4]) + 2),
1757 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1758 offsetof(struct __sk_buff, cb[4]) + 3),
1759 BPF_EXIT_INSN(),
1760 },
1761 .result = ACCEPT,
1762 },
1763 {
1764 "__sk_buff->hash, offset 0, byte store not permitted",
1765 .insns = {
1766 BPF_MOV64_IMM(BPF_REG_0, 0),
1767 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1768 offsetof(struct __sk_buff, hash)),
1769 BPF_EXIT_INSN(),
1770 },
1771 .errstr = "invalid bpf_context access",
1772 .result = REJECT,
1773 },
1774 {
1775 "__sk_buff->tc_index, offset 3, byte store not permitted",
1776 .insns = {
1777 BPF_MOV64_IMM(BPF_REG_0, 0),
1778 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1779 offsetof(struct __sk_buff, tc_index) + 3),
1780 BPF_EXIT_INSN(),
1781 },
1782 .errstr = "invalid bpf_context access",
1783 .result = REJECT,
1784 },
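/* As the following tests expect, only the least-significant byte of
 * skb->hash may be read with a 1-byte load; the #if picks the offset
 * of that byte on little- vs big-endian hosts.
 */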
1785 {
1786 "check skb->hash byte load permitted",
1787 .insns = {
1788 BPF_MOV64_IMM(BPF_REG_0, 0),
1789#if __BYTE_ORDER == __LITTLE_ENDIAN
1790 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1791 offsetof(struct __sk_buff, hash)),
1792#else
1793 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1794 offsetof(struct __sk_buff, hash) + 3),
1795#endif
1796 BPF_EXIT_INSN(),
1797 },
1798 .result = ACCEPT,
1799 },
1800 {
1801 "check skb->hash byte load not permitted 1",
1802 .insns = {
1803 BPF_MOV64_IMM(BPF_REG_0, 0),
1804 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1805 offsetof(struct __sk_buff, hash) + 1),
1806 BPF_EXIT_INSN(),
1807 },
1808 .errstr = "invalid bpf_context access",
1809 .result = REJECT,
1810 },
1811 {
1812 "check skb->hash byte load not permitted 2",
1813 .insns = {
1814 BPF_MOV64_IMM(BPF_REG_0, 0),
1815 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1816 offsetof(struct __sk_buff, hash) + 2),
1817 BPF_EXIT_INSN(),
1818 },
1819 .errstr = "invalid bpf_context access",
1820 .result = REJECT,
1821 },
1822 {
1823 "check skb->hash byte load not permitted 3",
1824 .insns = {
1825 BPF_MOV64_IMM(BPF_REG_0, 0),
1826#if __BYTE_ORDER == __LITTLE_ENDIAN
1827 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1828 offsetof(struct __sk_buff, hash) + 3),
1829#else
1830 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1831 offsetof(struct __sk_buff, hash)),
1832#endif
1833 BPF_EXIT_INSN(),
1834 },
1835 .errstr = "invalid bpf_context access",
1836 .result = REJECT,
1837 },
1838 {
1839 "check cb access: byte, wrong type",
1840 .insns = {
1841 BPF_MOV64_IMM(BPF_REG_0, 0),
1842 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1843 offsetof(struct __sk_buff, cb[0])),
1844 BPF_EXIT_INSN(),
1845 },
1846 .errstr = "invalid bpf_context access",
1847 .result = REJECT,
1848 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1849 },
1850 {
1851 "check cb access: half",
1852 .insns = {
1853 BPF_MOV64_IMM(BPF_REG_0, 0),
1854 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1855 offsetof(struct __sk_buff, cb[0])),
1856 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1857 offsetof(struct __sk_buff, cb[0]) + 2),
1858 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1859 offsetof(struct __sk_buff, cb[1])),
1860 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1861 offsetof(struct __sk_buff, cb[1]) + 2),
1862 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1863 offsetof(struct __sk_buff, cb[2])),
1864 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1865 offsetof(struct __sk_buff, cb[2]) + 2),
1866 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1867 offsetof(struct __sk_buff, cb[3])),
1868 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1869 offsetof(struct __sk_buff, cb[3]) + 2),
1870 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1871 offsetof(struct __sk_buff, cb[4])),
1872 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1873 offsetof(struct __sk_buff, cb[4]) + 2),
1874 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1875 offsetof(struct __sk_buff, cb[0])),
1876 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1877 offsetof(struct __sk_buff, cb[0]) + 2),
1878 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1879 offsetof(struct __sk_buff, cb[1])),
1880 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1881 offsetof(struct __sk_buff, cb[1]) + 2),
1882 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1883 offsetof(struct __sk_buff, cb[2])),
1884 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1885 offsetof(struct __sk_buff, cb[2]) + 2),
1886 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1887 offsetof(struct __sk_buff, cb[3])),
1888 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1889 offsetof(struct __sk_buff, cb[3]) + 2),
1890 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1891 offsetof(struct __sk_buff, cb[4])),
1892 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1893 offsetof(struct __sk_buff, cb[4]) + 2),
1894 BPF_EXIT_INSN(),
1895 },
1896 .result = ACCEPT,
1897 },
1898 {
1899 "check cb access: half, unaligned",
1900 .insns = {
1901 BPF_MOV64_IMM(BPF_REG_0, 0),
1902 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1903 offsetof(struct __sk_buff, cb[0]) + 1),
1904 BPF_EXIT_INSN(),
1905 },
1906 .errstr = "misaligned context access",
1907 .result = REJECT,
1908 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1909 },
1910 {
1911 "check __sk_buff->hash, offset 0, half store not permitted",
1912 .insns = {
1913 BPF_MOV64_IMM(BPF_REG_0, 0),
1914 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1915 offsetof(struct __sk_buff, hash)),
1916 BPF_EXIT_INSN(),
1917 },
1918 .errstr = "invalid bpf_context access",
1919 .result = REJECT,
1920 },
1921 {
1922 "check __sk_buff->tc_index, offset 2, half store not permitted",
1923 .insns = {
1924 BPF_MOV64_IMM(BPF_REG_0, 0),
1925 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1926 offsetof(struct __sk_buff, tc_index) + 2),
1927 BPF_EXIT_INSN(),
1928 },
1929 .errstr = "invalid bpf_context access",
1930 .result = REJECT,
1931 },
1932 {
1933 "check skb->hash half load permitted",
1934 .insns = {
1935 BPF_MOV64_IMM(BPF_REG_0, 0),
1936#if __BYTE_ORDER == __LITTLE_ENDIAN
1937 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1938 offsetof(struct __sk_buff, hash)),
1939#else
1940 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1941 offsetof(struct __sk_buff, hash) + 2),
1942#endif
1943 BPF_EXIT_INSN(),
1944 },
1945 .result = ACCEPT,
1946 },
1947 {
1948 "check skb->hash half load not permitted",
1949 .insns = {
1950 BPF_MOV64_IMM(BPF_REG_0, 0),
1951#if __BYTE_ORDER == __LITTLE_ENDIAN
1952 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1953 offsetof(struct __sk_buff, hash) + 2),
1954#else
1955 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1956 offsetof(struct __sk_buff, hash)),
1957#endif
1958 BPF_EXIT_INSN(),
1959 },
1960 .errstr = "invalid bpf_context access",
1961 .result = REJECT,
1962 },
1963 {
1964 "check cb access: half, wrong type",
1965 .insns = {
1966 BPF_MOV64_IMM(BPF_REG_0, 0),
1967 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1968 offsetof(struct __sk_buff, cb[0])),
1969 BPF_EXIT_INSN(),
1970 },
1971 .errstr = "invalid bpf_context access",
1972 .result = REJECT,
1973 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1974 },
1975 {
1976 "check cb access: word",
1977 .insns = {
1978 BPF_MOV64_IMM(BPF_REG_0, 0),
1979 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1980 offsetof(struct __sk_buff, cb[0])),
1981 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1982 offsetof(struct __sk_buff, cb[1])),
1983 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1984 offsetof(struct __sk_buff, cb[2])),
1985 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1986 offsetof(struct __sk_buff, cb[3])),
1987 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1988 offsetof(struct __sk_buff, cb[4])),
1989 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1990 offsetof(struct __sk_buff, cb[0])),
1991 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1992 offsetof(struct __sk_buff, cb[1])),
1993 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1994 offsetof(struct __sk_buff, cb[2])),
1995 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1996 offsetof(struct __sk_buff, cb[3])),
1997 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1998 offsetof(struct __sk_buff, cb[4])),
1999 BPF_EXIT_INSN(),
2000 },
2001 .result = ACCEPT,
2002 },
2003 {
2004 "check cb access: word, unaligned 1",
2005 .insns = {
2006 BPF_MOV64_IMM(BPF_REG_0, 0),
2007 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2008 offsetof(struct __sk_buff, cb[0]) + 2),
2009 BPF_EXIT_INSN(),
2010 },
2011 .errstr = "misaligned context access",
2012 .result = REJECT,
2013 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2014 },
2015 {
2016 "check cb access: word, unaligned 2",
2017 .insns = {
2018 BPF_MOV64_IMM(BPF_REG_0, 0),
2019 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2020 offsetof(struct __sk_buff, cb[4]) + 1),
2021 BPF_EXIT_INSN(),
2022 },
2023 .errstr = "misaligned context access",
2024 .result = REJECT,
2025 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2026 },
2027 {
2028 "check cb access: word, unaligned 3",
2029 .insns = {
2030 BPF_MOV64_IMM(BPF_REG_0, 0),
2031 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2032 offsetof(struct __sk_buff, cb[4]) + 2),
2033 BPF_EXIT_INSN(),
2034 },
2035 .errstr = "misaligned context access",
2036 .result = REJECT,
2037 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2038 },
2039 {
2040 "check cb access: word, unaligned 4",
2041 .insns = {
2042 BPF_MOV64_IMM(BPF_REG_0, 0),
2043 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2044 offsetof(struct __sk_buff, cb[4]) + 3),
2045 BPF_EXIT_INSN(),
2046 },
2047 .errstr = "misaligned context access",
2048 .result = REJECT,
2049 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2050 },
2051 {
2052 "check cb access: double",
2053 .insns = {
2054 BPF_MOV64_IMM(BPF_REG_0, 0),
2055 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2056 offsetof(struct __sk_buff, cb[0])),
2057 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2058 offsetof(struct __sk_buff, cb[2])),
2059 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2060 offsetof(struct __sk_buff, cb[0])),
2061 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2062 offsetof(struct __sk_buff, cb[2])),
2063 BPF_EXIT_INSN(),
2064 },
2065 .result = ACCEPT,
2066 },
2067 {
2068 "check cb access: double, unaligned 1",
2069 .insns = {
2070 BPF_MOV64_IMM(BPF_REG_0, 0),
2071 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2072 offsetof(struct __sk_buff, cb[1])),
2073 BPF_EXIT_INSN(),
2074 },
2075 .errstr = "misaligned context access",
2076 .result = REJECT,
2077 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2078 },
2079 {
2080 "check cb access: double, unaligned 2",
2081 .insns = {
2082 BPF_MOV64_IMM(BPF_REG_0, 0),
2083 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2084 offsetof(struct __sk_buff, cb[3])),
2085 BPF_EXIT_INSN(),
2086 },
2087 .errstr = "misaligned context access",
2088 .result = REJECT,
2089 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2090 },
2091 {
2092 "check cb access: double, oob 1",
2093 .insns = {
2094 BPF_MOV64_IMM(BPF_REG_0, 0),
2095 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2096 offsetof(struct __sk_buff, cb[4])),
2097 BPF_EXIT_INSN(),
2098 },
2099 .errstr = "invalid bpf_context access",
2100 .result = REJECT,
2101 },
2102 {
2103 "check cb access: double, oob 2",
2104 .insns = {
2105 BPF_MOV64_IMM(BPF_REG_0, 0),
2106 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2107 offsetof(struct __sk_buff, cb[4])),
2108 BPF_EXIT_INSN(),
2109 },
2110 .errstr = "invalid bpf_context access",
2111 .result = REJECT,
2112 },
2113 {
2114 "check __sk_buff->ifindex dw store not permitted",
2115 .insns = {
2116 BPF_MOV64_IMM(BPF_REG_0, 0),
2117 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2118 offsetof(struct __sk_buff, ifindex)),
2119 BPF_EXIT_INSN(),
2120 },
2121 .errstr = "invalid bpf_context access",
2122 .result = REJECT,
2123 },
2124 {
2125 "check __sk_buff->ifindex dw load not permitted",
2126 .insns = {
2127 BPF_MOV64_IMM(BPF_REG_0, 0),
2128 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2129 offsetof(struct __sk_buff, ifindex)),
2130 BPF_EXIT_INSN(),
2131 },
2132 .errstr = "invalid bpf_context access",
2133 .result = REJECT,
2134 },
2135 {
2136 "check cb access: double, wrong type",
2137 .insns = {
2138 BPF_MOV64_IMM(BPF_REG_0, 0),
2139 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2140 offsetof(struct __sk_buff, cb[0])),
2141 BPF_EXIT_INSN(),
2142 },
2143 .errstr = "invalid bpf_context access",
2144 .result = REJECT,
2145 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2146 },
2147 {
2148 "check out of range skb->cb access",
2149 .insns = {
2150 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2151 offsetof(struct __sk_buff, cb[0]) + 256),
2152 BPF_EXIT_INSN(),
2153 },
2154 .errstr = "invalid bpf_context access",
2155 .errstr_unpriv = "",
2156 .result = REJECT,
2157 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2158 },
2159 {
2160 "write skb fields from socket prog",
2161 .insns = {
2162 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2163 offsetof(struct __sk_buff, cb[4])),
2164 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2165 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2166 offsetof(struct __sk_buff, mark)),
2167 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2168 offsetof(struct __sk_buff, tc_index)),
2169 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2170 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2171 offsetof(struct __sk_buff, cb[0])),
2172 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2173 offsetof(struct __sk_buff, cb[2])),
2174 BPF_EXIT_INSN(),
2175 },
2176 .result = ACCEPT,
2177 .errstr_unpriv = "R1 leaks addr",
2178 .result_unpriv = REJECT,
2179 },
2180 {
2181 "write skb fields from tc_cls_act prog",
2182 .insns = {
2183 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2184 offsetof(struct __sk_buff, cb[0])),
2185 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2186 offsetof(struct __sk_buff, mark)),
2187 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2188 offsetof(struct __sk_buff, tc_index)),
2189 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2190 offsetof(struct __sk_buff, tc_index)),
2191 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2192 offsetof(struct __sk_buff, cb[3])),
2193 BPF_EXIT_INSN(),
2194 },
2195 .errstr_unpriv = "",
2196 .result_unpriv = REJECT,
2197 .result = ACCEPT,
2198 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2199 },
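/* PTR_TO_STACK: loads and stores through a pointer derived from R10
 * must stay within the stack and be naturally aligned; the first test
 * additionally checks via .retval that the spilled 0xfaceb00c is read
 * back at runtime.
 */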
2200 {
2201 "PTR_TO_STACK store/load",
2202 .insns = {
2203 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2205 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2206 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2207 BPF_EXIT_INSN(),
2208 },
2209 .result = ACCEPT,
2210 .retval = 0xfaceb00c,
2211 },
2212 {
2213 "PTR_TO_STACK store/load - bad alignment on off",
2214 .insns = {
2215 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2217 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2218 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2219 BPF_EXIT_INSN(),
2220 },
2221 .result = REJECT,
2222 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2223 },
2224 {
2225 "PTR_TO_STACK store/load - bad alignment on reg",
2226 .insns = {
2227 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2228 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2229 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2230 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2231 BPF_EXIT_INSN(),
2232 },
2233 .result = REJECT,
2234 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2235 },
2236 {
2237 "PTR_TO_STACK store/load - out of bounds low",
2238 .insns = {
2239 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2241 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2242 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2243 BPF_EXIT_INSN(),
2244 },
2245 .result = REJECT,
2246 .errstr = "invalid stack off=-79992 size=8",
2247 },
2248 {
2249 "PTR_TO_STACK store/load - out of bounds high",
2250 .insns = {
2251 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2253 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2254 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2255 BPF_EXIT_INSN(),
2256 },
2257 .result = REJECT,
2258 .errstr = "invalid stack off=0 size=8",
2259 },
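/* "unpriv:" tests: the harness also loads these without CAP_SYS_ADMIN,
 * where .result_unpriv/.errstr_unpriv apply; unprivileged programs may
 * not leak, compare or do arbitrary arithmetic on kernel pointers.
 */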
2260 {
2261 "unpriv: return pointer",
2262 .insns = {
2263 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2264 BPF_EXIT_INSN(),
2265 },
2266 .result = ACCEPT,
2267 .result_unpriv = REJECT,
2268 .errstr_unpriv = "R0 leaks addr",
2269 .retval = POINTER_VALUE,
2270 },
2271 {
2272 "unpriv: add const to pointer",
2273 .insns = {
2274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2275 BPF_MOV64_IMM(BPF_REG_0, 0),
2276 BPF_EXIT_INSN(),
2277 },
2278 .result = ACCEPT,
2279 },
2280 {
2281 "unpriv: add pointer to pointer",
2282 .insns = {
2283 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2284 BPF_MOV64_IMM(BPF_REG_0, 0),
2285 BPF_EXIT_INSN(),
2286 },
2287 .result = REJECT,
2288 .errstr = "R1 pointer += pointer",
2289 },
2290 {
2291 "unpriv: neg pointer",
2292 .insns = {
2293 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2294 BPF_MOV64_IMM(BPF_REG_0, 0),
2295 BPF_EXIT_INSN(),
2296 },
2297 .result = ACCEPT,
2298 .result_unpriv = REJECT,
2299 .errstr_unpriv = "R1 pointer arithmetic",
2300 },
2301 {
2302 "unpriv: cmp pointer with const",
2303 .insns = {
2304 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2305 BPF_MOV64_IMM(BPF_REG_0, 0),
2306 BPF_EXIT_INSN(),
2307 },
2308 .result = ACCEPT,
2309 .result_unpriv = REJECT,
2310 .errstr_unpriv = "R1 pointer comparison",
2311 },
2312 {
2313 "unpriv: cmp pointer with pointer",
2314 .insns = {
2315 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2316 BPF_MOV64_IMM(BPF_REG_0, 0),
2317 BPF_EXIT_INSN(),
2318 },
2319 .result = ACCEPT,
2320 .result_unpriv = REJECT,
2321 .errstr_unpriv = "R10 pointer comparison",
2322 },
2323 {
2324 "unpriv: check that printk is disallowed",
2325 .insns = {
2326 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2327 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2329 BPF_MOV64_IMM(BPF_REG_2, 8),
2330 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2331 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2332 BPF_FUNC_trace_printk),
2333 BPF_MOV64_IMM(BPF_REG_0, 0),
2334 BPF_EXIT_INSN(),
2335 },
2336 .errstr_unpriv = "unknown func bpf_trace_printk#6",
2337 .result_unpriv = REJECT,
2338 .result = ACCEPT,
2339 },
2340 {
2341 "unpriv: pass pointer to helper function",
2342 .insns = {
2343 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2344 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2345 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2346 BPF_LD_MAP_FD(BPF_REG_1, 0),
2347 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2348 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2349 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2350 BPF_FUNC_map_update_elem),
2351 BPF_MOV64_IMM(BPF_REG_0, 0),
2352 BPF_EXIT_INSN(),
2353 },
2354 .fixup_map1 = { 3 },
2355 .errstr_unpriv = "R4 leaks addr",
2356 .result_unpriv = REJECT,
2357 .result = ACCEPT,
2358 },
2359 {
2360 "unpriv: indirectly pass pointer on stack to helper function",
2361 .insns = {
2362 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2363 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2364 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2365 BPF_LD_MAP_FD(BPF_REG_1, 0),
2366 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2367 BPF_FUNC_map_lookup_elem),
2368 BPF_MOV64_IMM(BPF_REG_0, 0),
2369 BPF_EXIT_INSN(),
2370 },
2371 .fixup_map1 = { 3 },
2372 .errstr = "invalid indirect read from stack off -8+0 size 8",
2373 .result = REJECT,
2374 },
2375 {
2376 "unpriv: mangle pointer on stack 1",
2377 .insns = {
2378 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2379 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2380 BPF_MOV64_IMM(BPF_REG_0, 0),
2381 BPF_EXIT_INSN(),
2382 },
2383 .errstr_unpriv = "attempt to corrupt spilled",
2384 .result_unpriv = REJECT,
2385 .result = ACCEPT,
2386 },
2387 {
2388 "unpriv: mangle pointer on stack 2",
2389 .insns = {
2390 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2391 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2392 BPF_MOV64_IMM(BPF_REG_0, 0),
2393 BPF_EXIT_INSN(),
2394 },
2395 .errstr_unpriv = "attempt to corrupt spilled",
2396 .result_unpriv = REJECT,
2397 .result = ACCEPT,
2398 },
2399 {
2400 "unpriv: read pointer from stack in small chunks",
2401 .insns = {
2402 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2403 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2404 BPF_MOV64_IMM(BPF_REG_0, 0),
2405 BPF_EXIT_INSN(),
2406 },
2407 .errstr = "invalid size",
2408 .result = REJECT,
2409 },
2410 {
2411 "unpriv: write pointer into ctx",
2412 .insns = {
2413 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2414 BPF_MOV64_IMM(BPF_REG_0, 0),
2415 BPF_EXIT_INSN(),
2416 },
2417 .errstr_unpriv = "R1 leaks addr",
2418 .result_unpriv = REJECT,
2419 .errstr = "invalid bpf_context access",
2420 .result = REJECT,
2421 },
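/* Spilling the ctx pointer (R1) to the stack and filling it back keeps
 * its type; overwriting the spill slot or mixing different pointer
 * types in it loses the ctx type and is rejected in the tests below.
 */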
2422 {
2423 "unpriv: spill/fill of ctx",
2424 .insns = {
2425 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2426 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2427 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2428 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2429 BPF_MOV64_IMM(BPF_REG_0, 0),
2430 BPF_EXIT_INSN(),
2431 },
2432 .result = ACCEPT,
2433 },
2434 {
2435 "unpriv: spill/fill of ctx 2",
2436 .insns = {
2437 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2438 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2439 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2440 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2441 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2442 BPF_FUNC_get_hash_recalc),
2443 BPF_MOV64_IMM(BPF_REG_0, 0),
2444 BPF_EXIT_INSN(),
2445 },
2446 .result = ACCEPT,
2447 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2448 },
2449 {
2450 "unpriv: spill/fill of ctx 3",
2451 .insns = {
2452 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2454 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2455 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2456 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2457 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2458 BPF_FUNC_get_hash_recalc),
2459 BPF_EXIT_INSN(),
2460 },
2461 .result = REJECT,
2462 .errstr = "R1 type=fp expected=ctx",
2463 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2464 },
2465 {
2466 "unpriv: spill/fill of ctx 4",
2467 .insns = {
2468 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2469 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2470 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2471 BPF_MOV64_IMM(BPF_REG_0, 1),
2472 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2473 BPF_REG_0, -8, 0),
2474 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2475 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2476 BPF_FUNC_get_hash_recalc),
2477 BPF_EXIT_INSN(),
2478 },
2479 .result = REJECT,
2480 .errstr = "R1 type=inv expected=ctx",
2481 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2482 },
2483 {
2484 "unpriv: spill/fill of different pointers stx",
2485 .insns = {
2486 BPF_MOV64_IMM(BPF_REG_3, 42),
2487 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2489 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2490 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2492 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2493 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2494 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2495 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2496 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2497 offsetof(struct __sk_buff, mark)),
2498 BPF_MOV64_IMM(BPF_REG_0, 0),
2499 BPF_EXIT_INSN(),
2500 },
2501 .result = REJECT,
2502 .errstr = "same insn cannot be used with different pointers",
2503 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2504 },
2505 {
2506 "unpriv: spill/fill of different pointers ldx",
2507 .insns = {
2508 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2510 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2511 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2512 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2513 -(__s32)offsetof(struct bpf_perf_event_data,
2514 sample_period) - 8),
2515 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2516 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2517 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2518 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2519 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2520 offsetof(struct bpf_perf_event_data,
2521 sample_period)),
2522 BPF_MOV64_IMM(BPF_REG_0, 0),
2523 BPF_EXIT_INSN(),
2524 },
2525 .result = REJECT,
2526 .errstr = "same insn cannot be used with different pointers",
2527 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2528 },
2529 {
2530 "unpriv: write pointer into map elem value",
2531 .insns = {
2532 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2533 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2535 BPF_LD_MAP_FD(BPF_REG_1, 0),
2536 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2537 BPF_FUNC_map_lookup_elem),
2538 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2539 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2540 BPF_EXIT_INSN(),
2541 },
2542 .fixup_map1 = { 3 },
2543 .errstr_unpriv = "R0 leaks addr",
2544 .result_unpriv = REJECT,
2545 .result = ACCEPT,
2546 },
2547 {
2548 "unpriv: partial copy of pointer",
2549 .insns = {
2550 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2551 BPF_MOV64_IMM(BPF_REG_0, 0),
2552 BPF_EXIT_INSN(),
2553 },
2554 .errstr_unpriv = "R10 partial copy",
2555 .result_unpriv = REJECT,
2556 .result = ACCEPT,
2557 },
2558 {
2559 "unpriv: pass pointer to tail_call",
2560 .insns = {
2561 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2562 BPF_LD_MAP_FD(BPF_REG_2, 0),
2563 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2564 BPF_FUNC_tail_call),
2565 BPF_MOV64_IMM(BPF_REG_0, 0),
2566 BPF_EXIT_INSN(),
2567 },
2568 .fixup_prog = { 1 },
2569 .errstr_unpriv = "R3 leaks addr into helper",
2570 .result_unpriv = REJECT,
2571 .result = ACCEPT,
2572 },
2573 {
2574 "unpriv: cmp map pointer with zero",
2575 .insns = {
2576 BPF_MOV64_IMM(BPF_REG_1, 0),
2577 BPF_LD_MAP_FD(BPF_REG_1, 0),
2578 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2579 BPF_MOV64_IMM(BPF_REG_0, 0),
2580 BPF_EXIT_INSN(),
2581 },
2582 .fixup_map1 = { 1 },
2583 .errstr_unpriv = "R1 pointer comparison",
2584 .result_unpriv = REJECT,
2585 .result = ACCEPT,
2586 },
2587 {
2588 "unpriv: write into frame pointer",
2589 .insns = {
2590 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2591 BPF_MOV64_IMM(BPF_REG_0, 0),
2592 BPF_EXIT_INSN(),
2593 },
2594 .errstr = "frame pointer is read only",
2595 .result = REJECT,
2596 },
2597 {
2598 "unpriv: spill/fill frame pointer",
2599 .insns = {
2600 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2602 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2603 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2604 BPF_MOV64_IMM(BPF_REG_0, 0),
2605 BPF_EXIT_INSN(),
2606 },
2607 .errstr = "frame pointer is read only",
2608 .result = REJECT,
2609 },
2610 {
2611 "unpriv: cmp of frame pointer",
2612 .insns = {
2613 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2614 BPF_MOV64_IMM(BPF_REG_0, 0),
2615 BPF_EXIT_INSN(),
2616 },
2617 .errstr_unpriv = "R10 pointer comparison",
2618 .result_unpriv = REJECT,
2619 .result = ACCEPT,
2620 },
2621 {
2622 "unpriv: adding of fp",
2623 .insns = {
2624 BPF_MOV64_IMM(BPF_REG_0, 0),
2625 BPF_MOV64_IMM(BPF_REG_1, 0),
2626 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2627 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2628 BPF_EXIT_INSN(),
2629 },
2630 .result = ACCEPT,
2631 },
2632 {
2633 "unpriv: cmp of stack pointer",
2634 .insns = {
2635 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2636 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2637 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2638 BPF_MOV64_IMM(BPF_REG_0, 0),
2639 BPF_EXIT_INSN(),
2640 },
2641 .errstr_unpriv = "R2 pointer comparison",
2642 .result_unpriv = REJECT,
2643 .result = ACCEPT,
2644 },
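/* runtime/jit tail_call tests: .fixup_prog patches a prog-array map fd
 * into the BPF_LD_MAP_FD slot and .retval is checked after the program
 * is actually run; the expected values depend on the programs the test
 * harness installs in that array, which are not shown here.
 */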
2645 {
2646 "runtime/jit: tail_call within bounds, prog once",
2647 .insns = {
2648 BPF_MOV64_IMM(BPF_REG_3, 0),
2649 BPF_LD_MAP_FD(BPF_REG_2, 0),
2650 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2651 BPF_FUNC_tail_call),
2652 BPF_MOV64_IMM(BPF_REG_0, 1),
2653 BPF_EXIT_INSN(),
2654 },
2655 .fixup_prog = { 1 },
2656 .result = ACCEPT,
2657 .retval = 42,
2658 },
2659 {
2660 "runtime/jit: tail_call within bounds, prog loop",
2661 .insns = {
2662 BPF_MOV64_IMM(BPF_REG_3, 1),
2663 BPF_LD_MAP_FD(BPF_REG_2, 0),
2664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2665 BPF_FUNC_tail_call),
2666 BPF_MOV64_IMM(BPF_REG_0, 1),
2667 BPF_EXIT_INSN(),
2668 },
2669 .fixup_prog = { 1 },
2670 .result = ACCEPT,
2671 .retval = 41,
2672 },
2673 {
2674 "runtime/jit: tail_call within bounds, no prog",
2675 .insns = {
2676 BPF_MOV64_IMM(BPF_REG_3, 2),
2677 BPF_LD_MAP_FD(BPF_REG_2, 0),
2678 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2679 BPF_FUNC_tail_call),
2680 BPF_MOV64_IMM(BPF_REG_0, 1),
2681 BPF_EXIT_INSN(),
2682 },
2683 .fixup_prog = { 1 },
2684 .result = ACCEPT,
2685 .retval = 1,
2686 },
2687 {
2688 "runtime/jit: tail_call out of bounds",
2689 .insns = {
2690 BPF_MOV64_IMM(BPF_REG_3, 256),
2691 BPF_LD_MAP_FD(BPF_REG_2, 0),
2692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2693 BPF_FUNC_tail_call),
2694 BPF_MOV64_IMM(BPF_REG_0, 2),
2695 BPF_EXIT_INSN(),
2696 },
2697 .fixup_prog = { 1 },
2698 .result = ACCEPT,
2699 .retval = 2,
2700 },
2701 {
2702 "runtime/jit: pass negative index to tail_call",
2703 .insns = {
2704 BPF_MOV64_IMM(BPF_REG_3, -1),
2705 BPF_LD_MAP_FD(BPF_REG_2, 0),
2706 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2707 BPF_FUNC_tail_call),
2708 BPF_MOV64_IMM(BPF_REG_0, 2),
2709 BPF_EXIT_INSN(),
2710 },
2711 .fixup_prog = { 1 },
2712 .result = ACCEPT,
2713 .retval = 2,
2714 },
2715 {
2716 "runtime/jit: pass > 32bit index to tail_call",
2717 .insns = {
2718 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
2719 BPF_LD_MAP_FD(BPF_REG_2, 0),
2720 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2721 BPF_FUNC_tail_call),
2722 BPF_MOV64_IMM(BPF_REG_0, 2),
2723 BPF_EXIT_INSN(),
2724 },
2725 .fixup_prog = { 2 },
2726 .result = ACCEPT,
2727 .retval = 42,
2728 },
2729 {
2730 "stack pointer arithmetic",
2731 .insns = {
2732 BPF_MOV64_IMM(BPF_REG_1, 4),
2733 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2734 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2736 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2737 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2738 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2739 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2740 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2742 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2743 BPF_MOV64_IMM(BPF_REG_0, 0),
2744 BPF_EXIT_INSN(),
2745 },
2746 .result = ACCEPT,
2747 },
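/* raw_stack tests: bpf_skb_load_bytes(ctx, off, to, len) is given a
 * stack buffer in R3 and a length in R4; the verifier must see a valid
 * stack region for the buffer and a bounded, non-zero length.
 */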
2748 {
2749 "raw_stack: no skb_load_bytes",
2750 .insns = {
2751 BPF_MOV64_IMM(BPF_REG_2, 4),
2752 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2754 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2755 BPF_MOV64_IMM(BPF_REG_4, 8),
2756 /* Call to skb_load_bytes() omitted. */
2757 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2758 BPF_EXIT_INSN(),
2759 },
2760 .result = REJECT,
2761 .errstr = "invalid read from stack off -8+0 size 8",
2762 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2763 },
2764 {
2765 "raw_stack: skb_load_bytes, negative len",
2766 .insns = {
2767 BPF_MOV64_IMM(BPF_REG_2, 4),
2768 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2770 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2771 BPF_MOV64_IMM(BPF_REG_4, -8),
2772 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2773 BPF_FUNC_skb_load_bytes),
2774 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2775 BPF_EXIT_INSN(),
2776 },
2777 .result = REJECT,
2778 .errstr = "R4 min value is negative",
2779 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2780 },
2781 {
2782 "raw_stack: skb_load_bytes, negative len 2",
2783 .insns = {
2784 BPF_MOV64_IMM(BPF_REG_2, 4),
2785 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2786 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2787 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2788 BPF_MOV64_IMM(BPF_REG_4, ~0),
2789 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2790 BPF_FUNC_skb_load_bytes),
2791 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2792 BPF_EXIT_INSN(),
2793 },
2794 .result = REJECT,
2795 .errstr = "R4 min value is negative",
2796 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2797 },
2798 {
2799 "raw_stack: skb_load_bytes, zero len",
2800 .insns = {
2801 BPF_MOV64_IMM(BPF_REG_2, 4),
2802 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2803 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2804 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2805 BPF_MOV64_IMM(BPF_REG_4, 0),
2806 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2807 BPF_FUNC_skb_load_bytes),
2808 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2809 BPF_EXIT_INSN(),
2810 },
2811 .result = REJECT,
2812 .errstr = "invalid stack type R3",
2813 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2814 },
2815 {
2816 "raw_stack: skb_load_bytes, no init",
2817 .insns = {
2818 BPF_MOV64_IMM(BPF_REG_2, 4),
2819 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2820 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2821 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2822 BPF_MOV64_IMM(BPF_REG_4, 8),
2823 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2824 BPF_FUNC_skb_load_bytes),
2825 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2826 BPF_EXIT_INSN(),
2827 },
2828 .result = ACCEPT,
2829 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2830 },
2831 {
2832 "raw_stack: skb_load_bytes, init",
2833 .insns = {
2834 BPF_MOV64_IMM(BPF_REG_2, 4),
2835 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2836 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2837 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2838 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2839 BPF_MOV64_IMM(BPF_REG_4, 8),
2840 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2841 BPF_FUNC_skb_load_bytes),
2842 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2843 BPF_EXIT_INSN(),
2844 },
2845 .result = ACCEPT,
2846 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2847 },
2848 {
2849 "raw_stack: skb_load_bytes, spilled regs around bounds",
2850 .insns = {
2851 BPF_MOV64_IMM(BPF_REG_2, 4),
2852 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2854 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2855 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2856 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2857 BPF_MOV64_IMM(BPF_REG_4, 8),
2858 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2859 BPF_FUNC_skb_load_bytes),
2860 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2861 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2862 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2863 offsetof(struct __sk_buff, mark)),
2864 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2865 offsetof(struct __sk_buff, priority)),
2866 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2867 BPF_EXIT_INSN(),
2868 },
2869 .result = ACCEPT,
2870 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2871 },
2872 {
2873 "raw_stack: skb_load_bytes, spilled regs corruption",
2874 .insns = {
2875 BPF_MOV64_IMM(BPF_REG_2, 4),
2876 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2878 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2879 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2880 BPF_MOV64_IMM(BPF_REG_4, 8),
2881 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2882 BPF_FUNC_skb_load_bytes),
2883 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2884 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2885 offsetof(struct __sk_buff, mark)),
2886 BPF_EXIT_INSN(),
2887 },
2888 .result = REJECT,
2889 .errstr = "R0 invalid mem access 'inv'",
2890 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2891 },
2892 {
2893 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2894 .insns = {
2895 BPF_MOV64_IMM(BPF_REG_2, 4),
2896 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2898 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2899 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2900 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2901 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2902 BPF_MOV64_IMM(BPF_REG_4, 8),
2903 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2904 BPF_FUNC_skb_load_bytes),
2905 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2906 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2907 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2908 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2909 offsetof(struct __sk_buff, mark)),
2910 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2911 offsetof(struct __sk_buff, priority)),
2912 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2913 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2914 offsetof(struct __sk_buff, pkt_type)),
2915 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2916 BPF_EXIT_INSN(),
2917 },
2918 .result = REJECT,
2919 .errstr = "R3 invalid mem access 'inv'",
2920 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2921 },
2922 {
2923 "raw_stack: skb_load_bytes, spilled regs + data",
2924 .insns = {
2925 BPF_MOV64_IMM(BPF_REG_2, 4),
2926 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2927 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2928 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2929 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2930 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2931 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2932 BPF_MOV64_IMM(BPF_REG_4, 8),
2933 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2934 BPF_FUNC_skb_load_bytes),
2935 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2936 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2937 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2938 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2939 offsetof(struct __sk_buff, mark)),
2940 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2941 offsetof(struct __sk_buff, priority)),
2942 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2943 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2944 BPF_EXIT_INSN(),
2945 },
2946 .result = ACCEPT,
2947 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2948 },
2949 {
2950 "raw_stack: skb_load_bytes, invalid access 1",
2951 .insns = {
2952 BPF_MOV64_IMM(BPF_REG_2, 4),
2953 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2954 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2955 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2956 BPF_MOV64_IMM(BPF_REG_4, 8),
2957 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2958 BPF_FUNC_skb_load_bytes),
2959 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2960 BPF_EXIT_INSN(),
2961 },
2962 .result = REJECT,
2963 .errstr = "invalid stack type R3 off=-513 access_size=8",
2964 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2965 },
2966 {
2967 "raw_stack: skb_load_bytes, invalid access 2",
2968 .insns = {
2969 BPF_MOV64_IMM(BPF_REG_2, 4),
2970 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2972 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2973 BPF_MOV64_IMM(BPF_REG_4, 8),
2974 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2975 BPF_FUNC_skb_load_bytes),
2976 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2977 BPF_EXIT_INSN(),
2978 },
2979 .result = REJECT,
2980 .errstr = "invalid stack type R3 off=-1 access_size=8",
2981 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2982 },
2983 {
2984 "raw_stack: skb_load_bytes, invalid access 3",
2985 .insns = {
2986 BPF_MOV64_IMM(BPF_REG_2, 4),
2987 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2989 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2990 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2991 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2992 BPF_FUNC_skb_load_bytes),
2993 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2994 BPF_EXIT_INSN(),
2995 },
2996 .result = REJECT,
2997 .errstr = "R4 min value is negative",
2998 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2999 },
3000 {
3001 "raw_stack: skb_load_bytes, invalid access 4",
3002 .insns = {
3003 BPF_MOV64_IMM(BPF_REG_2, 4),
3004 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3006 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3007 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3008 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3009 BPF_FUNC_skb_load_bytes),
3010 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3011 BPF_EXIT_INSN(),
3012 },
3013 .result = REJECT,
3014 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3015 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3016 },
3017 {
3018 "raw_stack: skb_load_bytes, invalid access 5",
3019 .insns = {
3020 BPF_MOV64_IMM(BPF_REG_2, 4),
3021 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3023 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3024 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3025 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3026 BPF_FUNC_skb_load_bytes),
3027 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3028 BPF_EXIT_INSN(),
3029 },
3030 .result = REJECT,
3031 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3032 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3033 },
3034 {
3035 "raw_stack: skb_load_bytes, invalid access 6",
3036 .insns = {
3037 BPF_MOV64_IMM(BPF_REG_2, 4),
3038 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3039 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3040 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3041 BPF_MOV64_IMM(BPF_REG_4, 0),
3042 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3043 BPF_FUNC_skb_load_bytes),
3044 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3045 BPF_EXIT_INSN(),
3046 },
3047 .result = REJECT,
3048 .errstr = "invalid stack type R3 off=-512 access_size=0",
3049 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3050 },
3051 {
3052 "raw_stack: skb_load_bytes, large access",
3053 .insns = {
3054 BPF_MOV64_IMM(BPF_REG_2, 4),
3055 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3056 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3057 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3058 BPF_MOV64_IMM(BPF_REG_4, 512),
3059 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3060 BPF_FUNC_skb_load_bytes),
3061 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3062 BPF_EXIT_INSN(),
3063 },
3064 .result = ACCEPT,
3065 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3066 },
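/* The two tests below check that BPF_ST (immediate) and BPF_XADD
 * stores into the context are rejected outright, even for a field
 * such as skb->mark that SCHED_CLS may otherwise write via BPF_STX.
 */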
3067 {
3068 "context stores via ST",
3069 .insns = {
3070 BPF_MOV64_IMM(BPF_REG_0, 0),
3071 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3072 BPF_EXIT_INSN(),
3073 },
3074 .errstr = "BPF_ST stores into R1 context is not allowed",
3075 .result = REJECT,
3076 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3077 },
3078 {
3079 "context stores via XADD",
3080 .insns = {
3081 BPF_MOV64_IMM(BPF_REG_0, 0),
3082 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3083 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3084 BPF_EXIT_INSN(),
3085 },
3086 .errstr = "BPF_XADD stores into R1 context is not allowed",
3087 .result = REJECT,
3088 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3089 },
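/* Direct packet access: programs load skb->data and skb->data_end,
 * prove data + len <= data_end with a conditional jump, and only then
 * may they read or write packet bytes within the proven range.
 */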
3090 {
3091 "direct packet access: test1",
3092 .insns = {
3093 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3094 offsetof(struct __sk_buff, data)),
3095 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3096 offsetof(struct __sk_buff, data_end)),
3097 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3099 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3100 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3101 BPF_MOV64_IMM(BPF_REG_0, 0),
3102 BPF_EXIT_INSN(),
3103 },
3104 .result = ACCEPT,
3105 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3106 },
3107 {
3108 "direct packet access: test2",
3109 .insns = {
3110 BPF_MOV64_IMM(BPF_REG_0, 1),
3111 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3112 offsetof(struct __sk_buff, data_end)),
3113 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3114 offsetof(struct __sk_buff, data)),
3115 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3116 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3117 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3118 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3119 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3120 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3121 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3122 offsetof(struct __sk_buff, data)),
3123 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3124 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3125 offsetof(struct __sk_buff, len)),
3126 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3127 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3128 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3129 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3131 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3132 offsetof(struct __sk_buff, data_end)),
3133 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3134 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3135 BPF_MOV64_IMM(BPF_REG_0, 0),
3136 BPF_EXIT_INSN(),
3137 },
3138 .result = ACCEPT,
3139 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3140 },
3141 {
3142 "direct packet access: test3",
3143 .insns = {
3144 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3145 offsetof(struct __sk_buff, data)),
3146 BPF_MOV64_IMM(BPF_REG_0, 0),
3147 BPF_EXIT_INSN(),
3148 },
3149 .errstr = "invalid bpf_context access off=76",
3150 .result = REJECT,
3151 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3152 },
3153 {
3154 "direct packet access: test4 (write)",
3155 .insns = {
3156 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3157 offsetof(struct __sk_buff, data)),
3158 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3159 offsetof(struct __sk_buff, data_end)),
3160 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3162 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3163 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3164 BPF_MOV64_IMM(BPF_REG_0, 0),
3165 BPF_EXIT_INSN(),
3166 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003167 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003168 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3169 },
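	/*
	 * test4 above additionally shows that, once the length check has
	 * passed, stores into the packet are allowed for SCHED_CLS.
	 * Tests 5-9 below turn the comparison around (pkt_end >= reg
	 * rather than reg > pkt_end, or both in the "double" variants):
	 * the verifier may only widen the packet range on the branch where
	 * the comparison actually proves the access is in bounds, and has
	 * to reject a load placed on the other side of the branch.
	 */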
Aaron Yue1633ac02016-08-11 18:17:17 -07003170 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02003171 "direct packet access: test5 (pkt_end >= reg, good access)",
3172 .insns = {
3173 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3174 offsetof(struct __sk_buff, data)),
3175 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3176 offsetof(struct __sk_buff, data_end)),
3177 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3179 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3180 BPF_MOV64_IMM(BPF_REG_0, 1),
3181 BPF_EXIT_INSN(),
3182 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3183 BPF_MOV64_IMM(BPF_REG_0, 0),
3184 BPF_EXIT_INSN(),
3185 },
3186 .result = ACCEPT,
3187 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3188 },
3189 {
3190 "direct packet access: test6 (pkt_end >= reg, bad access)",
3191 .insns = {
3192 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3193 offsetof(struct __sk_buff, data)),
3194 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3195 offsetof(struct __sk_buff, data_end)),
3196 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3197 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3198 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3199 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3200 BPF_MOV64_IMM(BPF_REG_0, 1),
3201 BPF_EXIT_INSN(),
3202 BPF_MOV64_IMM(BPF_REG_0, 0),
3203 BPF_EXIT_INSN(),
3204 },
3205 .errstr = "invalid access to packet",
3206 .result = REJECT,
3207 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3208 },
3209 {
3210 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3211 .insns = {
3212 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3213 offsetof(struct __sk_buff, data)),
3214 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3215 offsetof(struct __sk_buff, data_end)),
3216 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3218 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3219 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3220 BPF_MOV64_IMM(BPF_REG_0, 1),
3221 BPF_EXIT_INSN(),
3222 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3223 BPF_MOV64_IMM(BPF_REG_0, 0),
3224 BPF_EXIT_INSN(),
3225 },
3226 .errstr = "invalid access to packet",
3227 .result = REJECT,
3228 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3229 },
3230 {
3231 "direct packet access: test8 (double test, variant 1)",
3232 .insns = {
3233 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3234 offsetof(struct __sk_buff, data)),
3235 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3236 offsetof(struct __sk_buff, data_end)),
3237 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3239 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3240 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3241 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3242 BPF_MOV64_IMM(BPF_REG_0, 1),
3243 BPF_EXIT_INSN(),
3244 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3245 BPF_MOV64_IMM(BPF_REG_0, 0),
3246 BPF_EXIT_INSN(),
3247 },
3248 .result = ACCEPT,
3249 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3250 },
3251 {
3252 "direct packet access: test9 (double test, variant 2)",
3253 .insns = {
3254 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3255 offsetof(struct __sk_buff, data)),
3256 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3257 offsetof(struct __sk_buff, data_end)),
3258 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3260 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3261 BPF_MOV64_IMM(BPF_REG_0, 1),
3262 BPF_EXIT_INSN(),
3263 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3264 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3265 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3266 BPF_MOV64_IMM(BPF_REG_0, 0),
3267 BPF_EXIT_INSN(),
3268 },
3269 .result = ACCEPT,
3270 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3271 },
3272 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003273 "direct packet access: test10 (write invalid)",
3274 .insns = {
3275 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3276 offsetof(struct __sk_buff, data)),
3277 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3278 offsetof(struct __sk_buff, data_end)),
3279 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3281 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3282 BPF_MOV64_IMM(BPF_REG_0, 0),
3283 BPF_EXIT_INSN(),
3284 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3285 BPF_MOV64_IMM(BPF_REG_0, 0),
3286 BPF_EXIT_INSN(),
3287 },
3288 .errstr = "invalid access to packet",
3289 .result = REJECT,
3290 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3291 },
3292 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003293 "direct packet access: test11 (shift, good access)",
3294 .insns = {
3295 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3296 offsetof(struct __sk_buff, data)),
3297 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3298 offsetof(struct __sk_buff, data_end)),
3299 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3300 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3301 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3302 BPF_MOV64_IMM(BPF_REG_3, 144),
3303 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3304 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3305 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3306 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3307 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3308 BPF_MOV64_IMM(BPF_REG_0, 1),
3309 BPF_EXIT_INSN(),
3310 BPF_MOV64_IMM(BPF_REG_0, 0),
3311 BPF_EXIT_INSN(),
3312 },
3313 .result = ACCEPT,
3314 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003315 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003316 },
3317 {
3318 "direct packet access: test12 (and, good access)",
3319 .insns = {
3320 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3321 offsetof(struct __sk_buff, data)),
3322 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3323 offsetof(struct __sk_buff, data_end)),
3324 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3325 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3326 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3327 BPF_MOV64_IMM(BPF_REG_3, 144),
3328 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3329 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3330 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3331 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3332 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3333 BPF_MOV64_IMM(BPF_REG_0, 1),
3334 BPF_EXIT_INSN(),
3335 BPF_MOV64_IMM(BPF_REG_0, 0),
3336 BPF_EXIT_INSN(),
3337 },
3338 .result = ACCEPT,
3339 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003340 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003341 },
3342 {
3343 "direct packet access: test13 (branches, good access)",
3344 .insns = {
3345 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3346 offsetof(struct __sk_buff, data)),
3347 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3348 offsetof(struct __sk_buff, data_end)),
3349 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3351 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3352 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3353 offsetof(struct __sk_buff, mark)),
3354 BPF_MOV64_IMM(BPF_REG_4, 1),
3355 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3356 BPF_MOV64_IMM(BPF_REG_3, 14),
3357 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3358 BPF_MOV64_IMM(BPF_REG_3, 24),
3359 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3361 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3362 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3363 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3364 BPF_MOV64_IMM(BPF_REG_0, 1),
3365 BPF_EXIT_INSN(),
3366 BPF_MOV64_IMM(BPF_REG_0, 0),
3367 BPF_EXIT_INSN(),
3368 },
3369 .result = ACCEPT,
3370 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003371 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003372 },
3373 {
William Tu63dfef72017-02-04 08:37:29 -08003374 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3375 .insns = {
3376 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3377 offsetof(struct __sk_buff, data)),
3378 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3379 offsetof(struct __sk_buff, data_end)),
3380 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3381 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3382 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3383 BPF_MOV64_IMM(BPF_REG_5, 12),
3384 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3385 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3386 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3387 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3388 BPF_MOV64_IMM(BPF_REG_0, 1),
3389 BPF_EXIT_INSN(),
3390 BPF_MOV64_IMM(BPF_REG_0, 0),
3391 BPF_EXIT_INSN(),
3392 },
3393 .result = ACCEPT,
3394 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003395 .retval = 1,
William Tu63dfef72017-02-04 08:37:29 -08003396 },
3397 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003398 "direct packet access: test15 (spill with xadd)",
3399 .insns = {
3400 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3401 offsetof(struct __sk_buff, data)),
3402 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3403 offsetof(struct __sk_buff, data_end)),
3404 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3406 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3407 BPF_MOV64_IMM(BPF_REG_5, 4096),
3408 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3409 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3410 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3411 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3412 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3413 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3414 BPF_MOV64_IMM(BPF_REG_0, 0),
3415 BPF_EXIT_INSN(),
3416 },
3417 .errstr = "R2 invalid mem access 'inv'",
3418 .result = REJECT,
3419 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3420 },
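	/*
	 * In test15 the packet pointer is spilled to the stack and the same
	 * slot is then hit with an XADD.  The read-modify-write turns the
	 * spilled value into an unknown scalar, so the fill brings back
	 * 'inv' rather than a packet pointer and the following store has
	 * to be rejected ("R2 invalid mem access 'inv'").
	 */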
3421 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02003422 "direct packet access: test16 (arith on data_end)",
3423 .insns = {
3424 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3425 offsetof(struct __sk_buff, data)),
3426 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3427 offsetof(struct __sk_buff, data_end)),
3428 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3429 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3431 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3432 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3433 BPF_MOV64_IMM(BPF_REG_0, 0),
3434 BPF_EXIT_INSN(),
3435 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08003436 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
Daniel Borkmann728a8532017-04-27 01:39:32 +02003437 .result = REJECT,
3438 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3439 },
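	/*
	 * data_end is an end-of-range marker, not a pointer a program may
	 * move: arithmetic on PTR_TO_PACKET_END would let it grow its own
	 * bound, so test16 is rejected before the store is even considered.
	 */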
3440 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003441 "direct packet access: test17 (pruning, alignment)",
3442 .insns = {
3443 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3444 offsetof(struct __sk_buff, data)),
3445 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3446 offsetof(struct __sk_buff, data_end)),
3447 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3448 offsetof(struct __sk_buff, mark)),
3449 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3451 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3452 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3453 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3454 BPF_MOV64_IMM(BPF_REG_0, 0),
3455 BPF_EXIT_INSN(),
3456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3457 BPF_JMP_A(-6),
3458 },
Edward Creef65b1842017-08-07 15:27:12 +01003459 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003460 .result = REJECT,
3461 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3462 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3463 },
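	/*
	 * test17 is loaded with F_LOAD_WITH_STRICT_ALIGNMENT so that the
	 * misalignment is reported even on architectures that normally
	 * allow unaligned access.  The path through the +1 increment puts
	 * the 4-byte store at a packet offset that is no longer 4-byte
	 * aligned; the verifier must verify that branch too rather than
	 * prune it as equivalent to the already-checked aligned path.
	 */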
3464 {
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003465 "direct packet access: test18 (imm += pkt_ptr, 1)",
3466 .insns = {
3467 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3468 offsetof(struct __sk_buff, data)),
3469 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3470 offsetof(struct __sk_buff, data_end)),
3471 BPF_MOV64_IMM(BPF_REG_0, 8),
3472 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3473 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3474 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3475 BPF_MOV64_IMM(BPF_REG_0, 0),
3476 BPF_EXIT_INSN(),
3477 },
3478 .result = ACCEPT,
3479 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3480 },
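	/*
	 * Tests 18-24 add things the other way around: a scalar (an
	 * immediate, or a value read back from the stack) has the packet
	 * pointer added to it.  The result must still be tracked as a
	 * packet pointer with a variable offset, and the usual comparison
	 * against data_end is still required before any access.
	 */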
3481 {
3482 "direct packet access: test19 (imm += pkt_ptr, 2)",
3483 .insns = {
3484 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3485 offsetof(struct __sk_buff, data)),
3486 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3487 offsetof(struct __sk_buff, data_end)),
3488 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3489 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3490 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3491 BPF_MOV64_IMM(BPF_REG_4, 4),
3492 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3493 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3494 BPF_MOV64_IMM(BPF_REG_0, 0),
3495 BPF_EXIT_INSN(),
3496 },
3497 .result = ACCEPT,
3498 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3499 },
3500 {
3501 "direct packet access: test20 (x += pkt_ptr, 1)",
3502 .insns = {
3503 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3504 offsetof(struct __sk_buff, data)),
3505 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3506 offsetof(struct __sk_buff, data_end)),
3507 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3508 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3509 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003510 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003511 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3512 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3513 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003515 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3516 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3517 BPF_MOV64_IMM(BPF_REG_0, 0),
3518 BPF_EXIT_INSN(),
3519 },
3520 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3521 .result = ACCEPT,
3522 },
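	/*
	 * In tests 20 and 21 the scalar loaded back from the stack is
	 * first masked with AND 0x7fff.  That gives the verifier a known
	 * maximum for the variable offset, so a single comparison of
	 * ptr + 0x7fff - 1 against data_end is enough to cover every
	 * offset the program can actually generate.
	 */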
3523 {
3524 "direct packet access: test21 (x += pkt_ptr, 2)",
3525 .insns = {
3526 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3527 offsetof(struct __sk_buff, data)),
3528 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3529 offsetof(struct __sk_buff, data_end)),
3530 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3532 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3533 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3534 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3535 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003536 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003537 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3538 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003540 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3541 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3542 BPF_MOV64_IMM(BPF_REG_0, 0),
3543 BPF_EXIT_INSN(),
3544 },
3545 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3546 .result = ACCEPT,
3547 },
3548 {
3549 "direct packet access: test22 (x += pkt_ptr, 3)",
3550 .insns = {
3551 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3552 offsetof(struct __sk_buff, data)),
3553 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3554 offsetof(struct __sk_buff, data_end)),
3555 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3557 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3558 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3559 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3560 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3561 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3562 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3563 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3564 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003565 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003566 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3567 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3569 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3570 BPF_MOV64_IMM(BPF_REG_2, 1),
3571 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3572 BPF_MOV64_IMM(BPF_REG_0, 0),
3573 BPF_EXIT_INSN(),
3574 },
3575 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3576 .result = ACCEPT,
3577 },
3578 {
3579 "direct packet access: test23 (x += pkt_ptr, 4)",
3580 .insns = {
3581 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3582 offsetof(struct __sk_buff, data)),
3583 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3584 offsetof(struct __sk_buff, data_end)),
3585 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3586 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3587 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3588 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3589 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3590 BPF_MOV64_IMM(BPF_REG_0, 31),
3591 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3592 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3593 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3595 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3596 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3597 BPF_MOV64_IMM(BPF_REG_0, 0),
3598 BPF_EXIT_INSN(),
3599 },
3600 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3601 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003602 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003603 },
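	/*
	 * test23 masks the scalar with 0xffff and piles 31 plus another
	 * 0xffff - 1 on top for the check: the worst-case offset becomes
	 * too large for the verifier to keep a usable range on the copy in
	 * R5 (note "r=0" in the error string), so the store through R5 is
	 * rejected.  test24 keeps the values small again and is accepted.
	 */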
3604 {
3605 "direct packet access: test24 (x += pkt_ptr, 5)",
3606 .insns = {
3607 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3608 offsetof(struct __sk_buff, data)),
3609 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3610 offsetof(struct __sk_buff, data_end)),
3611 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3612 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3613 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3614 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3615 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3616 BPF_MOV64_IMM(BPF_REG_0, 64),
3617 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3618 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3619 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
Edward Cree1f9ab382017-08-07 15:29:11 +01003620 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003621 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3622 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3623 BPF_MOV64_IMM(BPF_REG_0, 0),
3624 BPF_EXIT_INSN(),
3625 },
3626 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3627 .result = ACCEPT,
3628 },
3629 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003630 "direct packet access: test25 (marking on <, good access)",
3631 .insns = {
3632 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3633 offsetof(struct __sk_buff, data)),
3634 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3635 offsetof(struct __sk_buff, data_end)),
3636 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3638 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3639 BPF_MOV64_IMM(BPF_REG_0, 0),
3640 BPF_EXIT_INSN(),
3641 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3642 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3643 },
3644 .result = ACCEPT,
3645 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3646 },
3647 {
3648 "direct packet access: test26 (marking on <, bad access)",
3649 .insns = {
3650 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3651 offsetof(struct __sk_buff, data)),
3652 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3653 offsetof(struct __sk_buff, data_end)),
3654 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3655 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3656 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3657 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3658 BPF_MOV64_IMM(BPF_REG_0, 0),
3659 BPF_EXIT_INSN(),
3660 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3661 },
3662 .result = REJECT,
3663 .errstr = "invalid access to packet",
3664 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3665 },
3666 {
3667 "direct packet access: test27 (marking on <=, good access)",
3668 .insns = {
3669 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3670 offsetof(struct __sk_buff, data)),
3671 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3672 offsetof(struct __sk_buff, data_end)),
3673 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3675 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3676 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3677 BPF_MOV64_IMM(BPF_REG_0, 1),
3678 BPF_EXIT_INSN(),
3679 },
3680 .result = ACCEPT,
3681 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003682 .retval = 1,
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003683 },
3684 {
3685 "direct packet access: test28 (marking on <=, bad access)",
3686 .insns = {
3687 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3688 offsetof(struct __sk_buff, data)),
3689 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3690 offsetof(struct __sk_buff, data_end)),
3691 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3692 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3693 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3694 BPF_MOV64_IMM(BPF_REG_0, 1),
3695 BPF_EXIT_INSN(),
3696 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3697 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3698 },
3699 .result = REJECT,
3700 .errstr = "invalid access to packet",
3701 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3702 },
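	/*
	 * The "helper access to packet" tests switch from direct loads and
	 * stores to passing packet pointers into helpers.  Here the
	 * verifier has to prove that the whole region the helper may touch
	 * (pointer plus the size argument, or the map key/value size for
	 * map helpers) lies inside the checked packet range.  Roughly, in
	 * restricted C (illustrative sketch only, map name made up):
	 *
	 *	if (data + 8 > data_end)
	 *		return XDP_DROP;
	 *	value = bpf_map_lookup_elem(&map, data); // 8-byte key from pkt
	 *
	 * is only accepted because the whole key is covered by the check.
	 */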
3703 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003704 "helper access to packet: test1, valid packet_ptr range",
3705 .insns = {
3706 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3707 offsetof(struct xdp_md, data)),
3708 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3709 offsetof(struct xdp_md, data_end)),
3710 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3712 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3713 BPF_LD_MAP_FD(BPF_REG_1, 0),
3714 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3715 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003716 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3717 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003718 BPF_MOV64_IMM(BPF_REG_0, 0),
3719 BPF_EXIT_INSN(),
3720 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003721 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003722 .result_unpriv = ACCEPT,
3723 .result = ACCEPT,
3724 .prog_type = BPF_PROG_TYPE_XDP,
3725 },
3726 {
3727 "helper access to packet: test2, unchecked packet_ptr",
3728 .insns = {
3729 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3730 offsetof(struct xdp_md, data)),
3731 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003732 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3733 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003734 BPF_MOV64_IMM(BPF_REG_0, 0),
3735 BPF_EXIT_INSN(),
3736 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003737 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003738 .result = REJECT,
3739 .errstr = "invalid access to packet",
3740 .prog_type = BPF_PROG_TYPE_XDP,
3741 },
3742 {
3743 "helper access to packet: test3, variable add",
3744 .insns = {
3745 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3746 offsetof(struct xdp_md, data)),
3747 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3748 offsetof(struct xdp_md, data_end)),
3749 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3750 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3751 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3752 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3753 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3754 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3755 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3756 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3757 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3758 BPF_LD_MAP_FD(BPF_REG_1, 0),
3759 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3761 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003762 BPF_MOV64_IMM(BPF_REG_0, 0),
3763 BPF_EXIT_INSN(),
3764 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003765 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003766 .result = ACCEPT,
3767 .prog_type = BPF_PROG_TYPE_XDP,
3768 },
3769 {
3770 "helper access to packet: test4, packet_ptr with bad range",
3771 .insns = {
3772 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3773 offsetof(struct xdp_md, data)),
3774 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3775 offsetof(struct xdp_md, data_end)),
3776 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3778 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3779 BPF_MOV64_IMM(BPF_REG_0, 0),
3780 BPF_EXIT_INSN(),
3781 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003782 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3783 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003784 BPF_MOV64_IMM(BPF_REG_0, 0),
3785 BPF_EXIT_INSN(),
3786 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003787 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003788 .result = REJECT,
3789 .errstr = "invalid access to packet",
3790 .prog_type = BPF_PROG_TYPE_XDP,
3791 },
3792 {
3793 "helper access to packet: test5, packet_ptr with too short range",
3794 .insns = {
3795 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3796 offsetof(struct xdp_md, data)),
3797 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3798 offsetof(struct xdp_md, data_end)),
3799 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3800 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3802 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3803 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003804 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3805 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003806 BPF_MOV64_IMM(BPF_REG_0, 0),
3807 BPF_EXIT_INSN(),
3808 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003809 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003810 .result = REJECT,
3811 .errstr = "invalid access to packet",
3812 .prog_type = BPF_PROG_TYPE_XDP,
3813 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003814 {
3815 "helper access to packet: test6, cls valid packet_ptr range",
3816 .insns = {
3817 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3818 offsetof(struct __sk_buff, data)),
3819 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3820 offsetof(struct __sk_buff, data_end)),
3821 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3822 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3823 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3824 BPF_LD_MAP_FD(BPF_REG_1, 0),
3825 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3826 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003827 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3828 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003829 BPF_MOV64_IMM(BPF_REG_0, 0),
3830 BPF_EXIT_INSN(),
3831 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003832 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003833 .result = ACCEPT,
3834 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3835 },
3836 {
3837 "helper access to packet: test7, cls unchecked packet_ptr",
3838 .insns = {
3839 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3840 offsetof(struct __sk_buff, data)),
3841 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003842 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3843 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003844 BPF_MOV64_IMM(BPF_REG_0, 0),
3845 BPF_EXIT_INSN(),
3846 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003847 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003848 .result = REJECT,
3849 .errstr = "invalid access to packet",
3850 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3851 },
3852 {
3853 "helper access to packet: test8, cls variable add",
3854 .insns = {
3855 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3856 offsetof(struct __sk_buff, data)),
3857 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3858 offsetof(struct __sk_buff, data_end)),
3859 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3861 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3862 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3863 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3864 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3865 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3866 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3867 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3868 BPF_LD_MAP_FD(BPF_REG_1, 0),
3869 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003870 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3871 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003872 BPF_MOV64_IMM(BPF_REG_0, 0),
3873 BPF_EXIT_INSN(),
3874 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003875 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003876 .result = ACCEPT,
3877 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3878 },
3879 {
3880 "helper access to packet: test9, cls packet_ptr with bad range",
3881 .insns = {
3882 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3883 offsetof(struct __sk_buff, data)),
3884 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3885 offsetof(struct __sk_buff, data_end)),
3886 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3888 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3889 BPF_MOV64_IMM(BPF_REG_0, 0),
3890 BPF_EXIT_INSN(),
3891 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003892 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3893 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003894 BPF_MOV64_IMM(BPF_REG_0, 0),
3895 BPF_EXIT_INSN(),
3896 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003897 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003898 .result = REJECT,
3899 .errstr = "invalid access to packet",
3900 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3901 },
3902 {
3903 "helper access to packet: test10, cls packet_ptr with too short range",
3904 .insns = {
3905 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3906 offsetof(struct __sk_buff, data)),
3907 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3908 offsetof(struct __sk_buff, data_end)),
3909 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3910 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3912 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3913 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003914 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3915 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003916 BPF_MOV64_IMM(BPF_REG_0, 0),
3917 BPF_EXIT_INSN(),
3918 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003919 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003920 .result = REJECT,
3921 .errstr = "invalid access to packet",
3922 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3923 },
3924 {
3925 "helper access to packet: test11, cls unsuitable helper 1",
3926 .insns = {
3927 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3928 offsetof(struct __sk_buff, data)),
3929 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3930 offsetof(struct __sk_buff, data_end)),
3931 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3932 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3933 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3934 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3935 BPF_MOV64_IMM(BPF_REG_2, 0),
3936 BPF_MOV64_IMM(BPF_REG_4, 42),
3937 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003938 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3939 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003940 BPF_MOV64_IMM(BPF_REG_0, 0),
3941 BPF_EXIT_INSN(),
3942 },
3943 .result = REJECT,
3944 .errstr = "helper access to the packet",
3945 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3946 },
3947 {
3948 "helper access to packet: test12, cls unsuitable helper 2",
3949 .insns = {
3950 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3951 offsetof(struct __sk_buff, data)),
3952 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3953 offsetof(struct __sk_buff, data_end)),
3954 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3955 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3956 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3957 BPF_MOV64_IMM(BPF_REG_2, 0),
3958 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003959 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3960 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003961 BPF_MOV64_IMM(BPF_REG_0, 0),
3962 BPF_EXIT_INSN(),
3963 },
3964 .result = REJECT,
3965 .errstr = "helper access to the packet",
3966 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3967 },
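	/*
	 * Tests 11 and 12 fail for a different reason: bpf_skb_store_bytes()
	 * and bpf_skb_load_bytes() are not among the helpers allowed to take
	 * a packet pointer at all (they operate on offsets into the skb
	 * instead), so the verifier refuses with "helper access to the
	 * packet" regardless of any preceding length check.
	 */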
3968 {
3969 "helper access to packet: test13, cls helper ok",
3970 .insns = {
3971 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3972 offsetof(struct __sk_buff, data)),
3973 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3974 offsetof(struct __sk_buff, data_end)),
3975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3976 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3978 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3979 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3980 BPF_MOV64_IMM(BPF_REG_2, 4),
3981 BPF_MOV64_IMM(BPF_REG_3, 0),
3982 BPF_MOV64_IMM(BPF_REG_4, 0),
3983 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003984 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3985 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003986 BPF_MOV64_IMM(BPF_REG_0, 0),
3987 BPF_EXIT_INSN(),
3988 },
3989 .result = ACCEPT,
3990 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3991 },
3992 {
Edward Creef65b1842017-08-07 15:27:12 +01003993 "helper access to packet: test14, cls helper ok sub",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003994 .insns = {
3995 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3996 offsetof(struct __sk_buff, data)),
3997 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3998 offsetof(struct __sk_buff, data_end)),
3999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4000 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4001 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4002 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4003 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4004 BPF_MOV64_IMM(BPF_REG_2, 4),
4005 BPF_MOV64_IMM(BPF_REG_3, 0),
4006 BPF_MOV64_IMM(BPF_REG_4, 0),
4007 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004008 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4009 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004010 BPF_MOV64_IMM(BPF_REG_0, 0),
4011 BPF_EXIT_INSN(),
4012 },
Edward Creef65b1842017-08-07 15:27:12 +01004013 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004014 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4015 },
4016 {
Edward Creef65b1842017-08-07 15:27:12 +01004017 "helper access to packet: test15, cls helper fail sub",
4018 .insns = {
4019 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4020 offsetof(struct __sk_buff, data)),
4021 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4022 offsetof(struct __sk_buff, data_end)),
4023 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4024 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4025 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4026 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4027 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4028 BPF_MOV64_IMM(BPF_REG_2, 4),
4029 BPF_MOV64_IMM(BPF_REG_3, 0),
4030 BPF_MOV64_IMM(BPF_REG_4, 0),
4031 BPF_MOV64_IMM(BPF_REG_5, 0),
4032 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4033 BPF_FUNC_csum_diff),
4034 BPF_MOV64_IMM(BPF_REG_0, 0),
4035 BPF_EXIT_INSN(),
4036 },
4037 .result = REJECT,
4038 .errstr = "invalid access to packet",
4039 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4040 },
4041 {
4042 "helper access to packet: test16, cls helper fail range 1",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004043 .insns = {
4044 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4045 offsetof(struct __sk_buff, data)),
4046 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4047 offsetof(struct __sk_buff, data_end)),
4048 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4049 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4050 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4051 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4052 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4053 BPF_MOV64_IMM(BPF_REG_2, 8),
4054 BPF_MOV64_IMM(BPF_REG_3, 0),
4055 BPF_MOV64_IMM(BPF_REG_4, 0),
4056 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004057 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4058 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004059 BPF_MOV64_IMM(BPF_REG_0, 0),
4060 BPF_EXIT_INSN(),
4061 },
4062 .result = REJECT,
4063 .errstr = "invalid access to packet",
4064 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4065 },
4066 {
Edward Creef65b1842017-08-07 15:27:12 +01004067 "helper access to packet: test17, cls helper fail range 2",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004068 .insns = {
4069 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4070 offsetof(struct __sk_buff, data)),
4071 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4072 offsetof(struct __sk_buff, data_end)),
4073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4074 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4076 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4077 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4078 BPF_MOV64_IMM(BPF_REG_2, -9),
4079 BPF_MOV64_IMM(BPF_REG_3, 0),
4080 BPF_MOV64_IMM(BPF_REG_4, 0),
4081 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004082 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4083 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004084 BPF_MOV64_IMM(BPF_REG_0, 0),
4085 BPF_EXIT_INSN(),
4086 },
4087 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004088 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004089 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4090 },
4091 {
Edward Creef65b1842017-08-07 15:27:12 +01004092 "helper access to packet: test18, cls helper fail range 3",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004093 .insns = {
4094 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4095 offsetof(struct __sk_buff, data)),
4096 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4097 offsetof(struct __sk_buff, data_end)),
4098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4099 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4101 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4103 BPF_MOV64_IMM(BPF_REG_2, ~0),
4104 BPF_MOV64_IMM(BPF_REG_3, 0),
4105 BPF_MOV64_IMM(BPF_REG_4, 0),
4106 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004107 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4108 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004109 BPF_MOV64_IMM(BPF_REG_0, 0),
4110 BPF_EXIT_INSN(),
4111 },
4112 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004113 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004114 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4115 },
4116 {
Yonghong Songb6ff6392017-11-12 14:49:11 -08004117 "helper access to packet: test19, cls helper range zero",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004118 .insns = {
4119 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4120 offsetof(struct __sk_buff, data)),
4121 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4122 offsetof(struct __sk_buff, data_end)),
4123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4124 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4126 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4127 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4128 BPF_MOV64_IMM(BPF_REG_2, 0),
4129 BPF_MOV64_IMM(BPF_REG_3, 0),
4130 BPF_MOV64_IMM(BPF_REG_4, 0),
4131 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004132 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4133 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004134 BPF_MOV64_IMM(BPF_REG_0, 0),
4135 BPF_EXIT_INSN(),
4136 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08004137 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004138 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4139 },
4140 {
Edward Creef65b1842017-08-07 15:27:12 +01004141 "helper access to packet: test20, pkt end as input",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004142 .insns = {
4143 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4144 offsetof(struct __sk_buff, data)),
4145 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4146 offsetof(struct __sk_buff, data_end)),
4147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4149 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4150 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4151 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4152 BPF_MOV64_IMM(BPF_REG_2, 4),
4153 BPF_MOV64_IMM(BPF_REG_3, 0),
4154 BPF_MOV64_IMM(BPF_REG_4, 0),
4155 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004156 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4157 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004158 BPF_MOV64_IMM(BPF_REG_0, 0),
4159 BPF_EXIT_INSN(),
4160 },
4161 .result = REJECT,
4162 .errstr = "R1 type=pkt_end expected=fp",
4163 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4164 },
4165 {
Edward Creef65b1842017-08-07 15:27:12 +01004166 "helper access to packet: test21, wrong reg",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004167 .insns = {
4168 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4169 offsetof(struct __sk_buff, data)),
4170 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4171 offsetof(struct __sk_buff, data_end)),
4172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4173 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4175 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4176 BPF_MOV64_IMM(BPF_REG_2, 4),
4177 BPF_MOV64_IMM(BPF_REG_3, 0),
4178 BPF_MOV64_IMM(BPF_REG_4, 0),
4179 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004180 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4181 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004182 BPF_MOV64_IMM(BPF_REG_0, 0),
4183 BPF_EXIT_INSN(),
4184 },
4185 .result = REJECT,
4186 .errstr = "invalid access to packet",
4187 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4188 },
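	/*
	 * The array map tests below all follow the usual lookup pattern:
	 * push a key on the stack, call bpf_map_lookup_elem(), test the
	 * result for NULL, bounds check the index and only then touch the
	 * value.  In restricted C the accepted shape is roughly
	 * (illustrative sketch only):
	 *
	 *	long long key = 0;
	 *	struct test_val *val = bpf_map_lookup_elem(&map, &key);
	 *
	 *	if (!val)
	 *		return 0;
	 *	if (idx >= MAX_ENTRIES)
	 *		return 0;
	 *	val->foo[idx] = 1;
	 *
	 * The rejected variants leave out or botch the bounds check on the
	 * index.  Unprivileged loads additionally fail with "R0 leaks addr"
	 * because these programs exit with the map value pointer still
	 * sitting in R0.
	 */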
Josef Bacik48461132016-09-28 10:54:32 -04004189 {
4190 "valid map access into an array with a constant",
4191 .insns = {
4192 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4193 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4195 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4197 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004198 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004199 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4200 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004201 BPF_EXIT_INSN(),
4202 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004203 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004204 .errstr_unpriv = "R0 leaks addr",
4205 .result_unpriv = REJECT,
4206 .result = ACCEPT,
4207 },
4208 {
4209 "valid map access into an array with a register",
4210 .insns = {
4211 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4212 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4213 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4214 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004215 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4216 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004217 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4218 BPF_MOV64_IMM(BPF_REG_1, 4),
4219 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4220 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004221 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4222 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004223 BPF_EXIT_INSN(),
4224 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004225 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004226 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004227 .result_unpriv = REJECT,
4228 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004229 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004230 },
4231 {
4232 "valid map access into an array with a variable",
4233 .insns = {
4234 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4235 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4237 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004238 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4239 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004240 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4241 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4242 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4243 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4244 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004245 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4246 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004247 BPF_EXIT_INSN(),
4248 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004249 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004250 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004251 .result_unpriv = REJECT,
4252 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004253 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004254 },
4255 {
4256 "valid map access into an array with a signed variable",
4257 .insns = {
4258 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4259 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4261 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004262 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4263 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004264 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4265 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4266 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4267 BPF_MOV32_IMM(BPF_REG_1, 0),
4268 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4269 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4270 BPF_MOV32_IMM(BPF_REG_1, 0),
4271 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4272 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004273 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4274 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004275 BPF_EXIT_INSN(),
4276 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004277 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004278 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004279 .result_unpriv = REJECT,
4280 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004281 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004282 },
4283 {
4284 "invalid map access into an array with a constant",
4285 .insns = {
4286 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4287 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4289 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004290 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4291 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004292 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4293 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4294 offsetof(struct test_val, foo)),
4295 BPF_EXIT_INSN(),
4296 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004297 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004298 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4299 .result = REJECT,
4300 },
4301 {
4302 "invalid map access into an array with a register",
4303 .insns = {
4304 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4305 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4306 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4307 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004308 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4309 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004310 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4311 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4312 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4313 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004314 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4315 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004316 BPF_EXIT_INSN(),
4317 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004318 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004319 .errstr = "R0 min value is outside of the array range",
4320 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004321 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004322 },
4323 {
4324 "invalid map access into an array with a variable",
4325 .insns = {
4326 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4327 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4329 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004330 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4331 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004332 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4333 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4334 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4335 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004336 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4337 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004338 BPF_EXIT_INSN(),
4339 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004340 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004341 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
Josef Bacik48461132016-09-28 10:54:32 -04004342 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004343 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004344 },
4345 {
4346 "invalid map access into an array with no floor check",
4347 .insns = {
4348 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4349 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4351 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004352 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4353 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004354 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
Edward Creef65b1842017-08-07 15:27:12 +01004355 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik48461132016-09-28 10:54:32 -04004356 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4357 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4358 BPF_MOV32_IMM(BPF_REG_1, 0),
4359 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4360 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004361 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4362 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004363 BPF_EXIT_INSN(),
4364 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004365 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004366 .errstr_unpriv = "R0 leaks addr",
4367 .errstr = "R0 unbounded memory access",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004368 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004369 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004370 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004371 },
4372 {
4373 "invalid map access into an array with a invalid max check",
4374 .insns = {
4375 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4376 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4378 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004379 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4380 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004381 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4382 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4383 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4384 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4385 BPF_MOV32_IMM(BPF_REG_1, 0),
4386 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4387 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004388 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4389 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004390 BPF_EXIT_INSN(),
4391 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004392 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004393 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004394 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004395 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004396 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004397 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004398 },
4399 {
4400 "invalid map access into an array with a invalid max check",
4401 .insns = {
4402 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4403 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4404 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4405 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4407 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004408 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4409 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4410 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4411 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4412 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4413 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004414 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4415 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004416 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4417 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004418 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4419 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004420 BPF_EXIT_INSN(),
4421 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004422 .fixup_map2 = { 3, 11 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004423 .errstr = "R0 pointer += pointer",
Josef Bacik48461132016-09-28 10:54:32 -04004424 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004425 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004426 },
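	/*
	 * bpf_map_lookup_elem() returns PTR_TO_MAP_VALUE_OR_NULL.  Copies
	 * of the result share an id, so a NULL check on any one copy marks
	 * all of them as safe map value pointers, which the test below
	 * relies on.  The "alu ops on ptr_to_map_value_or_null" tests then
	 * show that any arithmetic performed before the NULL check destroys
	 * that tracking and is rejected, even when the operations cancel
	 * out arithmetically (add -2 then +2, AND -1, and so on).
	 */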
Thomas Graf57a09bf2016-10-18 19:51:19 +02004427 {
4428 "multiple registers share map_lookup_elem result",
4429 .insns = {
4430 BPF_MOV64_IMM(BPF_REG_1, 10),
4431 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4432 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4433 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4434 BPF_LD_MAP_FD(BPF_REG_1, 0),
4435 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4436 BPF_FUNC_map_lookup_elem),
4437 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4438 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4439 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4440 BPF_EXIT_INSN(),
4441 },
4442 .fixup_map1 = { 4 },
4443 .result = ACCEPT,
4444 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4445 },
4446 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004447 "alu ops on ptr_to_map_value_or_null, 1",
4448 .insns = {
4449 BPF_MOV64_IMM(BPF_REG_1, 10),
4450 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4451 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4453 BPF_LD_MAP_FD(BPF_REG_1, 0),
4454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4455 BPF_FUNC_map_lookup_elem),
4456 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
4458 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
4459 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4460 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4461 BPF_EXIT_INSN(),
4462 },
4463 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004464 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004465 .result = REJECT,
4466 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4467 },
4468 {
4469 "alu ops on ptr_to_map_value_or_null, 2",
4470 .insns = {
4471 BPF_MOV64_IMM(BPF_REG_1, 10),
4472 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4473 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4475 BPF_LD_MAP_FD(BPF_REG_1, 0),
4476 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4477 BPF_FUNC_map_lookup_elem),
4478 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4479 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
4480 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4481 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4482 BPF_EXIT_INSN(),
4483 },
4484 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004485 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004486 .result = REJECT,
4487 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4488 },
4489 {
4490 "alu ops on ptr_to_map_value_or_null, 3",
4491 .insns = {
4492 BPF_MOV64_IMM(BPF_REG_1, 10),
4493 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4494 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4496 BPF_LD_MAP_FD(BPF_REG_1, 0),
4497 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4498 BPF_FUNC_map_lookup_elem),
4499 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4500 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4501 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4502 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4503 BPF_EXIT_INSN(),
4504 },
4505 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004506 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004507 .result = REJECT,
4508 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4509 },
4510 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02004511 "invalid memory access with multiple map_lookup_elem calls",
4512 .insns = {
4513 BPF_MOV64_IMM(BPF_REG_1, 10),
4514 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4515 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4517 BPF_LD_MAP_FD(BPF_REG_1, 0),
4518 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4519 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4520 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4521 BPF_FUNC_map_lookup_elem),
4522 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4523 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4524 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4525 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4526 BPF_FUNC_map_lookup_elem),
4527 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4528 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4529 BPF_EXIT_INSN(),
4530 },
4531 .fixup_map1 = { 4 },
4532 .result = REJECT,
4533 .errstr = "R4 !read_ok",
4534 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4535 },
4536 {
4537 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4538 .insns = {
4539 BPF_MOV64_IMM(BPF_REG_1, 10),
4540 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4541 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4542 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4543 BPF_LD_MAP_FD(BPF_REG_1, 0),
4544 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4545 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4546 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4547 BPF_FUNC_map_lookup_elem),
4548 BPF_MOV64_IMM(BPF_REG_2, 10),
4549 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4550 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4551 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4552 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4553 BPF_FUNC_map_lookup_elem),
4554 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4555 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4556 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4557 BPF_EXIT_INSN(),
4558 },
4559 .fixup_map1 = { 4 },
4560 .result = ACCEPT,
4561 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4562 },
Josef Bacike9548902016-11-29 12:35:19 -05004563 {
4564 "invalid map access from else condition",
4565 .insns = {
4566 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4567 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4569 BPF_LD_MAP_FD(BPF_REG_1, 0),
4570 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4571 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4572 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4573 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4574 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4575 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4576 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4577 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4578 BPF_EXIT_INSN(),
4579 },
4580 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004581 .errstr = "R0 unbounded memory access",
Josef Bacike9548902016-11-29 12:35:19 -05004582 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004583 .errstr_unpriv = "R0 leaks addr",
Josef Bacike9548902016-11-29 12:35:19 -05004584 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004585 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05004586 },
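	/*
	 * The four "constant register |= constant" tests check that OR-ing
	 * known constants still yields a known constant for the helper's size
	 * argument: 34 | 13 = 47 fits the 48-byte stack buffer at R10-48,
	 * while 34 | 24 = 58 does not and trips "invalid stack type R1
	 * off=-48 access_size=58".  The same pair of checks is repeated with
	 * the second operand held in a register.
	 */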
Gianluca Borello3c8397442016-12-03 12:31:33 -08004587 {
4588 "constant register |= constant should keep constant type",
4589 .insns = {
4590 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4591 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4592 BPF_MOV64_IMM(BPF_REG_2, 34),
4593 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4594 BPF_MOV64_IMM(BPF_REG_3, 0),
4595 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4596 BPF_EXIT_INSN(),
4597 },
4598 .result = ACCEPT,
4599 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4600 },
4601 {
4602 "constant register |= constant should not bypass stack boundary checks",
4603 .insns = {
4604 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4605 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4606 BPF_MOV64_IMM(BPF_REG_2, 34),
4607 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4608 BPF_MOV64_IMM(BPF_REG_3, 0),
4609 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4610 BPF_EXIT_INSN(),
4611 },
4612 .errstr = "invalid stack type R1 off=-48 access_size=58",
4613 .result = REJECT,
4614 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4615 },
4616 {
4617 "constant register |= constant register should keep constant type",
4618 .insns = {
4619 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4620 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4621 BPF_MOV64_IMM(BPF_REG_2, 34),
4622 BPF_MOV64_IMM(BPF_REG_4, 13),
4623 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4624 BPF_MOV64_IMM(BPF_REG_3, 0),
4625 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4626 BPF_EXIT_INSN(),
4627 },
4628 .result = ACCEPT,
4629 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4630 },
4631 {
4632 "constant register |= constant register should not bypass stack boundary checks",
4633 .insns = {
4634 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4635 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4636 BPF_MOV64_IMM(BPF_REG_2, 34),
4637 BPF_MOV64_IMM(BPF_REG_4, 24),
4638 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4639 BPF_MOV64_IMM(BPF_REG_3, 0),
4640 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4641 BPF_EXIT_INSN(),
4642 },
4643 .errstr = "invalid stack type R1 off=-48 access_size=58",
4644 .result = REJECT,
4645 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4646 },
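	/*
	 * LWT program types: LWT_IN and LWT_OUT may read packet data directly
	 * but must not write it ("cannot write into packet"), while LWT_XMIT
	 * may do both.  The packet tests below all start with the usual
	 * bounds check, roughly equivalent to this restricted-C sketch
	 * (illustrative only, not part of the suite):
	 *
	 *	void *data = (void *)(long)skb->data;
	 *	void *data_end = (void *)(long)skb->data_end;
	 *
	 *	if (data + 8 > data_end)
	 *		return 0;
	 *	... access data[0] here ...
	 *
	 * The trailing tc_classid tests check that this __sk_buff field is
	 * not readable from any LWT type ("invalid bpf_context access").
	 */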
Thomas Graf3f731d82016-12-05 10:30:52 +01004647 {
4648 "invalid direct packet write for LWT_IN",
4649 .insns = {
4650 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4651 offsetof(struct __sk_buff, data)),
4652 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4653 offsetof(struct __sk_buff, data_end)),
4654 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4655 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4656 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4657 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4658 BPF_MOV64_IMM(BPF_REG_0, 0),
4659 BPF_EXIT_INSN(),
4660 },
4661 .errstr = "cannot write into packet",
4662 .result = REJECT,
4663 .prog_type = BPF_PROG_TYPE_LWT_IN,
4664 },
4665 {
4666 "invalid direct packet write for LWT_OUT",
4667 .insns = {
4668 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4669 offsetof(struct __sk_buff, data)),
4670 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4671 offsetof(struct __sk_buff, data_end)),
4672 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4673 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4674 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4675 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4676 BPF_MOV64_IMM(BPF_REG_0, 0),
4677 BPF_EXIT_INSN(),
4678 },
4679 .errstr = "cannot write into packet",
4680 .result = REJECT,
4681 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4682 },
4683 {
4684 "direct packet write for LWT_XMIT",
4685 .insns = {
4686 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4687 offsetof(struct __sk_buff, data)),
4688 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4689 offsetof(struct __sk_buff, data_end)),
4690 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4691 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4692 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4693 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4694 BPF_MOV64_IMM(BPF_REG_0, 0),
4695 BPF_EXIT_INSN(),
4696 },
4697 .result = ACCEPT,
4698 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4699 },
4700 {
4701 "direct packet read for LWT_IN",
4702 .insns = {
4703 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4704 offsetof(struct __sk_buff, data)),
4705 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4706 offsetof(struct __sk_buff, data_end)),
4707 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4709 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4710 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4711 BPF_MOV64_IMM(BPF_REG_0, 0),
4712 BPF_EXIT_INSN(),
4713 },
4714 .result = ACCEPT,
4715 .prog_type = BPF_PROG_TYPE_LWT_IN,
4716 },
4717 {
4718 "direct packet read for LWT_OUT",
4719 .insns = {
4720 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4721 offsetof(struct __sk_buff, data)),
4722 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4723 offsetof(struct __sk_buff, data_end)),
4724 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4725 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4726 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4727 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4728 BPF_MOV64_IMM(BPF_REG_0, 0),
4729 BPF_EXIT_INSN(),
4730 },
4731 .result = ACCEPT,
4732 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4733 },
4734 {
4735 "direct packet read for LWT_XMIT",
4736 .insns = {
4737 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4738 offsetof(struct __sk_buff, data)),
4739 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4740 offsetof(struct __sk_buff, data_end)),
4741 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4743 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4744 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4745 BPF_MOV64_IMM(BPF_REG_0, 0),
4746 BPF_EXIT_INSN(),
4747 },
4748 .result = ACCEPT,
4749 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4750 },
4751 {
Alexei Starovoitovb1977682017-03-24 15:57:33 -07004752 "overlapping checks for direct packet access",
4753 .insns = {
4754 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4755 offsetof(struct __sk_buff, data)),
4756 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4757 offsetof(struct __sk_buff, data_end)),
4758 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4760 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4761 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4763 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4764 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4765 BPF_MOV64_IMM(BPF_REG_0, 0),
4766 BPF_EXIT_INSN(),
4767 },
4768 .result = ACCEPT,
4769 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4770 },
4771 {
Thomas Graf3f731d82016-12-05 10:30:52 +01004772 "invalid access of tc_classid for LWT_IN",
4773 .insns = {
4774 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4775 offsetof(struct __sk_buff, tc_classid)),
4776 BPF_EXIT_INSN(),
4777 },
4778 .result = REJECT,
4779 .errstr = "invalid bpf_context access",
4780 },
4781 {
4782 "invalid access of tc_classid for LWT_OUT",
4783 .insns = {
4784 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4785 offsetof(struct __sk_buff, tc_classid)),
4786 BPF_EXIT_INSN(),
4787 },
4788 .result = REJECT,
4789 .errstr = "invalid bpf_context access",
4790 },
4791 {
4792 "invalid access of tc_classid for LWT_XMIT",
4793 .insns = {
4794 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4795 offsetof(struct __sk_buff, tc_classid)),
4796 BPF_EXIT_INSN(),
4797 },
4798 .result = REJECT,
4799 .errstr = "invalid bpf_context access",
4800 },
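	/*
	 * "leak pointer into ctx/map val": unprivileged programs may not
	 * store a pointer (map, stack or context) into the context or into a
	 * map value ("leaks addr into ctx"/"leaks addr into mem").
	 * Independently of that, BPF_XADD on the context is rejected for
	 * everyone ("BPF_XADD stores into R1 context is not allowed"), so
	 * only the plain ctx store and the map-value XADD are accepted when
	 * privileged.
	 */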
Gianluca Borello57225692017-01-09 10:19:47 -08004801 {
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004802 "leak pointer into ctx 1",
4803 .insns = {
4804 BPF_MOV64_IMM(BPF_REG_0, 0),
4805 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4806 offsetof(struct __sk_buff, cb[0])),
4807 BPF_LD_MAP_FD(BPF_REG_2, 0),
4808 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4809 offsetof(struct __sk_buff, cb[0])),
4810 BPF_EXIT_INSN(),
4811 },
4812 .fixup_map1 = { 2 },
4813 .errstr_unpriv = "R2 leaks addr into mem",
4814 .result_unpriv = REJECT,
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01004815 .result = REJECT,
4816 .errstr = "BPF_XADD stores into R1 context is not allowed",
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004817 },
4818 {
4819 "leak pointer into ctx 2",
4820 .insns = {
4821 BPF_MOV64_IMM(BPF_REG_0, 0),
4822 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4823 offsetof(struct __sk_buff, cb[0])),
4824 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4825 offsetof(struct __sk_buff, cb[0])),
4826 BPF_EXIT_INSN(),
4827 },
4828 .errstr_unpriv = "R10 leaks addr into mem",
4829 .result_unpriv = REJECT,
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01004830 .result = REJECT,
4831 .errstr = "BPF_XADD stores into R1 context is not allowed",
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004832 },
4833 {
4834 "leak pointer into ctx 3",
4835 .insns = {
4836 BPF_MOV64_IMM(BPF_REG_0, 0),
4837 BPF_LD_MAP_FD(BPF_REG_2, 0),
4838 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4839 offsetof(struct __sk_buff, cb[0])),
4840 BPF_EXIT_INSN(),
4841 },
4842 .fixup_map1 = { 1 },
4843 .errstr_unpriv = "R2 leaks addr into ctx",
4844 .result_unpriv = REJECT,
4845 .result = ACCEPT,
4846 },
4847 {
4848 "leak pointer into map val",
4849 .insns = {
4850 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4851 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4852 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4854 BPF_LD_MAP_FD(BPF_REG_1, 0),
4855 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4856 BPF_FUNC_map_lookup_elem),
4857 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4858 BPF_MOV64_IMM(BPF_REG_3, 0),
4859 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4860 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4861 BPF_MOV64_IMM(BPF_REG_0, 0),
4862 BPF_EXIT_INSN(),
4863 },
4864 .fixup_map1 = { 4 },
4865 .errstr_unpriv = "R6 leaks addr into mem",
4866 .result_unpriv = REJECT,
4867 .result = ACCEPT,
4868 },
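	/*
	 * "helper access to map": a map value pointer is passed as the
	 * destination buffer of bpf_probe_read() and the size argument is
	 * varied.  The full 48-byte value and an 8-byte prefix are accepted;
	 * size 0, a size running past the value and a negative size are
	 * rejected.  Roughly equivalent restricted C (illustrative sketch
	 * only, map and value names stand in for what the fixups install):
	 *
	 *	u64 key = 0;
	 *	struct test_val *val = bpf_map_lookup_elem(&map, &key);
	 *
	 *	if (val)
	 *		bpf_probe_read(val, sizeof(*val), NULL);
	 */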
4869 {
Gianluca Borello57225692017-01-09 10:19:47 -08004870 "helper access to map: full range",
4871 .insns = {
4872 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4873 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4874 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4875 BPF_LD_MAP_FD(BPF_REG_1, 0),
4876 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4877 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4878 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4879 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4880 BPF_MOV64_IMM(BPF_REG_3, 0),
4881 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4882 BPF_EXIT_INSN(),
4883 },
4884 .fixup_map2 = { 3 },
4885 .result = ACCEPT,
4886 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4887 },
4888 {
4889 "helper access to map: partial range",
4890 .insns = {
4891 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4892 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4893 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4894 BPF_LD_MAP_FD(BPF_REG_1, 0),
4895 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4896 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4897 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4898 BPF_MOV64_IMM(BPF_REG_2, 8),
4899 BPF_MOV64_IMM(BPF_REG_3, 0),
4900 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4901 BPF_EXIT_INSN(),
4902 },
4903 .fixup_map2 = { 3 },
4904 .result = ACCEPT,
4905 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4906 },
4907 {
4908 "helper access to map: empty range",
4909 .insns = {
4910 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4912 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4913 BPF_LD_MAP_FD(BPF_REG_1, 0),
4914 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004915 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4916 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4917 BPF_MOV64_IMM(BPF_REG_2, 0),
4918 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08004919 BPF_EXIT_INSN(),
4920 },
4921 .fixup_map2 = { 3 },
4922 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4923 .result = REJECT,
4924 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4925 },
4926 {
4927 "helper access to map: out-of-bound range",
4928 .insns = {
4929 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4931 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4932 BPF_LD_MAP_FD(BPF_REG_1, 0),
4933 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4934 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4935 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4936 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4937 BPF_MOV64_IMM(BPF_REG_3, 0),
4938 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4939 BPF_EXIT_INSN(),
4940 },
4941 .fixup_map2 = { 3 },
4942 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4943 .result = REJECT,
4944 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4945 },
4946 {
4947 "helper access to map: negative range",
4948 .insns = {
4949 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4951 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4952 BPF_LD_MAP_FD(BPF_REG_1, 0),
4953 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4954 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4955 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4956 BPF_MOV64_IMM(BPF_REG_2, -8),
4957 BPF_MOV64_IMM(BPF_REG_3, 0),
4958 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4959 BPF_EXIT_INSN(),
4960 },
4961 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004962 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004963 .result = REJECT,
4964 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4965 },
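	/*
	 * "adjusted map (via const imm)": as above, but the map value
	 * pointer is first advanced by a constant immediate,
	 * offsetof(struct test_val, foo), before being handed to
	 * bpf_probe_read(), so the valid size window shrinks accordingly
	 * (off=4 in the error strings).
	 */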
4966 {
4967 "helper access to adjusted map (via const imm): full range",
4968 .insns = {
4969 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4971 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4972 BPF_LD_MAP_FD(BPF_REG_1, 0),
4973 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4974 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4975 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4977 offsetof(struct test_val, foo)),
4978 BPF_MOV64_IMM(BPF_REG_2,
4979 sizeof(struct test_val) -
4980 offsetof(struct test_val, foo)),
4981 BPF_MOV64_IMM(BPF_REG_3, 0),
4982 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4983 BPF_EXIT_INSN(),
4984 },
4985 .fixup_map2 = { 3 },
4986 .result = ACCEPT,
4987 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4988 },
4989 {
4990 "helper access to adjusted map (via const imm): partial range",
4991 .insns = {
4992 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4994 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4995 BPF_LD_MAP_FD(BPF_REG_1, 0),
4996 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4997 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4998 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5000 offsetof(struct test_val, foo)),
5001 BPF_MOV64_IMM(BPF_REG_2, 8),
5002 BPF_MOV64_IMM(BPF_REG_3, 0),
5003 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5004 BPF_EXIT_INSN(),
5005 },
5006 .fixup_map2 = { 3 },
5007 .result = ACCEPT,
5008 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5009 },
5010 {
5011 "helper access to adjusted map (via const imm): empty range",
5012 .insns = {
5013 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5015 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5016 BPF_LD_MAP_FD(BPF_REG_1, 0),
5017 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005018 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Gianluca Borello57225692017-01-09 10:19:47 -08005019 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5020 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5021 offsetof(struct test_val, foo)),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005022 BPF_MOV64_IMM(BPF_REG_2, 0),
5023 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005024 BPF_EXIT_INSN(),
5025 },
5026 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005027 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
Gianluca Borello57225692017-01-09 10:19:47 -08005028 .result = REJECT,
5029 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5030 },
5031 {
5032 "helper access to adjusted map (via const imm): out-of-bound range",
5033 .insns = {
5034 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5035 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5036 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5037 BPF_LD_MAP_FD(BPF_REG_1, 0),
5038 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5039 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5040 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5041 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5042 offsetof(struct test_val, foo)),
5043 BPF_MOV64_IMM(BPF_REG_2,
5044 sizeof(struct test_val) -
5045 offsetof(struct test_val, foo) + 8),
5046 BPF_MOV64_IMM(BPF_REG_3, 0),
5047 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5048 BPF_EXIT_INSN(),
5049 },
5050 .fixup_map2 = { 3 },
5051 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5052 .result = REJECT,
5053 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5054 },
5055 {
5056 "helper access to adjusted map (via const imm): negative range (> adjustment)",
5057 .insns = {
5058 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5059 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5060 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5061 BPF_LD_MAP_FD(BPF_REG_1, 0),
5062 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5063 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5064 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5065 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5066 offsetof(struct test_val, foo)),
5067 BPF_MOV64_IMM(BPF_REG_2, -8),
5068 BPF_MOV64_IMM(BPF_REG_3, 0),
5069 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5070 BPF_EXIT_INSN(),
5071 },
5072 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005073 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005074 .result = REJECT,
5075 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5076 },
5077 {
5078 "helper access to adjusted map (via const imm): negative range (< adjustment)",
5079 .insns = {
5080 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5082 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5083 BPF_LD_MAP_FD(BPF_REG_1, 0),
5084 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5085 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5086 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5087 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5088 offsetof(struct test_val, foo)),
5089 BPF_MOV64_IMM(BPF_REG_2, -1),
5090 BPF_MOV64_IMM(BPF_REG_3, 0),
5091 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5092 BPF_EXIT_INSN(),
5093 },
5094 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005095 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005096 .result = REJECT,
5097 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5098 },
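	/*
	 * "adjusted map (via const reg)": the same offset is applied through
	 * a register loaded with a constant rather than an ALU immediate;
	 * the expectation is that the verifier tracks the known constant the
	 * same way, so the accept/reject outcomes mirror the const-imm
	 * variants.
	 */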
5099 {
5100 "helper access to adjusted map (via const reg): full range",
5101 .insns = {
5102 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5104 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5105 BPF_LD_MAP_FD(BPF_REG_1, 0),
5106 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5107 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5108 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5109 BPF_MOV64_IMM(BPF_REG_3,
5110 offsetof(struct test_val, foo)),
5111 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5112 BPF_MOV64_IMM(BPF_REG_2,
5113 sizeof(struct test_val) -
5114 offsetof(struct test_val, foo)),
5115 BPF_MOV64_IMM(BPF_REG_3, 0),
5116 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5117 BPF_EXIT_INSN(),
5118 },
5119 .fixup_map2 = { 3 },
5120 .result = ACCEPT,
5121 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5122 },
5123 {
5124 "helper access to adjusted map (via const reg): partial range",
5125 .insns = {
5126 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5128 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5129 BPF_LD_MAP_FD(BPF_REG_1, 0),
5130 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5131 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5132 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5133 BPF_MOV64_IMM(BPF_REG_3,
5134 offsetof(struct test_val, foo)),
5135 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5136 BPF_MOV64_IMM(BPF_REG_2, 8),
5137 BPF_MOV64_IMM(BPF_REG_3, 0),
5138 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5139 BPF_EXIT_INSN(),
5140 },
5141 .fixup_map2 = { 3 },
5142 .result = ACCEPT,
5143 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5144 },
5145 {
5146 "helper access to adjusted map (via const reg): empty range",
5147 .insns = {
5148 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5149 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5150 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5151 BPF_LD_MAP_FD(BPF_REG_1, 0),
5152 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
Gianluca Borello57225692017-01-09 10:19:47 -08005154 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5155 BPF_MOV64_IMM(BPF_REG_3, 0),
5156 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005157 BPF_MOV64_IMM(BPF_REG_2, 0),
5158 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005159 BPF_EXIT_INSN(),
5160 },
5161 .fixup_map2 = { 3 },
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005162 .errstr = "R1 min value is outside of the array range",
Gianluca Borello57225692017-01-09 10:19:47 -08005163 .result = REJECT,
5164 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5165 },
5166 {
5167 "helper access to adjusted map (via const reg): out-of-bound range",
5168 .insns = {
5169 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5171 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5172 BPF_LD_MAP_FD(BPF_REG_1, 0),
5173 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5176 BPF_MOV64_IMM(BPF_REG_3,
5177 offsetof(struct test_val, foo)),
5178 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5179 BPF_MOV64_IMM(BPF_REG_2,
5180 sizeof(struct test_val) -
5181 offsetof(struct test_val, foo) + 8),
5182 BPF_MOV64_IMM(BPF_REG_3, 0),
5183 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5184 BPF_EXIT_INSN(),
5185 },
5186 .fixup_map2 = { 3 },
5187 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5188 .result = REJECT,
5189 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5190 },
5191 {
5192 "helper access to adjusted map (via const reg): negative range (> adjustment)",
5193 .insns = {
5194 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5196 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5197 BPF_LD_MAP_FD(BPF_REG_1, 0),
5198 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5199 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5200 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5201 BPF_MOV64_IMM(BPF_REG_3,
5202 offsetof(struct test_val, foo)),
5203 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5204 BPF_MOV64_IMM(BPF_REG_2, -8),
5205 BPF_MOV64_IMM(BPF_REG_3, 0),
5206 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5207 BPF_EXIT_INSN(),
5208 },
5209 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005210 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005211 .result = REJECT,
5212 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5213 },
5214 {
5215 "helper access to adjusted map (via const reg): negative range (< adjustment)",
5216 .insns = {
5217 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5219 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5220 BPF_LD_MAP_FD(BPF_REG_1, 0),
5221 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5222 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5223 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5224 BPF_MOV64_IMM(BPF_REG_3,
5225 offsetof(struct test_val, foo)),
5226 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5227 BPF_MOV64_IMM(BPF_REG_2, -1),
5228 BPF_MOV64_IMM(BPF_REG_3, 0),
5229 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5230 BPF_EXIT_INSN(),
5231 },
5232 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005233 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005234 .result = REJECT,
5235 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5236 },
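	/*
	 * "adjusted map (via variable)": the offset now comes from a value
	 * read out of the map itself, so it must be bounded by a conditional
	 * jump first.  Without an upper bound on the offset ("no max check")
	 * the access is unbounded; with a size one byte past what the bound
	 * allows ("wrong max check") it is rejected as well.
	 */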
5237 {
5238 "helper access to adjusted map (via variable): full range",
5239 .insns = {
5240 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5242 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5243 BPF_LD_MAP_FD(BPF_REG_1, 0),
5244 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5245 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5246 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5247 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5248 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5249 offsetof(struct test_val, foo), 4),
5250 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5251 BPF_MOV64_IMM(BPF_REG_2,
5252 sizeof(struct test_val) -
5253 offsetof(struct test_val, foo)),
5254 BPF_MOV64_IMM(BPF_REG_3, 0),
5255 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5256 BPF_EXIT_INSN(),
5257 },
5258 .fixup_map2 = { 3 },
5259 .result = ACCEPT,
5260 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5261 },
5262 {
5263 "helper access to adjusted map (via variable): partial range",
5264 .insns = {
5265 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5267 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5268 BPF_LD_MAP_FD(BPF_REG_1, 0),
5269 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5270 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5271 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5272 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5273 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5274 offsetof(struct test_val, foo), 4),
5275 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5276 BPF_MOV64_IMM(BPF_REG_2, 8),
5277 BPF_MOV64_IMM(BPF_REG_3, 0),
5278 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5279 BPF_EXIT_INSN(),
5280 },
5281 .fixup_map2 = { 3 },
5282 .result = ACCEPT,
5283 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5284 },
5285 {
5286 "helper access to adjusted map (via variable): empty range",
5287 .insns = {
5288 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5290 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5291 BPF_LD_MAP_FD(BPF_REG_1, 0),
5292 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005293 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
Gianluca Borello57225692017-01-09 10:19:47 -08005294 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5295 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5296 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005297 offsetof(struct test_val, foo), 3),
Gianluca Borello57225692017-01-09 10:19:47 -08005298 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005299 BPF_MOV64_IMM(BPF_REG_2, 0),
5300 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005301 BPF_EXIT_INSN(),
5302 },
5303 .fixup_map2 = { 3 },
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005304 .errstr = "R1 min value is outside of the array range",
Gianluca Borello57225692017-01-09 10:19:47 -08005305 .result = REJECT,
5306 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5307 },
5308 {
5309 "helper access to adjusted map (via variable): no max check",
5310 .insns = {
5311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5313 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5314 BPF_LD_MAP_FD(BPF_REG_1, 0),
5315 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5316 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5317 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5318 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5319 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Edward Creef65b1842017-08-07 15:27:12 +01005320 BPF_MOV64_IMM(BPF_REG_2, 1),
Gianluca Borello57225692017-01-09 10:19:47 -08005321 BPF_MOV64_IMM(BPF_REG_3, 0),
5322 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5323 BPF_EXIT_INSN(),
5324 },
5325 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005326 .errstr = "R1 unbounded memory access",
Gianluca Borello57225692017-01-09 10:19:47 -08005327 .result = REJECT,
5328 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5329 },
5330 {
5331 "helper access to adjusted map (via variable): wrong max check",
5332 .insns = {
5333 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5335 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5336 BPF_LD_MAP_FD(BPF_REG_1, 0),
5337 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5338 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5339 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5340 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5341 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5342 offsetof(struct test_val, foo), 4),
5343 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5344 BPF_MOV64_IMM(BPF_REG_2,
5345 sizeof(struct test_val) -
5346 offsetof(struct test_val, foo) + 1),
5347 BPF_MOV64_IMM(BPF_REG_3, 0),
5348 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5349 BPF_EXIT_INSN(),
5350 },
5351 .fixup_map2 = { 3 },
5352 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
5353 .result = REJECT,
5354 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5355 },
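	/*
	 * "bounds check using <, <=, s<, s<=": the same one-byte map write is
	 * attempted after bounding the index with each of the jump operators.
	 * The "good access" variants constrain the index on the path that
	 * performs the write; the "bad access" variants leave it
	 * unconstrained or possibly negative on that path and are rejected
	 * ("R1 unbounded memory access" / "R1 min value is negative").
	 */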
Gianluca Borellof0318d02017-01-09 10:19:48 -08005356 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02005357 "helper access to map: bounds check using <, good access",
5358 .insns = {
5359 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5361 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5362 BPF_LD_MAP_FD(BPF_REG_1, 0),
5363 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5364 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5365 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5366 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5367 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
5368 BPF_MOV64_IMM(BPF_REG_0, 0),
5369 BPF_EXIT_INSN(),
5370 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5371 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5372 BPF_MOV64_IMM(BPF_REG_0, 0),
5373 BPF_EXIT_INSN(),
5374 },
5375 .fixup_map2 = { 3 },
5376 .result = ACCEPT,
5377 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5378 },
5379 {
5380 "helper access to map: bounds check using <, bad access",
5381 .insns = {
5382 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5384 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5385 BPF_LD_MAP_FD(BPF_REG_1, 0),
5386 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5387 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5388 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5389 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5390 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
5391 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5392 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5393 BPF_MOV64_IMM(BPF_REG_0, 0),
5394 BPF_EXIT_INSN(),
5395 BPF_MOV64_IMM(BPF_REG_0, 0),
5396 BPF_EXIT_INSN(),
5397 },
5398 .fixup_map2 = { 3 },
5399 .result = REJECT,
5400 .errstr = "R1 unbounded memory access",
5401 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5402 },
5403 {
5404 "helper access to map: bounds check using <=, good access",
5405 .insns = {
5406 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5408 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5409 BPF_LD_MAP_FD(BPF_REG_1, 0),
5410 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5411 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5412 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5413 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5414 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
5415 BPF_MOV64_IMM(BPF_REG_0, 0),
5416 BPF_EXIT_INSN(),
5417 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5418 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5419 BPF_MOV64_IMM(BPF_REG_0, 0),
5420 BPF_EXIT_INSN(),
5421 },
5422 .fixup_map2 = { 3 },
5423 .result = ACCEPT,
5424 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5425 },
5426 {
5427 "helper access to map: bounds check using <=, bad access",
5428 .insns = {
5429 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5431 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5432 BPF_LD_MAP_FD(BPF_REG_1, 0),
5433 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5434 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5435 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5436 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5437 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
5438 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5439 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5440 BPF_MOV64_IMM(BPF_REG_0, 0),
5441 BPF_EXIT_INSN(),
5442 BPF_MOV64_IMM(BPF_REG_0, 0),
5443 BPF_EXIT_INSN(),
5444 },
5445 .fixup_map2 = { 3 },
5446 .result = REJECT,
5447 .errstr = "R1 unbounded memory access",
5448 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5449 },
5450 {
5451 "helper access to map: bounds check using s<, good access",
5452 .insns = {
5453 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5455 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5456 BPF_LD_MAP_FD(BPF_REG_1, 0),
5457 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5458 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5459 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5460 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5461 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5462 BPF_MOV64_IMM(BPF_REG_0, 0),
5463 BPF_EXIT_INSN(),
5464 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
5465 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5466 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5467 BPF_MOV64_IMM(BPF_REG_0, 0),
5468 BPF_EXIT_INSN(),
5469 },
5470 .fixup_map2 = { 3 },
5471 .result = ACCEPT,
5472 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5473 },
5474 {
5475 "helper access to map: bounds check using s<, good access 2",
5476 .insns = {
5477 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5479 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5480 BPF_LD_MAP_FD(BPF_REG_1, 0),
5481 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5482 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5483 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5484 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5485 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5486 BPF_MOV64_IMM(BPF_REG_0, 0),
5487 BPF_EXIT_INSN(),
5488 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5489 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5490 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5491 BPF_MOV64_IMM(BPF_REG_0, 0),
5492 BPF_EXIT_INSN(),
5493 },
5494 .fixup_map2 = { 3 },
5495 .result = ACCEPT,
5496 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5497 },
5498 {
5499 "helper access to map: bounds check using s<, bad access",
5500 .insns = {
5501 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5502 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5503 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5504 BPF_LD_MAP_FD(BPF_REG_1, 0),
5505 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5506 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5507 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5508 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5509 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5510 BPF_MOV64_IMM(BPF_REG_0, 0),
5511 BPF_EXIT_INSN(),
5512 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5513 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5514 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5515 BPF_MOV64_IMM(BPF_REG_0, 0),
5516 BPF_EXIT_INSN(),
5517 },
5518 .fixup_map2 = { 3 },
5519 .result = REJECT,
5520 .errstr = "R1 min value is negative",
5521 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5522 },
5523 {
5524 "helper access to map: bounds check using s<=, good access",
5525 .insns = {
5526 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5527 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5528 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5529 BPF_LD_MAP_FD(BPF_REG_1, 0),
5530 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5531 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5532 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5533 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5534 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5535 BPF_MOV64_IMM(BPF_REG_0, 0),
5536 BPF_EXIT_INSN(),
5537 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5538 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5539 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5540 BPF_MOV64_IMM(BPF_REG_0, 0),
5541 BPF_EXIT_INSN(),
5542 },
5543 .fixup_map2 = { 3 },
5544 .result = ACCEPT,
5545 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5546 },
5547 {
5548 "helper access to map: bounds check using s<=, good access 2",
5549 .insns = {
5550 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5552 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5553 BPF_LD_MAP_FD(BPF_REG_1, 0),
5554 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5555 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5556 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5557 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5558 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5559 BPF_MOV64_IMM(BPF_REG_0, 0),
5560 BPF_EXIT_INSN(),
5561 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5562 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5563 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5564 BPF_MOV64_IMM(BPF_REG_0, 0),
5565 BPF_EXIT_INSN(),
5566 },
5567 .fixup_map2 = { 3 },
5568 .result = ACCEPT,
5569 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5570 },
5571 {
5572 "helper access to map: bounds check using s<=, bad access",
5573 .insns = {
5574 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5575 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5576 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5577 BPF_LD_MAP_FD(BPF_REG_1, 0),
5578 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5579 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5580 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5581 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5582 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5583 BPF_MOV64_IMM(BPF_REG_0, 0),
5584 BPF_EXIT_INSN(),
5585 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5586 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5587 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5588 BPF_MOV64_IMM(BPF_REG_0, 0),
5589 BPF_EXIT_INSN(),
5590 },
5591 .fixup_map2 = { 3 },
5592 .result = REJECT,
5593 .errstr = "R1 min value is negative",
5594 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5595 },
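	/*
	 * Spill/fill of map value pointers: a (possibly NULL) map value
	 * pointer stored to the stack keeps its type when loaded back, so a
	 * later store through the filled register is accepted; under
	 * unprivileged mode the spill itself already counts as an address
	 * leak ("R0 leaks addr").  Related tests cover a store from a
	 * register cleared by the helper call ("R1 !read_ok") and misaligned
	 * value accesses, which need F_NEEDS_EFFICIENT_UNALIGNED_ACCESS.
	 */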
5596 {
Gianluca Borellof0318d02017-01-09 10:19:48 -08005597 "map element value is preserved across register spilling",
5598 .insns = {
5599 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5601 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5602 BPF_LD_MAP_FD(BPF_REG_1, 0),
5603 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5604 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5605 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5606 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5607 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5608 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5609 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5610 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5611 BPF_EXIT_INSN(),
5612 },
5613 .fixup_map2 = { 3 },
5614 .errstr_unpriv = "R0 leaks addr",
5615 .result = ACCEPT,
5616 .result_unpriv = REJECT,
5617 },
5618 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005619 "map element value or null is marked on register spilling",
5620 .insns = {
5621 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5623 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5624 BPF_LD_MAP_FD(BPF_REG_1, 0),
5625 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5626 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5628 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5629 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5630 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5631 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5632 BPF_EXIT_INSN(),
5633 },
5634 .fixup_map2 = { 3 },
5635 .errstr_unpriv = "R0 leaks addr",
5636 .result = ACCEPT,
5637 .result_unpriv = REJECT,
5638 },
5639 {
5640 "map element value store of cleared call register",
5641 .insns = {
5642 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5644 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5645 BPF_LD_MAP_FD(BPF_REG_1, 0),
5646 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5647 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5648 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5649 BPF_EXIT_INSN(),
5650 },
5651 .fixup_map2 = { 3 },
5652 .errstr_unpriv = "R1 !read_ok",
5653 .errstr = "R1 !read_ok",
5654 .result = REJECT,
5655 .result_unpriv = REJECT,
5656 },
5657 {
5658 "map element value with unaligned store",
5659 .insns = {
5660 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5662 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5663 BPF_LD_MAP_FD(BPF_REG_1, 0),
5664 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5666 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5667 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5668 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5669 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5670 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5671 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5672 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5673 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5675 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5676 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5677 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5678 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5680 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5681 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5682 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5683 BPF_EXIT_INSN(),
5684 },
5685 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005686 .errstr_unpriv = "R0 leaks addr",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005687 .result = ACCEPT,
5688 .result_unpriv = REJECT,
5689 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5690 },
5691 {
5692 "map element value with unaligned load",
5693 .insns = {
5694 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5695 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5696 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5697 BPF_LD_MAP_FD(BPF_REG_1, 0),
5698 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5699 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5700 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5701 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5703 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5704 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5705 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5706 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5707 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5709 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5710 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5711 BPF_EXIT_INSN(),
5712 },
5713 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005714 .errstr_unpriv = "R0 leaks addr",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005715 .result = ACCEPT,
5716 .result_unpriv = REJECT,
5717 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5718 },
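	/*
	 * "map element value illegal alu op": AND, 32-bit ADD, DIV, ENDIAN
	 * and a spilled-pointer XADD on a map value pointer are all rejected,
	 * either directly ("bitwise operator &= on pointer", "32-bit pointer
	 * arithmetic prohibited", "pointer arithmetic with /= operator") or
	 * because the result is no longer recognized as a pointer ("invalid
	 * mem access 'inv'").
	 */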
5719 {
5720 "map element value illegal alu op, 1",
5721 .insns = {
5722 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5723 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5724 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5725 BPF_LD_MAP_FD(BPF_REG_1, 0),
5726 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5727 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5728 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5729 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5730 BPF_EXIT_INSN(),
5731 },
5732 .fixup_map2 = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08005733 .errstr = "R0 bitwise operator &= on pointer",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005734 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005735 },
5736 {
5737 "map element value illegal alu op, 2",
5738 .insns = {
5739 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5741 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5742 BPF_LD_MAP_FD(BPF_REG_1, 0),
5743 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5744 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5745 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5746 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5747 BPF_EXIT_INSN(),
5748 },
5749 .fixup_map2 = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08005750 .errstr = "R0 32-bit pointer arithmetic prohibited",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005751 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005752 },
5753 {
5754 "map element value illegal alu op, 3",
5755 .insns = {
5756 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5758 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5759 BPF_LD_MAP_FD(BPF_REG_1, 0),
5760 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5761 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5762 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5763 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5764 BPF_EXIT_INSN(),
5765 },
5766 .fixup_map2 = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08005767 .errstr = "R0 pointer arithmetic with /= operator",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005768 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005769 },
5770 {
5771 "map element value illegal alu op, 4",
5772 .insns = {
5773 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5775 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5776 BPF_LD_MAP_FD(BPF_REG_1, 0),
5777 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5778 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5779 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5780 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5781 BPF_EXIT_INSN(),
5782 },
5783 .fixup_map2 = { 3 },
5784 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5785 .errstr = "invalid mem access 'inv'",
5786 .result = REJECT,
5787 .result_unpriv = REJECT,
5788 },
5789 {
5790 "map element value illegal alu op, 5",
5791 .insns = {
5792 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5794 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5795 BPF_LD_MAP_FD(BPF_REG_1, 0),
5796 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5798 BPF_MOV64_IMM(BPF_REG_3, 4096),
5799 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5800 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5801 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5802 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5803 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5804 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5805 BPF_EXIT_INSN(),
5806 },
5807 .fixup_map2 = { 3 },
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005808 .errstr = "R0 invalid mem access 'inv'",
5809 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005810 },
5811 {
5812 "map element value is preserved across register spilling",
Gianluca Borellof0318d02017-01-09 10:19:48 -08005813 .insns = {
5814 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5816 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5817 BPF_LD_MAP_FD(BPF_REG_1, 0),
5818 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5819 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5820 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5821 offsetof(struct test_val, foo)),
5822 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5823 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5824 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5825 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5826 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5827 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5828 BPF_EXIT_INSN(),
5829 },
5830 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005831 .errstr_unpriv = "R0 leaks addr",
Gianluca Borellof0318d02017-01-09 10:19:48 -08005832 .result = ACCEPT,
5833 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005834 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Gianluca Borellof0318d02017-01-09 10:19:48 -08005835 },
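	/*
	 * "helper access to variable memory": the length passed to
	 * bpf_probe_read() is a runtime value spilled to and reloaded from
	 * the stack, so it must be bounded with a bitwise AND against a
	 * constant or with explicit min/max jumps.  A mask or bound of 65 on
	 * a 64-byte buffer, a missing max check and a missing min check all
	 * fail with the error strings given below.  The AND idiom is roughly
	 * (illustrative sketch only):
	 *
	 *	char buf[64] = {};
	 *	u64 len = ...value reloaded from the stack...;
	 *
	 *	len &= 64;
	 *	if (len > 0)
	 *		bpf_probe_read(buf, len, NULL);
	 */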
Gianluca Borello06c1c042017-01-09 10:19:49 -08005836 {
5837 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5838 .insns = {
5839 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5841 BPF_MOV64_IMM(BPF_REG_0, 0),
5842 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5843 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5844 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5845 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5846 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5847 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5848 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5849 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5850 BPF_MOV64_IMM(BPF_REG_2, 16),
5851 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5852 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5853 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5854 BPF_MOV64_IMM(BPF_REG_4, 0),
5855 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5856 BPF_MOV64_IMM(BPF_REG_3, 0),
5857 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5858 BPF_MOV64_IMM(BPF_REG_0, 0),
5859 BPF_EXIT_INSN(),
5860 },
5861 .result = ACCEPT,
5862 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5863 },
5864 {
5865 "helper access to variable memory: stack, bitwise AND, zero included",
5866 .insns = {
5867 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5869 BPF_MOV64_IMM(BPF_REG_2, 16),
5870 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5871 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5872 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5873 BPF_MOV64_IMM(BPF_REG_3, 0),
5874 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5875 BPF_EXIT_INSN(),
5876 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08005877 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005878 .result = REJECT,
5879 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5880 },
5881 {
5882 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5883 .insns = {
5884 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5885 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5886 BPF_MOV64_IMM(BPF_REG_2, 16),
5887 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5888 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5889 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5890 BPF_MOV64_IMM(BPF_REG_4, 0),
5891 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5892 BPF_MOV64_IMM(BPF_REG_3, 0),
5893 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5894 BPF_MOV64_IMM(BPF_REG_0, 0),
5895 BPF_EXIT_INSN(),
5896 },
5897 .errstr = "invalid stack type R1 off=-64 access_size=65",
5898 .result = REJECT,
5899 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5900 },
5901 {
5902 "helper access to variable memory: stack, JMP, correct bounds",
5903 .insns = {
5904 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5905 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5906 BPF_MOV64_IMM(BPF_REG_0, 0),
5907 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5908 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5909 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5910 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5911 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5912 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5913 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5914 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5915 BPF_MOV64_IMM(BPF_REG_2, 16),
5916 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5917 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5918 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5919 BPF_MOV64_IMM(BPF_REG_4, 0),
5920 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5921 BPF_MOV64_IMM(BPF_REG_3, 0),
5922 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5923 BPF_MOV64_IMM(BPF_REG_0, 0),
5924 BPF_EXIT_INSN(),
5925 },
5926 .result = ACCEPT,
5927 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5928 },
5929 {
5930 "helper access to variable memory: stack, JMP (signed), correct bounds",
5931 .insns = {
5932 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5933 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5934 BPF_MOV64_IMM(BPF_REG_0, 0),
5935 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5936 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5937 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5938 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5939 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5940 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5941 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5942 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5943 BPF_MOV64_IMM(BPF_REG_2, 16),
5944 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5945 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5946 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5947 BPF_MOV64_IMM(BPF_REG_4, 0),
5948 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5949 BPF_MOV64_IMM(BPF_REG_3, 0),
5950 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5951 BPF_MOV64_IMM(BPF_REG_0, 0),
5952 BPF_EXIT_INSN(),
5953 },
5954 .result = ACCEPT,
5955 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5956 },
5957 {
5958 "helper access to variable memory: stack, JMP, bounds + offset",
5959 .insns = {
5960 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5961 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5962 BPF_MOV64_IMM(BPF_REG_2, 16),
5963 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5964 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5965 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5966 BPF_MOV64_IMM(BPF_REG_4, 0),
5967 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5969 BPF_MOV64_IMM(BPF_REG_3, 0),
5970 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5971 BPF_MOV64_IMM(BPF_REG_0, 0),
5972 BPF_EXIT_INSN(),
5973 },
5974 .errstr = "invalid stack type R1 off=-64 access_size=65",
5975 .result = REJECT,
5976 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5977 },
5978 {
5979 "helper access to variable memory: stack, JMP, wrong max",
5980 .insns = {
5981 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5983 BPF_MOV64_IMM(BPF_REG_2, 16),
5984 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5985 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5986 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5987 BPF_MOV64_IMM(BPF_REG_4, 0),
5988 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5989 BPF_MOV64_IMM(BPF_REG_3, 0),
5990 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5991 BPF_MOV64_IMM(BPF_REG_0, 0),
5992 BPF_EXIT_INSN(),
5993 },
5994 .errstr = "invalid stack type R1 off=-64 access_size=65",
5995 .result = REJECT,
5996 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5997 },
5998 {
5999 "helper access to variable memory: stack, JMP, no max check",
6000 .insns = {
6001 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6002 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6003 BPF_MOV64_IMM(BPF_REG_2, 16),
6004 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6005 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6006 BPF_MOV64_IMM(BPF_REG_4, 0),
6007 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6008 BPF_MOV64_IMM(BPF_REG_3, 0),
6009 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6010 BPF_MOV64_IMM(BPF_REG_0, 0),
6011 BPF_EXIT_INSN(),
6012 },
Edward Creef65b1842017-08-07 15:27:12 +01006013 /* because max wasn't checked, signed min is negative */
6014 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006015 .result = REJECT,
6016 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6017 },
6018 {
6019 "helper access to variable memory: stack, JMP, no min check",
6020 .insns = {
6021 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6023 BPF_MOV64_IMM(BPF_REG_2, 16),
6024 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6025 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6026 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
6027 BPF_MOV64_IMM(BPF_REG_3, 0),
6028 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6029 BPF_MOV64_IMM(BPF_REG_0, 0),
6030 BPF_EXIT_INSN(),
6031 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08006032 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006033 .result = REJECT,
6034 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6035 },
6036 {
6037 "helper access to variable memory: stack, JMP (signed), no min check",
6038 .insns = {
6039 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6041 BPF_MOV64_IMM(BPF_REG_2, 16),
6042 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6043 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6044 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
6045 BPF_MOV64_IMM(BPF_REG_3, 0),
6046 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6047 BPF_MOV64_IMM(BPF_REG_0, 0),
6048 BPF_EXIT_INSN(),
6049 },
6050 .errstr = "R2 min value is negative",
6051 .result = REJECT,
6052 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6053 },
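	/* The variants below repeat the variable-length probe_read pattern on
	 * a map value instead of the stack: the length reloaded from the
	 * stack must be bounded (JSGT/JSGE) by sizeof(struct test_val),
	 * minus any constant offset already added to the pointer.
	 */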
6054 {
6055 "helper access to variable memory: map, JMP, correct bounds",
6056 .insns = {
6057 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6059 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6060 BPF_LD_MAP_FD(BPF_REG_1, 0),
6061 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6062 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6063 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6064 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6065 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6066 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6067 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6068 sizeof(struct test_val), 4),
6069 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006070 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006071 BPF_MOV64_IMM(BPF_REG_3, 0),
6072 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6073 BPF_MOV64_IMM(BPF_REG_0, 0),
6074 BPF_EXIT_INSN(),
6075 },
6076 .fixup_map2 = { 3 },
6077 .result = ACCEPT,
6078 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6079 },
6080 {
6081 "helper access to variable memory: map, JMP, wrong max",
6082 .insns = {
6083 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6084 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6085 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6086 BPF_LD_MAP_FD(BPF_REG_1, 0),
6087 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6088 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6089 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6090 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6091 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6092 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6093 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6094 sizeof(struct test_val) + 1, 4),
6095 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006096 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006097 BPF_MOV64_IMM(BPF_REG_3, 0),
6098 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6099 BPF_MOV64_IMM(BPF_REG_0, 0),
6100 BPF_EXIT_INSN(),
6101 },
6102 .fixup_map2 = { 3 },
6103 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
6104 .result = REJECT,
6105 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6106 },
6107 {
6108 "helper access to variable memory: map adjusted, JMP, correct bounds",
6109 .insns = {
6110 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6112 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6113 BPF_LD_MAP_FD(BPF_REG_1, 0),
6114 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6115 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6116 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6118 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6119 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6120 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6121 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6122 sizeof(struct test_val) - 20, 4),
6123 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006124 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006125 BPF_MOV64_IMM(BPF_REG_3, 0),
6126 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6127 BPF_MOV64_IMM(BPF_REG_0, 0),
6128 BPF_EXIT_INSN(),
6129 },
6130 .fixup_map2 = { 3 },
6131 .result = ACCEPT,
6132 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6133 },
6134 {
6135 "helper access to variable memory: map adjusted, JMP, wrong max",
6136 .insns = {
6137 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6139 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6140 BPF_LD_MAP_FD(BPF_REG_1, 0),
6141 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6143 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6144 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6145 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6146 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6147 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6148 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6149 sizeof(struct test_val) - 19, 4),
6150 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006151 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006152 BPF_MOV64_IMM(BPF_REG_3, 0),
6153 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6154 BPF_MOV64_IMM(BPF_REG_0, 0),
6155 BPF_EXIT_INSN(),
6156 },
6157 .fixup_map2 = { 3 },
6158 .errstr = "R1 min value is outside of the array range",
6159 .result = REJECT,
6160 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6161 },
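	/* bpf_csum_diff takes its buffers as ARG_PTR_TO_MEM_OR_NULL: a NULL
	 * pointer is acceptable as long as the corresponding size is provably
	 * zero, but NULL with a non-zero size must be rejected.
	 */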
6162 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006163 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Edward Creef65b1842017-08-07 15:27:12 +01006164 .insns = {
6165 BPF_MOV64_IMM(BPF_REG_1, 0),
6166 BPF_MOV64_IMM(BPF_REG_2, 0),
6167 BPF_MOV64_IMM(BPF_REG_3, 0),
6168 BPF_MOV64_IMM(BPF_REG_4, 0),
6169 BPF_MOV64_IMM(BPF_REG_5, 0),
6170 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6171 BPF_EXIT_INSN(),
6172 },
6173 .result = ACCEPT,
6174 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6175 },
6176 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006177 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006178 .insns = {
6179 BPF_MOV64_IMM(BPF_REG_1, 0),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08006180 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01006181 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6182 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006183 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6184 BPF_MOV64_IMM(BPF_REG_3, 0),
6185 BPF_MOV64_IMM(BPF_REG_4, 0),
6186 BPF_MOV64_IMM(BPF_REG_5, 0),
6187 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6188 BPF_EXIT_INSN(),
6189 },
Edward Creef65b1842017-08-07 15:27:12 +01006190 .errstr = "R1 type=inv expected=fp",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006191 .result = REJECT,
6192 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6193 },
6194 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006195 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006196 .insns = {
6197 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6198 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6199 BPF_MOV64_IMM(BPF_REG_2, 0),
6200 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6201 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
6202 BPF_MOV64_IMM(BPF_REG_3, 0),
6203 BPF_MOV64_IMM(BPF_REG_4, 0),
6204 BPF_MOV64_IMM(BPF_REG_5, 0),
6205 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6206 BPF_EXIT_INSN(),
6207 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08006208 .result = ACCEPT,
6209 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6210 },
6211 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006212 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006213 .insns = {
6214 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6215 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6217 BPF_LD_MAP_FD(BPF_REG_1, 0),
6218 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6219 BPF_FUNC_map_lookup_elem),
6220 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6221 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6222 BPF_MOV64_IMM(BPF_REG_2, 0),
6223 BPF_MOV64_IMM(BPF_REG_3, 0),
6224 BPF_MOV64_IMM(BPF_REG_4, 0),
6225 BPF_MOV64_IMM(BPF_REG_5, 0),
6226 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6227 BPF_EXIT_INSN(),
6228 },
6229 .fixup_map1 = { 3 },
6230 .result = ACCEPT,
6231 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6232 },
6233 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006234 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006235 .insns = {
6236 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6237 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6239 BPF_LD_MAP_FD(BPF_REG_1, 0),
6240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6241 BPF_FUNC_map_lookup_elem),
6242 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6243 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6244 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
6245 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6247 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6248 BPF_MOV64_IMM(BPF_REG_3, 0),
6249 BPF_MOV64_IMM(BPF_REG_4, 0),
6250 BPF_MOV64_IMM(BPF_REG_5, 0),
6251 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6252 BPF_EXIT_INSN(),
6253 },
6254 .fixup_map1 = { 3 },
6255 .result = ACCEPT,
6256 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6257 },
6258 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006259 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006260 .insns = {
6261 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6262 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6264 BPF_LD_MAP_FD(BPF_REG_1, 0),
6265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6266 BPF_FUNC_map_lookup_elem),
6267 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6268 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6269 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6270 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6271 BPF_MOV64_IMM(BPF_REG_3, 0),
6272 BPF_MOV64_IMM(BPF_REG_4, 0),
6273 BPF_MOV64_IMM(BPF_REG_5, 0),
6274 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6275 BPF_EXIT_INSN(),
6276 },
6277 .fixup_map1 = { 3 },
6278 .result = ACCEPT,
6279 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6280 },
6281 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006282 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006283 .insns = {
6284 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6285 offsetof(struct __sk_buff, data)),
6286 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6287 offsetof(struct __sk_buff, data_end)),
6288 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
6289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6290 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
6291 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
6292 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
6293 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6294 BPF_MOV64_IMM(BPF_REG_3, 0),
6295 BPF_MOV64_IMM(BPF_REG_4, 0),
6296 BPF_MOV64_IMM(BPF_REG_5, 0),
6297 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6298 BPF_EXIT_INSN(),
6299 },
6300 .result = ACCEPT,
Gianluca Borello06c1c042017-01-09 10:19:49 -08006301 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006302 .retval = 0 /* csum_diff of 64-byte packet */,
Gianluca Borello06c1c042017-01-09 10:19:49 -08006303 },
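	/* bpf_probe_read's destination argument is not ..._OR_NULL, so a NULL
	 * pointer is rejected even for size 0 ("R1 type=inv expected=fp"),
	 * while a non-NULL stack or map pointer with a possibly-zero size
	 * passes.
	 */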
6304 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006305 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6306 .insns = {
6307 BPF_MOV64_IMM(BPF_REG_1, 0),
6308 BPF_MOV64_IMM(BPF_REG_2, 0),
6309 BPF_MOV64_IMM(BPF_REG_3, 0),
6310 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6311 BPF_EXIT_INSN(),
6312 },
6313 .errstr = "R1 type=inv expected=fp",
6314 .result = REJECT,
6315 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6316 },
6317 {
6318 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6319 .insns = {
6320 BPF_MOV64_IMM(BPF_REG_1, 0),
6321 BPF_MOV64_IMM(BPF_REG_2, 1),
6322 BPF_MOV64_IMM(BPF_REG_3, 0),
6323 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6324 BPF_EXIT_INSN(),
6325 },
6326 .errstr = "R1 type=inv expected=fp",
6327 .result = REJECT,
6328 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6329 },
6330 {
6331 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6332 .insns = {
6333 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6335 BPF_MOV64_IMM(BPF_REG_2, 0),
6336 BPF_MOV64_IMM(BPF_REG_3, 0),
6337 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6338 BPF_EXIT_INSN(),
6339 },
6340 .result = ACCEPT,
6341 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6342 },
6343 {
6344 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6345 .insns = {
6346 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6347 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6349 BPF_LD_MAP_FD(BPF_REG_1, 0),
6350 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6351 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6352 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6353 BPF_MOV64_IMM(BPF_REG_2, 0),
6354 BPF_MOV64_IMM(BPF_REG_3, 0),
6355 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6356 BPF_EXIT_INSN(),
6357 },
6358 .fixup_map1 = { 3 },
6359 .result = ACCEPT,
6360 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6361 },
6362 {
6363 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6364 .insns = {
6365 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6366 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6367 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6368 BPF_LD_MAP_FD(BPF_REG_1, 0),
6369 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6370 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6371 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6372 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6373 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6375 BPF_MOV64_IMM(BPF_REG_3, 0),
6376 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6377 BPF_EXIT_INSN(),
6378 },
6379 .fixup_map1 = { 3 },
6380 .result = ACCEPT,
6381 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6382 },
6383 {
6384 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6385 .insns = {
6386 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6387 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6389 BPF_LD_MAP_FD(BPF_REG_1, 0),
6390 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6391 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6392 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6393 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6394 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
6395 BPF_MOV64_IMM(BPF_REG_3, 0),
6396 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6397 BPF_EXIT_INSN(),
6398 },
6399 .fixup_map1 = { 3 },
6400 .result = ACCEPT,
6401 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6402 },
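	/* Every byte that a variable-length read may hand to the helper has
	 * to be initialized first: the "leak" variant leaves fp-32..fp-25
	 * unwritten and is rejected with "invalid indirect read from stack",
	 * the "no leak" variant initializes all 64 bytes and is accepted.
	 */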
6403 {
Gianluca Borello06c1c042017-01-09 10:19:49 -08006404 "helper access to variable memory: 8 bytes leak",
6405 .insns = {
6406 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6408 BPF_MOV64_IMM(BPF_REG_0, 0),
6409 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6410 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6411 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6412 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6413 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6414 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6415 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08006416 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01006417 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6418 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006419 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
6420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6421 BPF_MOV64_IMM(BPF_REG_3, 0),
6422 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6423 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6424 BPF_EXIT_INSN(),
6425 },
6426 .errstr = "invalid indirect read from stack off -64+32 size 64",
6427 .result = REJECT,
6428 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6429 },
6430 {
6431 "helper access to variable memory: 8 bytes no leak (init memory)",
6432 .insns = {
6433 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6434 BPF_MOV64_IMM(BPF_REG_0, 0),
6435 BPF_MOV64_IMM(BPF_REG_0, 0),
6436 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6437 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6438 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6439 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6440 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6441 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6442 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6443 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6444 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6445 BPF_MOV64_IMM(BPF_REG_2, 0),
6446 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
6447 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
6448 BPF_MOV64_IMM(BPF_REG_3, 0),
6449 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6450 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6451 BPF_EXIT_INSN(),
6452 },
6453 .result = ACCEPT,
6454 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6455 },
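	/* ANDing with a negative immediate, or deriving an index through the
	 * MOD/AND/shift arithmetic below, does not give the verifier a usable
	 * upper bound, so the resulting map-value access is rejected.
	 */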
Josef Bacik29200c12017-02-03 16:25:23 -05006456 {
6457 "invalid and of negative number",
6458 .insns = {
6459 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6460 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6462 BPF_LD_MAP_FD(BPF_REG_1, 0),
6463 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6464 BPF_FUNC_map_lookup_elem),
6465 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Edward Creef65b1842017-08-07 15:27:12 +01006466 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik29200c12017-02-03 16:25:23 -05006467 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
6468 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
6469 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6470 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
6471 offsetof(struct test_val, foo)),
6472 BPF_EXIT_INSN(),
6473 },
6474 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006475 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05006476 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006477 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik29200c12017-02-03 16:25:23 -05006478 },
6479 {
6480 "invalid range check",
6481 .insns = {
6482 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6483 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6484 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6485 BPF_LD_MAP_FD(BPF_REG_1, 0),
6486 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6487 BPF_FUNC_map_lookup_elem),
6488 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
6489 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6490 BPF_MOV64_IMM(BPF_REG_9, 1),
6491 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
6492 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
6493 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
6494 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
6495 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
6496 BPF_MOV32_IMM(BPF_REG_3, 1),
6497 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
6498 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
6499 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
6500 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
6501 BPF_MOV64_REG(BPF_REG_0, 0),
6502 BPF_EXIT_INSN(),
6503 },
6504 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006505 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05006506 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006507 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006508 },
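	/* Map-in-map: the value returned by the outer lookup must be
	 * NULL-checked and passed to the inner lookup unmodified; pointer
	 * arithmetic on it, or a missing NULL check, is rejected.
	 */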
6509 {
6510 "map in map access",
6511 .insns = {
6512 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6513 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6515 BPF_LD_MAP_FD(BPF_REG_1, 0),
6516 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6517 BPF_FUNC_map_lookup_elem),
6518 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6519 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6520 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6522 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6524 BPF_FUNC_map_lookup_elem),
6525 BPF_MOV64_REG(BPF_REG_0, 0),
6526 BPF_EXIT_INSN(),
6527 },
6528 .fixup_map_in_map = { 3 },
6529 .result = ACCEPT,
6530 },
6531 {
6532 "invalid inner map pointer",
6533 .insns = {
6534 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6535 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6536 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6537 BPF_LD_MAP_FD(BPF_REG_1, 0),
6538 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6539 BPF_FUNC_map_lookup_elem),
6540 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6541 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6542 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6543 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6544 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6546 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6547 BPF_FUNC_map_lookup_elem),
6548 BPF_MOV64_REG(BPF_REG_0, 0),
6549 BPF_EXIT_INSN(),
6550 },
6551 .fixup_map_in_map = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08006552 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006553 .result = REJECT,
6554 },
6555 {
6556 "forgot null checking on the inner map pointer",
6557 .insns = {
6558 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6559 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6561 BPF_LD_MAP_FD(BPF_REG_1, 0),
6562 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6563 BPF_FUNC_map_lookup_elem),
6564 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6565 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6567 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6569 BPF_FUNC_map_lookup_elem),
6570 BPF_MOV64_REG(BPF_REG_0, 0),
6571 BPF_EXIT_INSN(),
6572 },
6573 .fixup_map_in_map = { 3 },
6574 .errstr = "R1 type=map_value_or_null expected=map_ptr",
6575 .result = REJECT,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006576 },
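	/* LD_ABS/LD_IND clobber the caller-saved registers R1-R5, so reading
	 * any of them after the load fails with "!read_ok"; callee-saved R7
	 * survives.
	 */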
6577 {
6578 "ld_abs: check calling conv, r1",
6579 .insns = {
6580 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6581 BPF_MOV64_IMM(BPF_REG_1, 0),
6582 BPF_LD_ABS(BPF_W, -0x200000),
6583 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6584 BPF_EXIT_INSN(),
6585 },
6586 .errstr = "R1 !read_ok",
6587 .result = REJECT,
6588 },
6589 {
6590 "ld_abs: check calling conv, r2",
6591 .insns = {
6592 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6593 BPF_MOV64_IMM(BPF_REG_2, 0),
6594 BPF_LD_ABS(BPF_W, -0x200000),
6595 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6596 BPF_EXIT_INSN(),
6597 },
6598 .errstr = "R2 !read_ok",
6599 .result = REJECT,
6600 },
6601 {
6602 "ld_abs: check calling conv, r3",
6603 .insns = {
6604 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6605 BPF_MOV64_IMM(BPF_REG_3, 0),
6606 BPF_LD_ABS(BPF_W, -0x200000),
6607 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6608 BPF_EXIT_INSN(),
6609 },
6610 .errstr = "R3 !read_ok",
6611 .result = REJECT,
6612 },
6613 {
6614 "ld_abs: check calling conv, r4",
6615 .insns = {
6616 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6617 BPF_MOV64_IMM(BPF_REG_4, 0),
6618 BPF_LD_ABS(BPF_W, -0x200000),
6619 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6620 BPF_EXIT_INSN(),
6621 },
6622 .errstr = "R4 !read_ok",
6623 .result = REJECT,
6624 },
6625 {
6626 "ld_abs: check calling conv, r5",
6627 .insns = {
6628 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6629 BPF_MOV64_IMM(BPF_REG_5, 0),
6630 BPF_LD_ABS(BPF_W, -0x200000),
6631 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6632 BPF_EXIT_INSN(),
6633 },
6634 .errstr = "R5 !read_ok",
6635 .result = REJECT,
6636 },
6637 {
6638 "ld_abs: check calling conv, r7",
6639 .insns = {
6640 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6641 BPF_MOV64_IMM(BPF_REG_7, 0),
6642 BPF_LD_ABS(BPF_W, -0x200000),
6643 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6644 BPF_EXIT_INSN(),
6645 },
6646 .result = ACCEPT,
6647 },
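	/* A helper that may move skb data (bpf_skb_vlan_push here) forces the
	 * program to re-establish the skb context in R6 before issuing more
	 * LD_ABS loads, which is what the save/restore through R7 does.
	 */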
6648 {
Daniel Borkmann87ab8192017-12-14 21:07:27 +01006649 "ld_abs: tests on r6 and skb data reload helper",
6650 .insns = {
6651 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6652 BPF_LD_ABS(BPF_B, 0),
6653 BPF_LD_ABS(BPF_H, 0),
6654 BPF_LD_ABS(BPF_W, 0),
6655 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
6656 BPF_MOV64_IMM(BPF_REG_6, 0),
6657 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
6658 BPF_MOV64_IMM(BPF_REG_2, 1),
6659 BPF_MOV64_IMM(BPF_REG_3, 2),
6660 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6661 BPF_FUNC_skb_vlan_push),
6662 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
6663 BPF_LD_ABS(BPF_B, 0),
6664 BPF_LD_ABS(BPF_H, 0),
6665 BPF_LD_ABS(BPF_W, 0),
6666 BPF_MOV64_IMM(BPF_REG_0, 42),
6667 BPF_EXIT_INSN(),
6668 },
6669 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6670 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006671 .retval = 42 /* ultimate return value */,
Daniel Borkmann87ab8192017-12-14 21:07:27 +01006672 },
6673 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006674 "ld_ind: check calling conv, r1",
6675 .insns = {
6676 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6677 BPF_MOV64_IMM(BPF_REG_1, 1),
6678 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
6679 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6680 BPF_EXIT_INSN(),
6681 },
6682 .errstr = "R1 !read_ok",
6683 .result = REJECT,
6684 },
6685 {
6686 "ld_ind: check calling conv, r2",
6687 .insns = {
6688 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6689 BPF_MOV64_IMM(BPF_REG_2, 1),
6690 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
6691 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6692 BPF_EXIT_INSN(),
6693 },
6694 .errstr = "R2 !read_ok",
6695 .result = REJECT,
6696 },
6697 {
6698 "ld_ind: check calling conv, r3",
6699 .insns = {
6700 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6701 BPF_MOV64_IMM(BPF_REG_3, 1),
6702 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
6703 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6704 BPF_EXIT_INSN(),
6705 },
6706 .errstr = "R3 !read_ok",
6707 .result = REJECT,
6708 },
6709 {
6710 "ld_ind: check calling conv, r4",
6711 .insns = {
6712 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6713 BPF_MOV64_IMM(BPF_REG_4, 1),
6714 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
6715 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6716 BPF_EXIT_INSN(),
6717 },
6718 .errstr = "R4 !read_ok",
6719 .result = REJECT,
6720 },
6721 {
6722 "ld_ind: check calling conv, r5",
6723 .insns = {
6724 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6725 BPF_MOV64_IMM(BPF_REG_5, 1),
6726 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
6727 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6728 BPF_EXIT_INSN(),
6729 },
6730 .errstr = "R5 !read_ok",
6731 .result = REJECT,
6732 },
6733 {
6734 "ld_ind: check calling conv, r7",
6735 .insns = {
6736 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6737 BPF_MOV64_IMM(BPF_REG_7, 1),
6738 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
6739 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6740 BPF_EXIT_INSN(),
6741 },
6742 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006743 .retval = 1,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006744 },
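	/* Partial loads from bpf_perf_event_data->sample_period are permitted,
	 * but the offset of the interesting byte/half/word depends on host
	 * endianness, hence the __BYTE_ORDER conditionals below.
	 */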
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006745 {
6746 "check bpf_perf_event_data->sample_period byte load permitted",
6747 .insns = {
6748 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006749#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006750 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6751 offsetof(struct bpf_perf_event_data, sample_period)),
6752#else
6753 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6754 offsetof(struct bpf_perf_event_data, sample_period) + 7),
6755#endif
6756 BPF_EXIT_INSN(),
6757 },
6758 .result = ACCEPT,
6759 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6760 },
6761 {
6762 "check bpf_perf_event_data->sample_period half load permitted",
6763 .insns = {
6764 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006765#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006766 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6767 offsetof(struct bpf_perf_event_data, sample_period)),
6768#else
6769 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6770 offsetof(struct bpf_perf_event_data, sample_period) + 6),
6771#endif
6772 BPF_EXIT_INSN(),
6773 },
6774 .result = ACCEPT,
6775 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6776 },
6777 {
6778 "check bpf_perf_event_data->sample_period word load permitted",
6779 .insns = {
6780 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006781#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006782 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6783 offsetof(struct bpf_perf_event_data, sample_period)),
6784#else
6785 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6786 offsetof(struct bpf_perf_event_data, sample_period) + 4),
6787#endif
6788 BPF_EXIT_INSN(),
6789 },
6790 .result = ACCEPT,
6791 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6792 },
6793 {
6794 "check bpf_perf_event_data->sample_period dword load permitted",
6795 .insns = {
6796 BPF_MOV64_IMM(BPF_REG_0, 0),
6797 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6798 offsetof(struct bpf_perf_event_data, sample_period)),
6799 BPF_EXIT_INSN(),
6800 },
6801 .result = ACCEPT,
6802 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6803 },
6804 {
6805 "check skb->data half load not permitted",
6806 .insns = {
6807 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006808#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006809 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6810 offsetof(struct __sk_buff, data)),
6811#else
6812 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6813 offsetof(struct __sk_buff, data) + 2),
6814#endif
6815 BPF_EXIT_INSN(),
6816 },
6817 .result = REJECT,
6818 .errstr = "invalid bpf_context access",
6819 },
6820 {
6821 "check skb->tc_classid half load not permitted for lwt prog",
6822 .insns = {
6823 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006824#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006825 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6826 offsetof(struct __sk_buff, tc_classid)),
6827#else
6828 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6829 offsetof(struct __sk_buff, tc_classid) + 2),
6830#endif
6831 BPF_EXIT_INSN(),
6832 },
6833 .result = REJECT,
6834 .errstr = "invalid bpf_context access",
6835 .prog_type = BPF_PROG_TYPE_LWT_IN,
6836 },
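	/* The "mixing signed and unsigned" series loads a value from the
	 * stack and bounds it with a mix of unsigned (JGT/JGE) and signed
	 * (JSGT/JSGE) comparisons.  Variants that leave the signed minimum
	 * unbounded are rejected ("unbounded min value"); the accepted ones
	 * pin both bounds, e.g. variant 4, which masks the value down to
	 * [0, 1] before using it as an offset.
	 */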
Edward Creeb7122962017-07-21 00:00:24 +02006837 {
6838 "bounds checks mixing signed and unsigned, positive bounds",
6839 .insns = {
6840 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6841 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6843 BPF_LD_MAP_FD(BPF_REG_1, 0),
6844 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6845 BPF_FUNC_map_lookup_elem),
6846 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6847 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6848 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6849 BPF_MOV64_IMM(BPF_REG_2, 2),
6850 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6851 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6852 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6853 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6854 BPF_MOV64_IMM(BPF_REG_0, 0),
6855 BPF_EXIT_INSN(),
6856 },
6857 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006858 .errstr = "unbounded min value",
Edward Creeb7122962017-07-21 00:00:24 +02006859 .result = REJECT,
Edward Creeb7122962017-07-21 00:00:24 +02006860 },
6861 {
6862 "bounds checks mixing signed and unsigned",
6863 .insns = {
6864 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6865 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6866 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6867 BPF_LD_MAP_FD(BPF_REG_1, 0),
6868 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6869 BPF_FUNC_map_lookup_elem),
6870 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6871 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6872 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6873 BPF_MOV64_IMM(BPF_REG_2, -1),
6874 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6875 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6876 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6877 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6878 BPF_MOV64_IMM(BPF_REG_0, 0),
6879 BPF_EXIT_INSN(),
6880 },
6881 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006882 .errstr = "unbounded min value",
Edward Creeb7122962017-07-21 00:00:24 +02006883 .result = REJECT,
Edward Creeb7122962017-07-21 00:00:24 +02006884 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006885 {
6886 "bounds checks mixing signed and unsigned, variant 2",
6887 .insns = {
6888 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6889 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6891 BPF_LD_MAP_FD(BPF_REG_1, 0),
6892 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6893 BPF_FUNC_map_lookup_elem),
6894 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6895 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6896 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6897 BPF_MOV64_IMM(BPF_REG_2, -1),
6898 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6899 BPF_MOV64_IMM(BPF_REG_8, 0),
6900 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6901 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6902 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6903 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6904 BPF_MOV64_IMM(BPF_REG_0, 0),
6905 BPF_EXIT_INSN(),
6906 },
6907 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006908 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006909 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006910 },
6911 {
6912 "bounds checks mixing signed and unsigned, variant 3",
6913 .insns = {
6914 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6915 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6917 BPF_LD_MAP_FD(BPF_REG_1, 0),
6918 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6919 BPF_FUNC_map_lookup_elem),
6920 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6921 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6922 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6923 BPF_MOV64_IMM(BPF_REG_2, -1),
6924 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6925 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6926 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6927 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6928 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6929 BPF_MOV64_IMM(BPF_REG_0, 0),
6930 BPF_EXIT_INSN(),
6931 },
6932 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006933 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006934 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006935 },
6936 {
6937 "bounds checks mixing signed and unsigned, variant 4",
6938 .insns = {
6939 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6940 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6941 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6942 BPF_LD_MAP_FD(BPF_REG_1, 0),
6943 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6944 BPF_FUNC_map_lookup_elem),
6945 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6946 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6947 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6948 BPF_MOV64_IMM(BPF_REG_2, 1),
6949 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6950 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6951 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6952 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6953 BPF_MOV64_IMM(BPF_REG_0, 0),
6954 BPF_EXIT_INSN(),
6955 },
6956 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006957 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006958 },
6959 {
6960 "bounds checks mixing signed and unsigned, variant 5",
6961 .insns = {
6962 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6963 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6964 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6965 BPF_LD_MAP_FD(BPF_REG_1, 0),
6966 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6967 BPF_FUNC_map_lookup_elem),
6968 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6969 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6970 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6971 BPF_MOV64_IMM(BPF_REG_2, -1),
6972 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6973 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6975 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6976 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6977 BPF_MOV64_IMM(BPF_REG_0, 0),
6978 BPF_EXIT_INSN(),
6979 },
6980 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006981 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006982 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006983 },
6984 {
6985 "bounds checks mixing signed and unsigned, variant 6",
6986 .insns = {
6987 BPF_MOV64_IMM(BPF_REG_2, 0),
6988 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6989 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6990 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6991 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6992 BPF_MOV64_IMM(BPF_REG_6, -1),
6993 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6994 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6995 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6996 BPF_MOV64_IMM(BPF_REG_5, 0),
6997 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6998 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6999 BPF_FUNC_skb_load_bytes),
7000 BPF_MOV64_IMM(BPF_REG_0, 0),
7001 BPF_EXIT_INSN(),
7002 },
Daniel Borkmann86412502017-07-21 00:00:25 +02007003 .errstr = "R4 min value is negative, either use unsigned",
7004 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007005 },
7006 {
7007 "bounds checks mixing signed and unsigned, variant 7",
7008 .insns = {
7009 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7010 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7012 BPF_LD_MAP_FD(BPF_REG_1, 0),
7013 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7014 BPF_FUNC_map_lookup_elem),
7015 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7016 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7017 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7018 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
7019 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7020 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7021 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7022 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7023 BPF_MOV64_IMM(BPF_REG_0, 0),
7024 BPF_EXIT_INSN(),
7025 },
7026 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007027 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007028 },
7029 {
7030 "bounds checks mixing signed and unsigned, variant 8",
7031 .insns = {
7032 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7033 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7035 BPF_LD_MAP_FD(BPF_REG_1, 0),
7036 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7037 BPF_FUNC_map_lookup_elem),
Daniel Borkmann86412502017-07-21 00:00:25 +02007038 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7039 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7040 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7041 BPF_MOV64_IMM(BPF_REG_2, -1),
7042 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7043 BPF_MOV64_IMM(BPF_REG_0, 0),
7044 BPF_EXIT_INSN(),
7045 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7046 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7047 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7048 BPF_MOV64_IMM(BPF_REG_0, 0),
7049 BPF_EXIT_INSN(),
7050 },
7051 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007052 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007053 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007054 },
7055 {
Edward Creef65b1842017-08-07 15:27:12 +01007056 "bounds checks mixing signed and unsigned, variant 9",
Daniel Borkmann86412502017-07-21 00:00:25 +02007057 .insns = {
7058 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7059 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7061 BPF_LD_MAP_FD(BPF_REG_1, 0),
7062 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7063 BPF_FUNC_map_lookup_elem),
7064 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7065 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7066 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7067 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
7068 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7069 BPF_MOV64_IMM(BPF_REG_0, 0),
7070 BPF_EXIT_INSN(),
7071 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7072 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7073 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7074 BPF_MOV64_IMM(BPF_REG_0, 0),
7075 BPF_EXIT_INSN(),
7076 },
7077 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007078 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007079 },
7080 {
Edward Creef65b1842017-08-07 15:27:12 +01007081 "bounds checks mixing signed and unsigned, variant 10",
Daniel Borkmann86412502017-07-21 00:00:25 +02007082 .insns = {
7083 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7084 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7086 BPF_LD_MAP_FD(BPF_REG_1, 0),
7087 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7088 BPF_FUNC_map_lookup_elem),
7089 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7090 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7091 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7092 BPF_MOV64_IMM(BPF_REG_2, 0),
7093 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7094 BPF_MOV64_IMM(BPF_REG_0, 0),
7095 BPF_EXIT_INSN(),
7096 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7097 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7098 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7099 BPF_MOV64_IMM(BPF_REG_0, 0),
7100 BPF_EXIT_INSN(),
7101 },
7102 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007103 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007104 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007105 },
7106 {
Edward Creef65b1842017-08-07 15:27:12 +01007107 "bounds checks mixing signed and unsigned, variant 11",
Daniel Borkmann86412502017-07-21 00:00:25 +02007108 .insns = {
7109 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7110 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7112 BPF_LD_MAP_FD(BPF_REG_1, 0),
7113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7114 BPF_FUNC_map_lookup_elem),
7115 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7116 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7117 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7118 BPF_MOV64_IMM(BPF_REG_2, -1),
7119 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7120 /* Dead branch. */
7121 BPF_MOV64_IMM(BPF_REG_0, 0),
7122 BPF_EXIT_INSN(),
7123 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7124 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7125 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7126 BPF_MOV64_IMM(BPF_REG_0, 0),
7127 BPF_EXIT_INSN(),
7128 },
7129 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007130 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007131 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007132 },
7133 {
Edward Creef65b1842017-08-07 15:27:12 +01007134 "bounds checks mixing signed and unsigned, variant 12",
Daniel Borkmann86412502017-07-21 00:00:25 +02007135 .insns = {
7136 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7137 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7139 BPF_LD_MAP_FD(BPF_REG_1, 0),
7140 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7141 BPF_FUNC_map_lookup_elem),
7142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7143 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7144 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7145 BPF_MOV64_IMM(BPF_REG_2, -6),
7146 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7147 BPF_MOV64_IMM(BPF_REG_0, 0),
7148 BPF_EXIT_INSN(),
7149 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7150 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7151 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7152 BPF_MOV64_IMM(BPF_REG_0, 0),
7153 BPF_EXIT_INSN(),
7154 },
7155 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007156 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007157 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007158 },
7159 {
Edward Creef65b1842017-08-07 15:27:12 +01007160 "bounds checks mixing signed and unsigned, variant 13",
Daniel Borkmann86412502017-07-21 00:00:25 +02007161 .insns = {
7162 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7163 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7164 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7165 BPF_LD_MAP_FD(BPF_REG_1, 0),
7166 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7167 BPF_FUNC_map_lookup_elem),
7168 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7169 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7170 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7171 BPF_MOV64_IMM(BPF_REG_2, 2),
7172 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7173 BPF_MOV64_IMM(BPF_REG_7, 1),
7174 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
7175 BPF_MOV64_IMM(BPF_REG_0, 0),
7176 BPF_EXIT_INSN(),
7177 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
7178 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
7179 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
7180 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7181 BPF_MOV64_IMM(BPF_REG_0, 0),
7182 BPF_EXIT_INSN(),
7183 },
7184 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007185 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007186 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007187 },
7188 {
Edward Creef65b1842017-08-07 15:27:12 +01007189 "bounds checks mixing signed and unsigned, variant 14",
Daniel Borkmann86412502017-07-21 00:00:25 +02007190 .insns = {
7191 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
7192 offsetof(struct __sk_buff, mark)),
7193 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7194 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7196 BPF_LD_MAP_FD(BPF_REG_1, 0),
7197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7198 BPF_FUNC_map_lookup_elem),
7199 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7200 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7201 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7202 BPF_MOV64_IMM(BPF_REG_2, -1),
7203 BPF_MOV64_IMM(BPF_REG_8, 2),
7204 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
7205 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
7206 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7207 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7208 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7209 BPF_MOV64_IMM(BPF_REG_0, 0),
7210 BPF_EXIT_INSN(),
7211 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
7212 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
7213 },
7214 .fixup_map1 = { 4 },
Daniel Borkmann6f161012018-01-18 01:15:21 +01007215 .errstr = "R0 invalid mem access 'inv'",
Daniel Borkmann86412502017-07-21 00:00:25 +02007216 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007217 },
7218 {
Edward Creef65b1842017-08-07 15:27:12 +01007219 "bounds checks mixing signed and unsigned, variant 15",
Daniel Borkmann86412502017-07-21 00:00:25 +02007220 .insns = {
7221 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7222 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7223 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7224 BPF_LD_MAP_FD(BPF_REG_1, 0),
7225 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7226 BPF_FUNC_map_lookup_elem),
7227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7228 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7229 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7230 BPF_MOV64_IMM(BPF_REG_2, -6),
7231 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7232 BPF_MOV64_IMM(BPF_REG_0, 0),
7233 BPF_EXIT_INSN(),
7234 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7235 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
7236 BPF_MOV64_IMM(BPF_REG_0, 0),
7237 BPF_EXIT_INSN(),
7238 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7239 BPF_MOV64_IMM(BPF_REG_0, 0),
7240 BPF_EXIT_INSN(),
7241 },
7242 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007243 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007244 .result = REJECT,
7245 .result_unpriv = REJECT,
7246 },
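	/* Subtracting two bytes that are each in [0x00, 0xff] can go
	 * negative, so the difference (with or without the following RSH)
	 * may not be used as a map-value offset.
	 */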
Edward Cree545722c2017-07-21 14:36:57 +01007247 {
Edward Creef65b1842017-08-07 15:27:12 +01007248 "subtraction bounds (map value) variant 1",
Edward Cree545722c2017-07-21 14:36:57 +01007249 .insns = {
7250 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7251 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7253 BPF_LD_MAP_FD(BPF_REG_1, 0),
7254 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7255 BPF_FUNC_map_lookup_elem),
7256 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7257 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7258 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
7259 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7260 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
7261 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7262 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
7263 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7264 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7265 BPF_EXIT_INSN(),
7266 BPF_MOV64_IMM(BPF_REG_0, 0),
7267 BPF_EXIT_INSN(),
7268 },
7269 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007270 .errstr = "R0 max value is outside of the array range",
7271 .result = REJECT,
7272 },
7273 {
7274 "subtraction bounds (map value) variant 2",
7275 .insns = {
7276 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7277 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7279 BPF_LD_MAP_FD(BPF_REG_1, 0),
7280 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7281 BPF_FUNC_map_lookup_elem),
7282 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7283 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7284 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
7285 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7286 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
7287 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7288 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7289 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7290 BPF_EXIT_INSN(),
7291 BPF_MOV64_IMM(BPF_REG_0, 0),
7292 BPF_EXIT_INSN(),
7293 },
7294 .fixup_map1 = { 3 },
Edward Cree545722c2017-07-21 14:36:57 +01007295 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
7296 .result = REJECT,
Edward Cree545722c2017-07-21 14:36:57 +01007297 },
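	/* BPF_MOV64_IMM sign-extends its 32-bit immediate while BPF_MOV32_IMM
	 * zero-extends, so 0xffffffff shifted right by 32 is 0 in the MOV32
	 * case but 0xffff'ffff in the MOV64 cases, yielding an out-of-bounds
	 * pointer that must be rejected.
	 */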
Edward Cree69c4e8a2017-08-07 15:29:51 +01007298 {
Jann Horn2255f8d2017-12-18 20:12:01 -08007299 "bounds check based on zero-extended MOV",
7300 .insns = {
7301 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7302 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7304 BPF_LD_MAP_FD(BPF_REG_1, 0),
7305 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7306 BPF_FUNC_map_lookup_elem),
7307 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7308 /* r2 = 0x0000'0000'ffff'ffff */
7309 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
7310 /* r2 = 0 */
7311 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7312 /* no-op */
7313 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7314 /* access at offset 0 */
7315 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7316 /* exit */
7317 BPF_MOV64_IMM(BPF_REG_0, 0),
7318 BPF_EXIT_INSN(),
7319 },
7320 .fixup_map1 = { 3 },
7321 .result = ACCEPT
7322 },
7323 {
7324 "bounds check based on sign-extended MOV. test1",
7325 .insns = {
7326 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7327 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7329 BPF_LD_MAP_FD(BPF_REG_1, 0),
7330 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7331 BPF_FUNC_map_lookup_elem),
7332 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7333 /* r2 = 0xffff'ffff'ffff'ffff */
7334 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7335 /* r2 = 0xffff'ffff */
7336 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7337 /* r0 = <oob pointer> */
7338 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7339 /* access to OOB pointer */
7340 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7341 /* exit */
7342 BPF_MOV64_IMM(BPF_REG_0, 0),
7343 BPF_EXIT_INSN(),
7344 },
7345 .fixup_map1 = { 3 },
7346 .errstr = "map_value pointer and 4294967295",
7347 .result = REJECT
7348 },
7349 {
7350 "bounds check based on sign-extended MOV. test2",
7351 .insns = {
7352 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7353 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7354 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7355 BPF_LD_MAP_FD(BPF_REG_1, 0),
7356 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7357 BPF_FUNC_map_lookup_elem),
7358 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7359 /* r2 = 0xffff'ffff'ffff'ffff */
7360 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7361 /* r2 = 0xfff'ffff */
7362 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
7363 /* r0 = <oob pointer> */
7364 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7365 /* access to OOB pointer */
7366 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7367 /* exit */
7368 BPF_MOV64_IMM(BPF_REG_0, 0),
7369 BPF_EXIT_INSN(),
7370 },
7371 .fixup_map1 = { 3 },
7372 .errstr = "R0 min value is outside of the array range",
7373 .result = REJECT
7374 },
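	/* reg_off + var_off + insn_off: the constant pointer offset, the
	 * variable part and the instruction offset are summed; once the total
	 * can exceed what the 8-byte map value allows, the access is rejected
	 * (see the "value_size=8 off=1073741825" error).
	 */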
7375 {
7376 "bounds check based on reg_off + var_off + insn_off. test1",
7377 .insns = {
7378 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7379 offsetof(struct __sk_buff, mark)),
7380 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7381 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7383 BPF_LD_MAP_FD(BPF_REG_1, 0),
7384 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7385 BPF_FUNC_map_lookup_elem),
7386 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7387 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
7389 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7391 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7392 BPF_MOV64_IMM(BPF_REG_0, 0),
7393 BPF_EXIT_INSN(),
7394 },
7395 .fixup_map1 = { 4 },
7396 .errstr = "value_size=8 off=1073741825",
7397 .result = REJECT,
7398 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7399 },
7400 {
7401 "bounds check based on reg_off + var_off + insn_off. test2",
7402 .insns = {
7403 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7404 offsetof(struct __sk_buff, mark)),
7405 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7406 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7408 BPF_LD_MAP_FD(BPF_REG_1, 0),
7409 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7410 BPF_FUNC_map_lookup_elem),
7411 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7412 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
7414 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7415 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7416 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7417 BPF_MOV64_IMM(BPF_REG_0, 0),
7418 BPF_EXIT_INSN(),
7419 },
7420 .fixup_map1 = { 4 },
7421 .errstr = "value 1073741823",
7422 .result = REJECT,
7423 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7424 },
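	/* 32-bit truncation of a 64-bit range: a range that does not cross a
	 * 2^32 boundary truncates to the precise [0x00, 0xff] and is accepted,
	 * while a boundary-crossing range becomes effectively unbounded after
	 * the ALU32/MOV32 truncation and the access is rejected.
	 */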
7425 {
7426 "bounds check after truncation of non-boundary-crossing range",
7427 .insns = {
7428 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7429 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7431 BPF_LD_MAP_FD(BPF_REG_1, 0),
7432 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7433 BPF_FUNC_map_lookup_elem),
7434 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7435 /* r1 = [0x00, 0xff] */
7436 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7437 BPF_MOV64_IMM(BPF_REG_2, 1),
7438 /* r2 = 0x10'0000'0000 */
7439 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
7440 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
7441 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7442 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
7443 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7444 /* r1 = [0x00, 0xff] */
7445 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
7446 /* r1 = 0 */
7447 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7448 /* no-op */
7449 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7450 /* access at offset 0 */
7451 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7452 /* exit */
7453 BPF_MOV64_IMM(BPF_REG_0, 0),
7454 BPF_EXIT_INSN(),
7455 },
7456 .fixup_map1 = { 3 },
7457 .result = ACCEPT
7458 },
7459 {
7460 "bounds check after truncation of boundary-crossing range (1)",
7461 .insns = {
7462 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7463 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7465 BPF_LD_MAP_FD(BPF_REG_1, 0),
7466 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7467 BPF_FUNC_map_lookup_elem),
7468 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7469 /* r1 = [0x00, 0xff] */
7470 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7471 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7472 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7473 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7474 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7475 * [0x0000'0000, 0x0000'007f]
7476 */
7477 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
7478 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7479 /* r1 = [0x00, 0xff] or
7480 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7481 */
7482 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7483 /* r1 = 0 or
7484 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7485 */
7486 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7487 /* no-op or OOB pointer computation */
7488 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7489 /* potentially OOB access */
7490 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7491 /* exit */
7492 BPF_MOV64_IMM(BPF_REG_0, 0),
7493 BPF_EXIT_INSN(),
7494 },
7495 .fixup_map1 = { 3 },
7496 /* not actually fully unbounded, but the bound is very high */
7497 .errstr = "R0 unbounded memory access",
7498 .result = REJECT
7499 },
7500 {
7501 "bounds check after truncation of boundary-crossing range (2)",
7502 .insns = {
7503 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7504 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7506 BPF_LD_MAP_FD(BPF_REG_1, 0),
7507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7508 BPF_FUNC_map_lookup_elem),
7509 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7510 /* r1 = [0x00, 0xff] */
7511 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7512 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7513 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7515 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7516 * [0x0000'0000, 0x0000'007f]
7517 * difference to previous test: truncation via MOV32
7518 * instead of ALU32.
7519 */
7520 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
7521 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7522 /* r1 = [0x00, 0xff] or
7523 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7524 */
7525 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7526 /* r1 = 0 or
7527 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7528 */
7529 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7530 /* no-op or OOB pointer computation */
7531 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7532 /* potentially OOB access */
7533 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7534 /* exit */
7535 BPF_MOV64_IMM(BPF_REG_0, 0),
7536 BPF_EXIT_INSN(),
7537 },
7538 .fixup_map1 = { 3 },
7539 /* not actually fully unbounded, but the bound is very high */
7540 .errstr = "R0 unbounded memory access",
7541 .result = REJECT
7542 },
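	/* A 32-bit ADD that wraps back to a known constant (here 0) keeps
	 * exact bounds, so the access at offset 0 below is accepted.
	 */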
7543 {
7544 "bounds check after wrapping 32-bit addition",
7545 .insns = {
7546 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7547 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7549 BPF_LD_MAP_FD(BPF_REG_1, 0),
7550 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7551 BPF_FUNC_map_lookup_elem),
7552 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7553 /* r1 = 0x7fff'ffff */
7554 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
7555 /* r1 = 0xffff'fffe */
7556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7557 /* r1 = 0 */
7558 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
7559 /* no-op */
7560 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7561 /* access at offset 0 */
7562 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7563 /* exit */
7564 BPF_MOV64_IMM(BPF_REG_0, 0),
7565 BPF_EXIT_INSN(),
7566 },
7567 .fixup_map1 = { 3 },
7568 .result = ACCEPT
7569 },
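	/* A 32-bit shift by a count of 32 has an undefined result, so the
	 * verifier has to treat r1 as unknown; even masked to [0, 0xffff]
	 * the offset can still fall outside the map value, hence the
	 * rejection below.
	 */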
7570 {
7571 "bounds check after shift with oversized count operand",
7572 .insns = {
7573 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7574 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7575 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7576 BPF_LD_MAP_FD(BPF_REG_1, 0),
7577 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7578 BPF_FUNC_map_lookup_elem),
7579 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7580 BPF_MOV64_IMM(BPF_REG_2, 32),
7581 BPF_MOV64_IMM(BPF_REG_1, 1),
7582 /* r1 = (u32)1 << (u32)32 = ? */
7583 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
7584 /* r1 = [0x0000, 0xffff] */
7585 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
7586 /* computes unknown pointer, potentially OOB */
7587 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7588 /* potentially OOB access */
7589 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7590 /* exit */
7591 BPF_MOV64_IMM(BPF_REG_0, 0),
7592 BPF_EXIT_INSN(),
7593 },
7594 .fixup_map1 = { 3 },
7595 .errstr = "R0 max value is outside of the array range",
7596 .result = REJECT
7597 },
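	/* Right shifts of a maybe-negative value leave a huge upper bound,
	 * so the computed pointer below must be rejected as unbounded.
	 */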
7598 {
7599 "bounds check after right shift of maybe-negative number",
7600 .insns = {
7601 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7602 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7603 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7604 BPF_LD_MAP_FD(BPF_REG_1, 0),
7605 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7606 BPF_FUNC_map_lookup_elem),
7607 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7608 /* r1 = [0x00, 0xff] */
7609 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7610 /* r1 = [-0x01, 0xfe] */
7611 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
7612 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7613 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7614 /* r1 = 0 or 0xffff'ffff'ffff */
7615 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7616 /* computes unknown pointer, potentially OOB */
7617 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7618 /* potentially OOB access */
7619 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7620 /* exit */
7621 BPF_MOV64_IMM(BPF_REG_0, 0),
7622 BPF_EXIT_INSN(),
7623 },
7624 .fixup_map1 = { 3 },
7625 .errstr = "R0 unbounded memory access",
7626 .result = REJECT
7627 },
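	/* The following four tests build map value pointers with offsets
	 * large enough to overflow the signed 32-bit off+size check; all
	 * of them must be rejected before the load is reached.
	 */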
7628 {
7629 "bounds check map access with off+size signed 32bit overflow. test1",
7630 .insns = {
7631 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7632 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7633 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7634 BPF_LD_MAP_FD(BPF_REG_1, 0),
7635 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7636 BPF_FUNC_map_lookup_elem),
7637 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7638 BPF_EXIT_INSN(),
7639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7640 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7641 BPF_JMP_A(0),
7642 BPF_EXIT_INSN(),
7643 },
7644 .fixup_map1 = { 3 },
7645 .errstr = "map_value pointer and 2147483646",
7646 .result = REJECT
7647 },
7648 {
7649 "bounds check map access with off+size signed 32bit overflow. test2",
7650 .insns = {
7651 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7652 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7654 BPF_LD_MAP_FD(BPF_REG_1, 0),
7655 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7656 BPF_FUNC_map_lookup_elem),
7657 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7658 BPF_EXIT_INSN(),
7659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7662 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7663 BPF_JMP_A(0),
7664 BPF_EXIT_INSN(),
7665 },
7666 .fixup_map1 = { 3 },
7667 .errstr = "pointer offset 1073741822",
7668 .result = REJECT
7669 },
7670 {
7671 "bounds check map access with off+size signed 32bit overflow. test3",
7672 .insns = {
7673 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7674 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7675 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7676 BPF_LD_MAP_FD(BPF_REG_1, 0),
7677 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7678 BPF_FUNC_map_lookup_elem),
7679 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7680 BPF_EXIT_INSN(),
7681 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7682 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7683 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7684 BPF_JMP_A(0),
7685 BPF_EXIT_INSN(),
7686 },
7687 .fixup_map1 = { 3 },
7688 .errstr = "pointer offset -1073741822",
7689 .result = REJECT
7690 },
7691 {
7692 "bounds check map access with off+size signed 32bit overflow. test4",
7693 .insns = {
7694 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7695 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7697 BPF_LD_MAP_FD(BPF_REG_1, 0),
7698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7699 BPF_FUNC_map_lookup_elem),
7700 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7701 BPF_EXIT_INSN(),
7702 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7703 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7704 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7705 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7706 BPF_JMP_A(0),
7707 BPF_EXIT_INSN(),
7708 },
7709 .fixup_map1 = { 3 },
7710 .errstr = "map_value pointer and 1000000000000",
7711 .result = REJECT
7712 },
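	/* In the two tests below r0 holds either a scalar loaded from the
	 * map value or a pointer, depending on the branch taken.  State
	 * pruning must not treat those states as equal: the program is
	 * accepted for privileged users, but unprivileged loading must be
	 * rejected since returning the pointer leaks a kernel address.
	 */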
7713 {
7714 "pointer/scalar confusion in state equality check (way 1)",
7715 .insns = {
7716 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7717 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7719 BPF_LD_MAP_FD(BPF_REG_1, 0),
7720 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7721 BPF_FUNC_map_lookup_elem),
7722 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7723 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7724 BPF_JMP_A(1),
7725 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7726 BPF_JMP_A(0),
7727 BPF_EXIT_INSN(),
7728 },
7729 .fixup_map1 = { 3 },
7730 .result = ACCEPT,
7731 .retval = POINTER_VALUE,
7732 .result_unpriv = REJECT,
7733 .errstr_unpriv = "R0 leaks addr as return value"
7734 },
7735 {
7736 "pointer/scalar confusion in state equality check (way 2)",
7737 .insns = {
7738 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7739 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7741 BPF_LD_MAP_FD(BPF_REG_1, 0),
7742 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7743 BPF_FUNC_map_lookup_elem),
7744 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7745 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7746 BPF_JMP_A(1),
7747 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7748 BPF_EXIT_INSN(),
7749 },
7750 .fixup_map1 = { 3 },
7751 .result = ACCEPT,
7752 .retval = POINTER_VALUE,
7753 .result_unpriv = REJECT,
7754 .errstr_unpriv = "R0 leaks addr as return value"
7755 },
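	/* Variable-offset accesses: loads from ctx or from the stack
	 * (directly or via a helper) through a pointer with a variable
	 * offset are rejected.
	 */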
7756 {
7757 "variable-offset ctx access",
7758 .insns = {
7759 /* Get an unknown value */
7760 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7761 /* Make it small and 4-byte aligned */
7762 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7763 /* add it to skb. We now have either &skb->len or
7764 * &skb->pkt_type, but we don't know which
7765 */
7766 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7767 /* dereference it */
7768 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7769 BPF_EXIT_INSN(),
7770 },
7771 .errstr = "variable ctx access var_off=(0x0; 0x4)",
7772 .result = REJECT,
7773 .prog_type = BPF_PROG_TYPE_LWT_IN,
7774 },
7775 {
7776 "variable-offset stack access",
7777 .insns = {
7778 /* Fill the top 8 bytes of the stack */
7779 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7780 /* Get an unknown value */
7781 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7782 /* Make it small and 4-byte aligned */
7783 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7784 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7785 /* add it to fp. We now have either fp-4 or fp-8, but
7786 * we don't know which
7787 */
7788 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7789 /* dereference it */
7790 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
7791 BPF_EXIT_INSN(),
7792 },
7793 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7794 .result = REJECT,
7795 .prog_type = BPF_PROG_TYPE_LWT_IN,
7796 },
7797 {
7798 "indirect variable-offset stack access",
7799 .insns = {
7800 /* Fill the top 8 bytes of the stack */
7801 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7802 /* Get an unknown value */
7803 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7804 /* Make it small and 4-byte aligned */
7805 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7806 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7807 /* add it to fp. We now have either fp-4 or fp-8, but
7808 * we don't know which
7809 */
7810 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7811 /* dereference it indirectly */
7812 BPF_LD_MAP_FD(BPF_REG_1, 0),
7813 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7814 BPF_FUNC_map_lookup_elem),
7815 BPF_MOV64_IMM(BPF_REG_0, 0),
7816 BPF_EXIT_INSN(),
7817 },
7818 .fixup_map1 = { 5 },
7819 .errstr = "variable stack read R2",
7820 .result = REJECT,
7821 .prog_type = BPF_PROG_TYPE_LWT_IN,
7822 },
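	/* The direct stack accesses below add large constants to the frame
	 * pointer; the arithmetic itself must be rejected so the 32-bit
	 * off+size check cannot be wrapped around.
	 */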
7823 {
7824 "direct stack access with 32-bit wraparound. test1",
7825 .insns = {
7826 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7829 BPF_MOV32_IMM(BPF_REG_0, 0),
7830 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7831 BPF_EXIT_INSN()
7832 },
7833 .errstr = "fp pointer and 2147483647",
7834 .result = REJECT
7835 },
7836 {
7837 "direct stack access with 32-bit wraparound. test2",
7838 .insns = {
7839 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7842 BPF_MOV32_IMM(BPF_REG_0, 0),
7843 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7844 BPF_EXIT_INSN()
7845 },
7846 .errstr = "fp pointer and 1073741823",
7847 .result = REJECT
7848 },
7849 {
7850 "direct stack access with 32-bit wraparound. test3",
7851 .insns = {
7852 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7854 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7855 BPF_MOV32_IMM(BPF_REG_0, 0),
7856 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7857 BPF_EXIT_INSN()
7858 },
7859 .errstr = "fp pointer offset 1073741822",
7860 .result = REJECT
7861 },
7862 {
7863 "liveness pruning and write screening",
7864 .insns = {
7865 /* Get an unknown value */
7866 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7867 /* branch conditions teach us nothing about R2 */
7868 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7869 BPF_MOV64_IMM(BPF_REG_0, 0),
7870 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7871 BPF_MOV64_IMM(BPF_REG_0, 0),
7872 BPF_EXIT_INSN(),
7873 },
7874 .errstr = "R0 !read_ok",
7875 .result = REJECT,
7876 .prog_type = BPF_PROG_TYPE_LWT_IN,
7877 },
7878 {
7879 "varlen_map_value_access pruning",
7880 .insns = {
7881 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7882 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7884 BPF_LD_MAP_FD(BPF_REG_1, 0),
7885 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7886 BPF_FUNC_map_lookup_elem),
7887 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7888 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7889 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
7890 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
7891 BPF_MOV32_IMM(BPF_REG_1, 0),
7892 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
7893 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7894 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
7895 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7896 offsetof(struct test_val, foo)),
7897 BPF_EXIT_INSN(),
7898 },
7899 .fixup_map2 = { 3 },
7900 .errstr_unpriv = "R0 leaks addr",
7901 .errstr = "R0 unbounded memory access",
7902 .result_unpriv = REJECT,
7903 .result = REJECT,
7904 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7905 },
7906 {
7907 "invalid 64-bit BPF_END",
7908 .insns = {
7909 BPF_MOV32_IMM(BPF_REG_0, 0),
7910 {
7911 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
7912 .dst_reg = BPF_REG_0,
7913 .src_reg = 0,
7914 .off = 0,
7915 .imm = 32,
7916 },
7917 BPF_EXIT_INSN(),
7918 },
7919 .errstr = "unknown opcode d7",
7920 .result = REJECT,
7921 },
7922 {
7923 "XDP, using ifindex from netdev",
7924 .insns = {
7925 BPF_MOV64_IMM(BPF_REG_0, 0),
7926 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7927 offsetof(struct xdp_md, ingress_ifindex)),
7928 BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
7929 BPF_MOV64_IMM(BPF_REG_0, 1),
7930 BPF_EXIT_INSN(),
7931 },
7932 .result = ACCEPT,
7933 .prog_type = BPF_PROG_TYPE_XDP,
7934 .retval = 1,
7935 },
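	/* The "meta access" tests exercise bounds checking of the xdp_md
	 * data_meta pointer: the meta area sits in front of data, so meta
	 * pointers must be validated against data before they are
	 * dereferenced, and bpf_xdp_adjust_meta() invalidates previously
	 * derived packet pointers.
	 */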
7936 {
7937 "meta access, test1",
7938 .insns = {
7939 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7940 offsetof(struct xdp_md, data_meta)),
7941 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7942 offsetof(struct xdp_md, data)),
7943 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7945 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7946 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7947 BPF_MOV64_IMM(BPF_REG_0, 0),
7948 BPF_EXIT_INSN(),
7949 },
7950 .result = ACCEPT,
7951 .prog_type = BPF_PROG_TYPE_XDP,
7952 },
7953 {
7954 "meta access, test2",
7955 .insns = {
7956 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7957 offsetof(struct xdp_md, data_meta)),
7958 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7959 offsetof(struct xdp_md, data)),
7960 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7961 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
7962 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7964 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7965 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7966 BPF_MOV64_IMM(BPF_REG_0, 0),
7967 BPF_EXIT_INSN(),
7968 },
7969 .result = REJECT,
7970 .errstr = "invalid access to packet, off=-8",
7971 .prog_type = BPF_PROG_TYPE_XDP,
7972 },
7973 {
7974 "meta access, test3",
7975 .insns = {
7976 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7977 offsetof(struct xdp_md, data_meta)),
7978 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7979 offsetof(struct xdp_md, data_end)),
7980 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7982 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7983 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7984 BPF_MOV64_IMM(BPF_REG_0, 0),
7985 BPF_EXIT_INSN(),
7986 },
7987 .result = REJECT,
7988 .errstr = "invalid access to packet",
7989 .prog_type = BPF_PROG_TYPE_XDP,
7990 },
7991 {
7992 "meta access, test4",
7993 .insns = {
7994 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7995 offsetof(struct xdp_md, data_meta)),
7996 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7997 offsetof(struct xdp_md, data_end)),
7998 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7999 offsetof(struct xdp_md, data)),
8000 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8001 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8002 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8003 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8004 BPF_MOV64_IMM(BPF_REG_0, 0),
8005 BPF_EXIT_INSN(),
8006 },
8007 .result = REJECT,
8008 .errstr = "invalid access to packet",
8009 .prog_type = BPF_PROG_TYPE_XDP,
8010 },
8011 {
8012 "meta access, test5",
8013 .insns = {
8014 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8015 offsetof(struct xdp_md, data_meta)),
8016 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8017 offsetof(struct xdp_md, data)),
8018 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8019 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8020 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
8021 BPF_MOV64_IMM(BPF_REG_2, -8),
8022 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8023 BPF_FUNC_xdp_adjust_meta),
8024 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8025 BPF_MOV64_IMM(BPF_REG_0, 0),
8026 BPF_EXIT_INSN(),
8027 },
8028 .result = REJECT,
8029 .errstr = "R3 !read_ok",
8030 .prog_type = BPF_PROG_TYPE_XDP,
8031 },
8032 {
8033 "meta access, test6",
8034 .insns = {
8035 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8036 offsetof(struct xdp_md, data_meta)),
8037 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8038 offsetof(struct xdp_md, data)),
8039 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8041 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8042 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8043 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
8044 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8045 BPF_MOV64_IMM(BPF_REG_0, 0),
8046 BPF_EXIT_INSN(),
8047 },
8048 .result = REJECT,
8049 .errstr = "invalid access to packet",
8050 .prog_type = BPF_PROG_TYPE_XDP,
8051 },
8052 {
8053 "meta access, test7",
8054 .insns = {
8055 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8056 offsetof(struct xdp_md, data_meta)),
8057 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8058 offsetof(struct xdp_md, data)),
8059 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8061 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8063 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8064 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8065 BPF_MOV64_IMM(BPF_REG_0, 0),
8066 BPF_EXIT_INSN(),
8067 },
8068 .result = ACCEPT,
8069 .prog_type = BPF_PROG_TYPE_XDP,
8070 },
8071 {
8072 "meta access, test8",
8073 .insns = {
8074 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8075 offsetof(struct xdp_md, data_meta)),
8076 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8077 offsetof(struct xdp_md, data)),
8078 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8079 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8080 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8081 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8082 BPF_MOV64_IMM(BPF_REG_0, 0),
8083 BPF_EXIT_INSN(),
8084 },
8085 .result = ACCEPT,
8086 .prog_type = BPF_PROG_TYPE_XDP,
8087 },
8088 {
8089 "meta access, test9",
8090 .insns = {
8091 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8092 offsetof(struct xdp_md, data_meta)),
8093 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8094 offsetof(struct xdp_md, data)),
8095 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8096 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8097 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8098 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8099 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8100 BPF_MOV64_IMM(BPF_REG_0, 0),
8101 BPF_EXIT_INSN(),
8102 },
8103 .result = REJECT,
8104 .errstr = "invalid access to packet",
8105 .prog_type = BPF_PROG_TYPE_XDP,
8106 },
8107 {
8108 "meta access, test10",
8109 .insns = {
8110 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8111 offsetof(struct xdp_md, data_meta)),
8112 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8113 offsetof(struct xdp_md, data)),
8114 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8115 offsetof(struct xdp_md, data_end)),
8116 BPF_MOV64_IMM(BPF_REG_5, 42),
8117 BPF_MOV64_IMM(BPF_REG_6, 24),
8118 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8119 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8120 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8121 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8122 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
8123 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8124 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8126 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
8127 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
8128 BPF_MOV64_IMM(BPF_REG_0, 0),
8129 BPF_EXIT_INSN(),
8130 },
8131 .result = REJECT,
8132 .errstr = "invalid access to packet",
8133 .prog_type = BPF_PROG_TYPE_XDP,
8134 },
8135 {
8136 "meta access, test11",
8137 .insns = {
8138 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8139 offsetof(struct xdp_md, data_meta)),
8140 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8141 offsetof(struct xdp_md, data)),
8142 BPF_MOV64_IMM(BPF_REG_5, 42),
8143 BPF_MOV64_IMM(BPF_REG_6, 24),
8144 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8145 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8146 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8147 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8148 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
8149 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8150 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8152 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
8153 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
8154 BPF_MOV64_IMM(BPF_REG_0, 0),
8155 BPF_EXIT_INSN(),
8156 },
8157 .result = ACCEPT,
8158 .prog_type = BPF_PROG_TYPE_XDP,
8159 },
8160 {
8161 "meta access, test12",
8162 .insns = {
8163 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8164 offsetof(struct xdp_md, data_meta)),
8165 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8166 offsetof(struct xdp_md, data)),
8167 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8168 offsetof(struct xdp_md, data_end)),
8169 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8171 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
8172 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8173 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8175 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
8176 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8177 BPF_MOV64_IMM(BPF_REG_0, 0),
8178 BPF_EXIT_INSN(),
8179 },
8180 .result = ACCEPT,
8181 .prog_type = BPF_PROG_TYPE_XDP,
8182 },
8183 {
8184 "arithmetic ops make PTR_TO_CTX unusable",
8185 .insns = {
8186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
8187 offsetof(struct __sk_buff, data) -
8188 offsetof(struct __sk_buff, mark)),
8189 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8190 offsetof(struct __sk_buff, mark)),
8191 BPF_EXIT_INSN(),
8192 },
8193 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
8194 .result = REJECT,
8195 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8196 },
8197 {
8198 "pkt_end - pkt_start is allowed",
8199 .insns = {
8200 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8201 offsetof(struct __sk_buff, data_end)),
8202 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8203 offsetof(struct __sk_buff, data)),
8204 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
8205 BPF_EXIT_INSN(),
8206 },
8207 .result = ACCEPT,
8208 .retval = TEST_DATA_LEN,
8209 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8210 },
8211 {
8212 "XDP pkt read, pkt_end mangling, bad access 1",
8213 .insns = {
8214 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8215 offsetof(struct xdp_md, data)),
8216 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8217 offsetof(struct xdp_md, data_end)),
8218 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
8221 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8222 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8223 BPF_MOV64_IMM(BPF_REG_0, 0),
8224 BPF_EXIT_INSN(),
8225 },
8226 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8227 .result = REJECT,
8228 .prog_type = BPF_PROG_TYPE_XDP,
8229 },
8230 {
8231 "XDP pkt read, pkt_end mangling, bad access 2",
8232 .insns = {
8233 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8234 offsetof(struct xdp_md, data)),
8235 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8236 offsetof(struct xdp_md, data_end)),
8237 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8239 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
8240 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8241 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8242 BPF_MOV64_IMM(BPF_REG_0, 0),
8243 BPF_EXIT_INSN(),
8244 },
8245 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8246 .result = REJECT,
8247 .prog_type = BPF_PROG_TYPE_XDP,
8248 },
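	/* The tests below walk the full matrix of branches between an
	 * offset packet pointer and pkt_end (JGT/JLT/JGE/JLE, both operand
	 * orders): "good access" reads stay inside the range proven by the
	 * branch, "bad access" reads go past it or sit on the unproven
	 * path and must be rejected.
	 */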
8249 {
8250 "XDP pkt read, pkt_data' > pkt_end, good access",
8251 .insns = {
8252 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8253 offsetof(struct xdp_md, data)),
8254 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8255 offsetof(struct xdp_md, data_end)),
8256 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8257 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8258 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8259 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8260 BPF_MOV64_IMM(BPF_REG_0, 0),
8261 BPF_EXIT_INSN(),
8262 },
8263 .result = ACCEPT,
8264 .prog_type = BPF_PROG_TYPE_XDP,
8265 },
8266 {
8267 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
8268 .insns = {
8269 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8270 offsetof(struct xdp_md, data)),
8271 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8272 offsetof(struct xdp_md, data_end)),
8273 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8275 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8276 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8277 BPF_MOV64_IMM(BPF_REG_0, 0),
8278 BPF_EXIT_INSN(),
8279 },
8280 .errstr = "R1 offset is outside of the packet",
8281 .result = REJECT,
8282 .prog_type = BPF_PROG_TYPE_XDP,
8283 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8284 },
8285 {
8286 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
8287 .insns = {
8288 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8289 offsetof(struct xdp_md, data)),
8290 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8291 offsetof(struct xdp_md, data_end)),
8292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8294 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8295 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8296 BPF_MOV64_IMM(BPF_REG_0, 0),
8297 BPF_EXIT_INSN(),
8298 },
8299 .errstr = "R1 offset is outside of the packet",
8300 .result = REJECT,
8301 .prog_type = BPF_PROG_TYPE_XDP,
8302 },
8303 {
8304 "XDP pkt read, pkt_end > pkt_data', good access",
8305 .insns = {
8306 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8307 offsetof(struct xdp_md, data)),
8308 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8309 offsetof(struct xdp_md, data_end)),
8310 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8312 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8313 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8314 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8315 BPF_MOV64_IMM(BPF_REG_0, 0),
8316 BPF_EXIT_INSN(),
8317 },
8318 .result = ACCEPT,
8319 .prog_type = BPF_PROG_TYPE_XDP,
8320 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8321 },
8322 {
8323 "XDP pkt read, pkt_end > pkt_data', bad access 1",
8324 .insns = {
8325 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8326 offsetof(struct xdp_md, data)),
8327 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8328 offsetof(struct xdp_md, data_end)),
8329 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8331 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8332 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8333 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8334 BPF_MOV64_IMM(BPF_REG_0, 0),
8335 BPF_EXIT_INSN(),
8336 },
8337 .errstr = "R1 offset is outside of the packet",
8338 .result = REJECT,
8339 .prog_type = BPF_PROG_TYPE_XDP,
8340 },
8341 {
8342 "XDP pkt read, pkt_end > pkt_data', bad access 2",
8343 .insns = {
8344 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8345 offsetof(struct xdp_md, data)),
8346 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8347 offsetof(struct xdp_md, data_end)),
8348 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8350 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8351 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8352 BPF_MOV64_IMM(BPF_REG_0, 0),
8353 BPF_EXIT_INSN(),
8354 },
8355 .errstr = "R1 offset is outside of the packet",
8356 .result = REJECT,
8357 .prog_type = BPF_PROG_TYPE_XDP,
8358 },
8359 {
8360 "XDP pkt read, pkt_data' < pkt_end, good access",
8361 .insns = {
8362 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8363 offsetof(struct xdp_md, data)),
8364 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8365 offsetof(struct xdp_md, data_end)),
8366 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8367 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8368 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8369 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8370 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8371 BPF_MOV64_IMM(BPF_REG_0, 0),
8372 BPF_EXIT_INSN(),
8373 },
8374 .result = ACCEPT,
8375 .prog_type = BPF_PROG_TYPE_XDP,
8376 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8377 },
8378 {
8379 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
8380 .insns = {
8381 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8382 offsetof(struct xdp_md, data)),
8383 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8384 offsetof(struct xdp_md, data_end)),
8385 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8387 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8388 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8389 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8390 BPF_MOV64_IMM(BPF_REG_0, 0),
8391 BPF_EXIT_INSN(),
8392 },
8393 .errstr = "R1 offset is outside of the packet",
8394 .result = REJECT,
8395 .prog_type = BPF_PROG_TYPE_XDP,
8396 },
8397 {
8398 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
8399 .insns = {
8400 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8401 offsetof(struct xdp_md, data)),
8402 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8403 offsetof(struct xdp_md, data_end)),
8404 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8406 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8407 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8408 BPF_MOV64_IMM(BPF_REG_0, 0),
8409 BPF_EXIT_INSN(),
8410 },
8411 .errstr = "R1 offset is outside of the packet",
8412 .result = REJECT,
8413 .prog_type = BPF_PROG_TYPE_XDP,
8414 },
8415 {
8416 "XDP pkt read, pkt_end < pkt_data', good access",
8417 .insns = {
8418 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8419 offsetof(struct xdp_md, data)),
8420 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8421 offsetof(struct xdp_md, data_end)),
8422 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8423 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8424 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8425 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8426 BPF_MOV64_IMM(BPF_REG_0, 0),
8427 BPF_EXIT_INSN(),
8428 },
8429 .result = ACCEPT,
8430 .prog_type = BPF_PROG_TYPE_XDP,
8431 },
8432 {
8433 "XDP pkt read, pkt_end < pkt_data', bad access 1",
8434 .insns = {
8435 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8436 offsetof(struct xdp_md, data)),
8437 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8438 offsetof(struct xdp_md, data_end)),
8439 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8441 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8442 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8443 BPF_MOV64_IMM(BPF_REG_0, 0),
8444 BPF_EXIT_INSN(),
8445 },
8446 .errstr = "R1 offset is outside of the packet",
8447 .result = REJECT,
8448 .prog_type = BPF_PROG_TYPE_XDP,
8449 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8450 },
8451 {
8452 "XDP pkt read, pkt_end < pkt_data', bad access 2",
8453 .insns = {
8454 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8455 offsetof(struct xdp_md, data)),
8456 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8457 offsetof(struct xdp_md, data_end)),
8458 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8460 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8461 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8462 BPF_MOV64_IMM(BPF_REG_0, 0),
8463 BPF_EXIT_INSN(),
8464 },
8465 .errstr = "R1 offset is outside of the packet",
8466 .result = REJECT,
8467 .prog_type = BPF_PROG_TYPE_XDP,
8468 },
8469 {
8470 "XDP pkt read, pkt_data' >= pkt_end, good access",
8471 .insns = {
8472 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8473 offsetof(struct xdp_md, data)),
8474 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8475 offsetof(struct xdp_md, data_end)),
8476 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8478 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8479 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8480 BPF_MOV64_IMM(BPF_REG_0, 0),
8481 BPF_EXIT_INSN(),
8482 },
8483 .result = ACCEPT,
8484 .prog_type = BPF_PROG_TYPE_XDP,
8485 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8486 },
8487 {
8488 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
8489 .insns = {
8490 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8491 offsetof(struct xdp_md, data)),
8492 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8493 offsetof(struct xdp_md, data_end)),
8494 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8496 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8497 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8498 BPF_MOV64_IMM(BPF_REG_0, 0),
8499 BPF_EXIT_INSN(),
8500 },
8501 .errstr = "R1 offset is outside of the packet",
8502 .result = REJECT,
8503 .prog_type = BPF_PROG_TYPE_XDP,
8504 },
8505 {
8506 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
8507 .insns = {
8508 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8509 offsetof(struct xdp_md, data)),
8510 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8511 offsetof(struct xdp_md, data_end)),
8512 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8514 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8515 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8516 BPF_MOV64_IMM(BPF_REG_0, 0),
8517 BPF_EXIT_INSN(),
8518 },
8519 .errstr = "R1 offset is outside of the packet",
8520 .result = REJECT,
8521 .prog_type = BPF_PROG_TYPE_XDP,
8522 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8523 },
8524 {
8525 "XDP pkt read, pkt_end >= pkt_data', good access",
8526 .insns = {
8527 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8528 offsetof(struct xdp_md, data)),
8529 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8530 offsetof(struct xdp_md, data_end)),
8531 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8532 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8533 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8534 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8535 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8536 BPF_MOV64_IMM(BPF_REG_0, 0),
8537 BPF_EXIT_INSN(),
8538 },
8539 .result = ACCEPT,
8540 .prog_type = BPF_PROG_TYPE_XDP,
8541 },
8542 {
8543 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
8544 .insns = {
8545 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8546 offsetof(struct xdp_md, data)),
8547 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8548 offsetof(struct xdp_md, data_end)),
8549 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8551 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8552 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8553 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8554 BPF_MOV64_IMM(BPF_REG_0, 0),
8555 BPF_EXIT_INSN(),
8556 },
8557 .errstr = "R1 offset is outside of the packet",
8558 .result = REJECT,
8559 .prog_type = BPF_PROG_TYPE_XDP,
8560 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8561 },
8562 {
8563 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
8564 .insns = {
8565 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8566 offsetof(struct xdp_md, data)),
8567 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8568 offsetof(struct xdp_md, data_end)),
8569 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8570 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8571 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8572 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8573 BPF_MOV64_IMM(BPF_REG_0, 0),
8574 BPF_EXIT_INSN(),
8575 },
8576 .errstr = "R1 offset is outside of the packet",
8577 .result = REJECT,
8578 .prog_type = BPF_PROG_TYPE_XDP,
8579 },
8580 {
8581 "XDP pkt read, pkt_data' <= pkt_end, good access",
8582 .insns = {
8583 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8584 offsetof(struct xdp_md, data)),
8585 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8586 offsetof(struct xdp_md, data_end)),
8587 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8589 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8590 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8591 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8592 BPF_MOV64_IMM(BPF_REG_0, 0),
8593 BPF_EXIT_INSN(),
8594 },
8595 .result = ACCEPT,
8596 .prog_type = BPF_PROG_TYPE_XDP,
8597 },
8598 {
8599 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
8600 .insns = {
8601 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8602 offsetof(struct xdp_md, data)),
8603 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8604 offsetof(struct xdp_md, data_end)),
8605 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8607 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8608 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8609 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8610 BPF_MOV64_IMM(BPF_REG_0, 0),
8611 BPF_EXIT_INSN(),
8612 },
8613 .errstr = "R1 offset is outside of the packet",
8614 .result = REJECT,
8615 .prog_type = BPF_PROG_TYPE_XDP,
8616 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8617 },
8618 {
8619 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
8620 .insns = {
8621 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8622 offsetof(struct xdp_md, data)),
8623 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8624 offsetof(struct xdp_md, data_end)),
8625 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8627 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8628 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8629 BPF_MOV64_IMM(BPF_REG_0, 0),
8630 BPF_EXIT_INSN(),
8631 },
8632 .errstr = "R1 offset is outside of the packet",
8633 .result = REJECT,
8634 .prog_type = BPF_PROG_TYPE_XDP,
8635 },
8636 {
8637 "XDP pkt read, pkt_end <= pkt_data', good access",
8638 .insns = {
8639 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8640 offsetof(struct xdp_md, data)),
8641 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8642 offsetof(struct xdp_md, data_end)),
8643 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8645 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8646 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8647 BPF_MOV64_IMM(BPF_REG_0, 0),
8648 BPF_EXIT_INSN(),
8649 },
8650 .result = ACCEPT,
8651 .prog_type = BPF_PROG_TYPE_XDP,
8652 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8653 },
8654 {
8655 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
8656 .insns = {
8657 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8658 offsetof(struct xdp_md, data)),
8659 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8660 offsetof(struct xdp_md, data_end)),
8661 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8663 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8664 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8665 BPF_MOV64_IMM(BPF_REG_0, 0),
8666 BPF_EXIT_INSN(),
8667 },
8668 .errstr = "R1 offset is outside of the packet",
8669 .result = REJECT,
8670 .prog_type = BPF_PROG_TYPE_XDP,
8671 },
8672 {
8673 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
8674 .insns = {
8675 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8676 offsetof(struct xdp_md, data)),
8677 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8678 offsetof(struct xdp_md, data_end)),
8679 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8681 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8682 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8683 BPF_MOV64_IMM(BPF_REG_0, 0),
8684 BPF_EXIT_INSN(),
8685 },
8686 .errstr = "R1 offset is outside of the packet",
8687 .result = REJECT,
8688 .prog_type = BPF_PROG_TYPE_XDP,
8689 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8690 },
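	/* Same comparison matrix as above, repeated for a pkt_meta pointer
	 * checked against pkt_data.
	 */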
8691 {
8692 "XDP pkt read, pkt_meta' > pkt_data, good access",
8693 .insns = {
8694 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8695 offsetof(struct xdp_md, data_meta)),
8696 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8697 offsetof(struct xdp_md, data)),
8698 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8700 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8701 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8702 BPF_MOV64_IMM(BPF_REG_0, 0),
8703 BPF_EXIT_INSN(),
8704 },
8705 .result = ACCEPT,
8706 .prog_type = BPF_PROG_TYPE_XDP,
8707 },
8708 {
8709 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
8710 .insns = {
8711 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8712 offsetof(struct xdp_md, data_meta)),
8713 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8714 offsetof(struct xdp_md, data)),
8715 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8716 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8717 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8718 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8719 BPF_MOV64_IMM(BPF_REG_0, 0),
8720 BPF_EXIT_INSN(),
8721 },
8722 .errstr = "R1 offset is outside of the packet",
8723 .result = REJECT,
8724 .prog_type = BPF_PROG_TYPE_XDP,
8725 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8726 },
8727 {
8728 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
8729 .insns = {
8730 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8731 offsetof(struct xdp_md, data_meta)),
8732 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8733 offsetof(struct xdp_md, data)),
8734 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8736 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8737 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8738 BPF_MOV64_IMM(BPF_REG_0, 0),
8739 BPF_EXIT_INSN(),
8740 },
8741 .errstr = "R1 offset is outside of the packet",
8742 .result = REJECT,
8743 .prog_type = BPF_PROG_TYPE_XDP,
8744 },
8745 {
8746 "XDP pkt read, pkt_data > pkt_meta', good access",
8747 .insns = {
8748 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8749 offsetof(struct xdp_md, data_meta)),
8750 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8751 offsetof(struct xdp_md, data)),
8752 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8754 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8755 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8756 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8757 BPF_MOV64_IMM(BPF_REG_0, 0),
8758 BPF_EXIT_INSN(),
8759 },
8760 .result = ACCEPT,
8761 .prog_type = BPF_PROG_TYPE_XDP,
8762 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8763 },
8764 {
8765 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
8766 .insns = {
8767 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8768 offsetof(struct xdp_md, data_meta)),
8769 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8770 offsetof(struct xdp_md, data)),
8771 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8772 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8773 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8774 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8775 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8776 BPF_MOV64_IMM(BPF_REG_0, 0),
8777 BPF_EXIT_INSN(),
8778 },
8779 .errstr = "R1 offset is outside of the packet",
8780 .result = REJECT,
8781 .prog_type = BPF_PROG_TYPE_XDP,
8782 },
8783 {
8784 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
8785 .insns = {
8786 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8787 offsetof(struct xdp_md, data_meta)),
8788 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8789 offsetof(struct xdp_md, data)),
8790 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8792 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8793 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8794 BPF_MOV64_IMM(BPF_REG_0, 0),
8795 BPF_EXIT_INSN(),
8796 },
8797 .errstr = "R1 offset is outside of the packet",
8798 .result = REJECT,
8799 .prog_type = BPF_PROG_TYPE_XDP,
8800 },
8801 {
8802 "XDP pkt read, pkt_meta' < pkt_data, good access",
8803 .insns = {
8804 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8805 offsetof(struct xdp_md, data_meta)),
8806 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8807 offsetof(struct xdp_md, data)),
8808 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8810 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8811 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8812 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8813 BPF_MOV64_IMM(BPF_REG_0, 0),
8814 BPF_EXIT_INSN(),
8815 },
8816 .result = ACCEPT,
8817 .prog_type = BPF_PROG_TYPE_XDP,
8818 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8819 },
8820 {
8821 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
8822 .insns = {
8823 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8824 offsetof(struct xdp_md, data_meta)),
8825 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8826 offsetof(struct xdp_md, data)),
8827 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8829 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8830 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8831 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8832 BPF_MOV64_IMM(BPF_REG_0, 0),
8833 BPF_EXIT_INSN(),
8834 },
8835 .errstr = "R1 offset is outside of the packet",
8836 .result = REJECT,
8837 .prog_type = BPF_PROG_TYPE_XDP,
8838 },
8839 {
8840 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
8841 .insns = {
8842 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8843 offsetof(struct xdp_md, data_meta)),
8844 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8845 offsetof(struct xdp_md, data)),
8846 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8847 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8848 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8849 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8850 BPF_MOV64_IMM(BPF_REG_0, 0),
8851 BPF_EXIT_INSN(),
8852 },
8853 .errstr = "R1 offset is outside of the packet",
8854 .result = REJECT,
8855 .prog_type = BPF_PROG_TYPE_XDP,
8856 },
8857 {
8858 "XDP pkt read, pkt_data < pkt_meta', good access",
8859 .insns = {
8860 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8861 offsetof(struct xdp_md, data_meta)),
8862 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8863 offsetof(struct xdp_md, data)),
8864 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8866 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8867 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8868 BPF_MOV64_IMM(BPF_REG_0, 0),
8869 BPF_EXIT_INSN(),
8870 },
8871 .result = ACCEPT,
8872 .prog_type = BPF_PROG_TYPE_XDP,
8873 },
8874 {
8875 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
8876 .insns = {
8877 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8878 offsetof(struct xdp_md, data_meta)),
8879 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8880 offsetof(struct xdp_md, data)),
8881 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8882 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8883 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8884 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8885 BPF_MOV64_IMM(BPF_REG_0, 0),
8886 BPF_EXIT_INSN(),
8887 },
8888 .errstr = "R1 offset is outside of the packet",
8889 .result = REJECT,
8890 .prog_type = BPF_PROG_TYPE_XDP,
8891 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8892 },
8893 {
8894 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
8895 .insns = {
8896 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8897 offsetof(struct xdp_md, data_meta)),
8898 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8899 offsetof(struct xdp_md, data)),
8900 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8902 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8903 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8904 BPF_MOV64_IMM(BPF_REG_0, 0),
8905 BPF_EXIT_INSN(),
8906 },
8907 .errstr = "R1 offset is outside of the packet",
8908 .result = REJECT,
8909 .prog_type = BPF_PROG_TYPE_XDP,
8910 },
8911 {
8912 "XDP pkt read, pkt_meta' >= pkt_data, good access",
8913 .insns = {
8914 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8915 offsetof(struct xdp_md, data_meta)),
8916 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8917 offsetof(struct xdp_md, data)),
8918 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8920 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8921 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8922 BPF_MOV64_IMM(BPF_REG_0, 0),
8923 BPF_EXIT_INSN(),
8924 },
8925 .result = ACCEPT,
8926 .prog_type = BPF_PROG_TYPE_XDP,
8927 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8928 },
8929 {
8930 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
8931 .insns = {
8932 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8933 offsetof(struct xdp_md, data_meta)),
8934 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8935 offsetof(struct xdp_md, data)),
8936 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8938 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8939 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8940 BPF_MOV64_IMM(BPF_REG_0, 0),
8941 BPF_EXIT_INSN(),
8942 },
8943 .errstr = "R1 offset is outside of the packet",
8944 .result = REJECT,
8945 .prog_type = BPF_PROG_TYPE_XDP,
8946 },
8947 {
8948 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
8949 .insns = {
8950 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8951 offsetof(struct xdp_md, data_meta)),
8952 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8953 offsetof(struct xdp_md, data)),
8954 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8955 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8956 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8957 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8958 BPF_MOV64_IMM(BPF_REG_0, 0),
8959 BPF_EXIT_INSN(),
8960 },
8961 .errstr = "R1 offset is outside of the packet",
8962 .result = REJECT,
8963 .prog_type = BPF_PROG_TYPE_XDP,
8964 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8965 },
8966 {
8967 "XDP pkt read, pkt_data >= pkt_meta', good access",
8968 .insns = {
8969 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8970 offsetof(struct xdp_md, data_meta)),
8971 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8972 offsetof(struct xdp_md, data)),
8973 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8975 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8976 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8977 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8978 BPF_MOV64_IMM(BPF_REG_0, 0),
8979 BPF_EXIT_INSN(),
8980 },
8981 .result = ACCEPT,
8982 .prog_type = BPF_PROG_TYPE_XDP,
8983 },
8984 {
8985 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
8986 .insns = {
8987 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8988 offsetof(struct xdp_md, data_meta)),
8989 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8990 offsetof(struct xdp_md, data)),
8991 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8993 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8994 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8995 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8996 BPF_MOV64_IMM(BPF_REG_0, 0),
8997 BPF_EXIT_INSN(),
8998 },
8999 .errstr = "R1 offset is outside of the packet",
9000 .result = REJECT,
9001 .prog_type = BPF_PROG_TYPE_XDP,
9002 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9003 },
9004 {
9005 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
9006 .insns = {
9007 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9008 offsetof(struct xdp_md, data_meta)),
9009 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9010 offsetof(struct xdp_md, data)),
9011 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9012 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9013 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9014 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9015 BPF_MOV64_IMM(BPF_REG_0, 0),
9016 BPF_EXIT_INSN(),
9017 },
9018 .errstr = "R1 offset is outside of the packet",
9019 .result = REJECT,
9020 .prog_type = BPF_PROG_TYPE_XDP,
9021 },
9022 {
9023 "XDP pkt read, pkt_meta' <= pkt_data, good access",
9024 .insns = {
9025 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9026 offsetof(struct xdp_md, data_meta)),
9027 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9028 offsetof(struct xdp_md, data)),
9029 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9031 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9032 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9033 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9034 BPF_MOV64_IMM(BPF_REG_0, 0),
9035 BPF_EXIT_INSN(),
9036 },
9037 .result = ACCEPT,
9038 .prog_type = BPF_PROG_TYPE_XDP,
9039 },
9040 {
9041 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
9042 .insns = {
9043 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9044 offsetof(struct xdp_md, data_meta)),
9045 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9046 offsetof(struct xdp_md, data)),
9047 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9048 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9049 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9050 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9051 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9052 BPF_MOV64_IMM(BPF_REG_0, 0),
9053 BPF_EXIT_INSN(),
9054 },
9055 .errstr = "R1 offset is outside of the packet",
9056 .result = REJECT,
9057 .prog_type = BPF_PROG_TYPE_XDP,
9058 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9059 },
9060 {
9061 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
9062 .insns = {
9063 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9064 offsetof(struct xdp_md, data_meta)),
9065 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9066 offsetof(struct xdp_md, data)),
9067 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9068 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9069 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9070 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9071 BPF_MOV64_IMM(BPF_REG_0, 0),
9072 BPF_EXIT_INSN(),
9073 },
9074 .errstr = "R1 offset is outside of the packet",
9075 .result = REJECT,
9076 .prog_type = BPF_PROG_TYPE_XDP,
9077 },
9078 {
9079 "XDP pkt read, pkt_data <= pkt_meta', good access",
9080 .insns = {
9081 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9082 offsetof(struct xdp_md, data_meta)),
9083 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9084 offsetof(struct xdp_md, data)),
9085 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9086 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9087 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9088 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9089 BPF_MOV64_IMM(BPF_REG_0, 0),
9090 BPF_EXIT_INSN(),
9091 },
9092 .result = ACCEPT,
9093 .prog_type = BPF_PROG_TYPE_XDP,
9094 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9095 },
9096 {
9097 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
9098 .insns = {
9099 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9100 offsetof(struct xdp_md, data_meta)),
9101 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9102 offsetof(struct xdp_md, data)),
9103 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9105 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9106 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9107 BPF_MOV64_IMM(BPF_REG_0, 0),
9108 BPF_EXIT_INSN(),
9109 },
9110 .errstr = "R1 offset is outside of the packet",
9111 .result = REJECT,
9112 .prog_type = BPF_PROG_TYPE_XDP,
9113 },
9114 {
9115 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
9116 .insns = {
9117 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9118 offsetof(struct xdp_md, data_meta)),
9119 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9120 offsetof(struct xdp_md, data)),
9121 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9123 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9124 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9125 BPF_MOV64_IMM(BPF_REG_0, 0),
9126 BPF_EXIT_INSN(),
9127 },
9128 .errstr = "R1 offset is outside of the packet",
9129 .result = REJECT,
9130 .prog_type = BPF_PROG_TYPE_XDP,
9131 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9132 },
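	/* The "check deducing bounds from const" tests below verify how the
	 * verifier derives register bounds from comparisons against
	 * constants.  Subtracting a ctx pointer from a scalar, or doing
	 * pointer arithmetic with a scalar whose minimum value cannot be
	 * proven, is rejected, and a ctx pointer modified that way may not
	 * be dereferenced.
	 */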
9133 {
Daniel Borkmann6f161012018-01-18 01:15:21 +01009134 "check deducing bounds from const, 1",
9135 .insns = {
9136 BPF_MOV64_IMM(BPF_REG_0, 1),
9137 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
9138 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9139 BPF_EXIT_INSN(),
9140 },
9141 .result = REJECT,
9142 .errstr = "R0 tried to subtract pointer from scalar",
9143 },
9144 {
9145 "check deducing bounds from const, 2",
9146 .insns = {
9147 BPF_MOV64_IMM(BPF_REG_0, 1),
9148 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
9149 BPF_EXIT_INSN(),
9150 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
9151 BPF_EXIT_INSN(),
9152 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9153 BPF_EXIT_INSN(),
9154 },
9155 .result = ACCEPT,
Yonghong Song35136922018-01-22 22:10:59 -08009156 .retval = 1,
Daniel Borkmann6f161012018-01-18 01:15:21 +01009157 },
9158 {
9159 "check deducing bounds from const, 3",
9160 .insns = {
9161 BPF_MOV64_IMM(BPF_REG_0, 0),
9162 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9163 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9164 BPF_EXIT_INSN(),
9165 },
9166 .result = REJECT,
9167 .errstr = "R0 tried to subtract pointer from scalar",
9168 },
9169 {
9170 "check deducing bounds from const, 4",
9171 .insns = {
9172 BPF_MOV64_IMM(BPF_REG_0, 0),
9173 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
9174 BPF_EXIT_INSN(),
9175 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9176 BPF_EXIT_INSN(),
9177 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9178 BPF_EXIT_INSN(),
9179 },
9180 .result = ACCEPT,
9181 },
9182 {
9183 "check deducing bounds from const, 5",
9184 .insns = {
9185 BPF_MOV64_IMM(BPF_REG_0, 0),
9186 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9187 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9188 BPF_EXIT_INSN(),
9189 },
9190 .result = REJECT,
9191 .errstr = "R0 tried to subtract pointer from scalar",
9192 },
9193 {
9194 "check deducing bounds from const, 6",
9195 .insns = {
9196 BPF_MOV64_IMM(BPF_REG_0, 0),
9197 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9198 BPF_EXIT_INSN(),
9199 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9200 BPF_EXIT_INSN(),
9201 },
9202 .result = REJECT,
9203 .errstr = "R0 tried to subtract pointer from scalar",
9204 },
9205 {
9206 "check deducing bounds from const, 7",
9207 .insns = {
9208 BPF_MOV64_IMM(BPF_REG_0, ~0),
9209 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9210 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9211 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9212 offsetof(struct __sk_buff, mark)),
9213 BPF_EXIT_INSN(),
9214 },
9215 .result = REJECT,
9216 .errstr = "dereference of modified ctx ptr",
9217 },
9218 {
9219 "check deducing bounds from const, 8",
9220 .insns = {
9221 BPF_MOV64_IMM(BPF_REG_0, ~0),
9222 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9223 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
9224 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9225 offsetof(struct __sk_buff, mark)),
9226 BPF_EXIT_INSN(),
9227 },
9228 .result = REJECT,
9229 .errstr = "dereference of modified ctx ptr",
9230 },
9231 {
9232 "check deducing bounds from const, 9",
9233 .insns = {
9234 BPF_MOV64_IMM(BPF_REG_0, 0),
9235 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9236 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9237 BPF_EXIT_INSN(),
9238 },
9239 .result = REJECT,
9240 .errstr = "R0 tried to subtract pointer from scalar",
9241 },
9242 {
9243 "check deducing bounds from const, 10",
9244 .insns = {
9245 BPF_MOV64_IMM(BPF_REG_0, 0),
9246 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9247 /* Marks reg as unknown. */
9248 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
9249 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9250 BPF_EXIT_INSN(),
9251 },
9252 .result = REJECT,
9253 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
9254 },
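	/* BPF_PROG_TYPE_CGROUP_SOCK programs may only return 0 or 1.  The
	 * verifier checks the possible range of R0 at bpf_exit and rejects
	 * anything that can fall outside of that, as the "R0 has value"
	 * errors in the tests below show.
	 */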
9255 {
Daniel Borkmannb06723d2017-11-01 23:58:09 +01009256 "bpf_exit with invalid return code. test1",
9257 .insns = {
9258 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9259 BPF_EXIT_INSN(),
9260 },
9261 .errstr = "R0 has value (0x0; 0xffffffff)",
9262 .result = REJECT,
9263 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9264 },
9265 {
9266 "bpf_exit with invalid return code. test2",
9267 .insns = {
9268 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9269 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
9270 BPF_EXIT_INSN(),
9271 },
9272 .result = ACCEPT,
9273 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9274 },
9275 {
9276 "bpf_exit with invalid return code. test3",
9277 .insns = {
9278 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9279 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
9280 BPF_EXIT_INSN(),
9281 },
9282 .errstr = "R0 has value (0x0; 0x3)",
9283 .result = REJECT,
9284 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9285 },
9286 {
9287 "bpf_exit with invalid return code. test4",
9288 .insns = {
9289 BPF_MOV64_IMM(BPF_REG_0, 1),
9290 BPF_EXIT_INSN(),
9291 },
9292 .result = ACCEPT,
9293 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9294 },
9295 {
9296 "bpf_exit with invalid return code. test5",
9297 .insns = {
9298 BPF_MOV64_IMM(BPF_REG_0, 2),
9299 BPF_EXIT_INSN(),
9300 },
9301 .errstr = "R0 has value (0x2; 0x0)",
9302 .result = REJECT,
9303 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9304 },
9305 {
9306 "bpf_exit with invalid return code. test6",
9307 .insns = {
9308 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9309 BPF_EXIT_INSN(),
9310 },
9311 .errstr = "R0 is not a known value (ctx)",
9312 .result = REJECT,
9313 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9314 },
9315 {
9316 "bpf_exit with invalid return code. test7",
9317 .insns = {
9318 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9319 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
9320 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
9321 BPF_EXIT_INSN(),
9322 },
9323 .errstr = "R0 has unknown scalar value",
9324 .result = REJECT,
9325 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9326 },
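	/* The "calls:" tests exercise bpf-to-bpf function calls.  A call to
	 * another BPF function is encoded as BPF_JMP | BPF_CALL with src_reg
	 * set to 1 (BPF_PSEUDO_CALL), and the immediate holds the callee's
	 * offset relative to the instruction following the call.  For
	 * example, in "calls: basic sanity" below
	 *
	 *	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2)
	 *
	 * enters the callee two instructions past the insn after the call,
	 * i.e. at the BPF_MOV64_IMM(BPF_REG_0, 2).
	 */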
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009327 {
9328 "calls: basic sanity",
9329 .insns = {
9330 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9331 BPF_MOV64_IMM(BPF_REG_0, 1),
9332 BPF_EXIT_INSN(),
9333 BPF_MOV64_IMM(BPF_REG_0, 2),
9334 BPF_EXIT_INSN(),
9335 },
9336 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9337 .result = ACCEPT,
9338 },
9339 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009340	"calls: not on unprivileged",

9341 .insns = {
9342 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9343 BPF_MOV64_IMM(BPF_REG_0, 1),
9344 BPF_EXIT_INSN(),
9345 BPF_MOV64_IMM(BPF_REG_0, 2),
9346 BPF_EXIT_INSN(),
9347 },
9348 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
9349 .result_unpriv = REJECT,
9350 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009351 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009352 },
9353 {
Daniel Borkmann21ccaf22018-01-26 23:33:48 +01009354 "calls: div by 0 in subprog",
9355 .insns = {
9356 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9357 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9358 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9359 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9360 offsetof(struct __sk_buff, data_end)),
9361 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9363 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9364 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9365 BPF_MOV64_IMM(BPF_REG_0, 1),
9366 BPF_EXIT_INSN(),
9367 BPF_MOV32_IMM(BPF_REG_2, 0),
9368 BPF_MOV32_IMM(BPF_REG_3, 1),
9369 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
9370 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9371 offsetof(struct __sk_buff, data)),
9372 BPF_EXIT_INSN(),
9373 },
9374 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9375 .result = ACCEPT,
9376 .retval = 1,
9377 },
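	/* The subprog above deliberately divides by zero; BPF defines the
	 * result of a division by zero as 0 instead of faulting, so the
	 * program is accepted and the caller still returns 1.
	 */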
9378 {
9379 "calls: multiple ret types in subprog 1",
9380 .insns = {
9381 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9382 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9383 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9384 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9385 offsetof(struct __sk_buff, data_end)),
9386 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9388 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9389 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9390 BPF_MOV64_IMM(BPF_REG_0, 1),
9391 BPF_EXIT_INSN(),
9392 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9393 offsetof(struct __sk_buff, data)),
9394 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9395 BPF_MOV32_IMM(BPF_REG_0, 42),
9396 BPF_EXIT_INSN(),
9397 },
9398 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9399 .result = REJECT,
9400 .errstr = "R0 invalid mem access 'inv'",
9401 },
9402 {
9403 "calls: multiple ret types in subprog 2",
9404 .insns = {
9405 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9407 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9408 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9409 offsetof(struct __sk_buff, data_end)),
9410 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9411 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9412 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9413 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9414 BPF_MOV64_IMM(BPF_REG_0, 1),
9415 BPF_EXIT_INSN(),
9416 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9417 offsetof(struct __sk_buff, data)),
9418 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9419 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
9420 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9421 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9422 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9423 BPF_LD_MAP_FD(BPF_REG_1, 0),
9424 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9425 BPF_FUNC_map_lookup_elem),
9426 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9427 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
9428 offsetof(struct __sk_buff, data)),
9429 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
9430 BPF_EXIT_INSN(),
9431 },
9432 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9433 .fixup_map1 = { 16 },
9434 .result = REJECT,
9435 .errstr = "R0 min value is outside of the array range",
9436 },
9437 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009438 "calls: overlapping caller/callee",
9439 .insns = {
9440 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
9441 BPF_MOV64_IMM(BPF_REG_0, 1),
9442 BPF_EXIT_INSN(),
9443 },
9444 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9445 .errstr = "last insn is not an exit or jmp",
9446 .result = REJECT,
9447 },
9448 {
9449 "calls: wrong recursive calls",
9450 .insns = {
9451 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9452 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9453 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9455 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9456 BPF_MOV64_IMM(BPF_REG_0, 1),
9457 BPF_EXIT_INSN(),
9458 },
9459 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9460 .errstr = "jump out of range",
9461 .result = REJECT,
9462 },
9463 {
9464 "calls: wrong src reg",
9465 .insns = {
9466 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
9467 BPF_MOV64_IMM(BPF_REG_0, 1),
9468 BPF_EXIT_INSN(),
9469 },
9470 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9471 .errstr = "BPF_CALL uses reserved fields",
9472 .result = REJECT,
9473 },
9474 {
9475 "calls: wrong off value",
9476 .insns = {
9477 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
9478 BPF_MOV64_IMM(BPF_REG_0, 1),
9479 BPF_EXIT_INSN(),
9480 BPF_MOV64_IMM(BPF_REG_0, 2),
9481 BPF_EXIT_INSN(),
9482 },
9483 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9484 .errstr = "BPF_CALL uses reserved fields",
9485 .result = REJECT,
9486 },
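	/* Loops are rejected: any call or jump that forms a back-edge to an
	 * earlier instruction fails verification, as the "back-edge" errors
	 * in the tests below show.
	 */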
9487 {
9488 "calls: jump back loop",
9489 .insns = {
9490 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9491 BPF_MOV64_IMM(BPF_REG_0, 1),
9492 BPF_EXIT_INSN(),
9493 },
9494 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9495 .errstr = "back-edge from insn 0 to 0",
9496 .result = REJECT,
9497 },
9498 {
9499 "calls: conditional call",
9500 .insns = {
9501 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9502 offsetof(struct __sk_buff, mark)),
9503 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9504 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9505 BPF_MOV64_IMM(BPF_REG_0, 1),
9506 BPF_EXIT_INSN(),
9507 BPF_MOV64_IMM(BPF_REG_0, 2),
9508 BPF_EXIT_INSN(),
9509 },
9510 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9511 .errstr = "jump out of range",
9512 .result = REJECT,
9513 },
9514 {
9515 "calls: conditional call 2",
9516 .insns = {
9517 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9518 offsetof(struct __sk_buff, mark)),
9519 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9520 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9521 BPF_MOV64_IMM(BPF_REG_0, 1),
9522 BPF_EXIT_INSN(),
9523 BPF_MOV64_IMM(BPF_REG_0, 2),
9524 BPF_EXIT_INSN(),
9525 BPF_MOV64_IMM(BPF_REG_0, 3),
9526 BPF_EXIT_INSN(),
9527 },
9528 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9529 .result = ACCEPT,
9530 },
9531 {
9532 "calls: conditional call 3",
9533 .insns = {
9534 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9535 offsetof(struct __sk_buff, mark)),
9536 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9537 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9538 BPF_MOV64_IMM(BPF_REG_0, 1),
9539 BPF_EXIT_INSN(),
9540 BPF_MOV64_IMM(BPF_REG_0, 1),
9541 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9542 BPF_MOV64_IMM(BPF_REG_0, 3),
9543 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9544 },
9545 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9546 .errstr = "back-edge from insn",
9547 .result = REJECT,
9548 },
9549 {
9550 "calls: conditional call 4",
9551 .insns = {
9552 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9553 offsetof(struct __sk_buff, mark)),
9554 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9555 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9556 BPF_MOV64_IMM(BPF_REG_0, 1),
9557 BPF_EXIT_INSN(),
9558 BPF_MOV64_IMM(BPF_REG_0, 1),
9559 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
9560 BPF_MOV64_IMM(BPF_REG_0, 3),
9561 BPF_EXIT_INSN(),
9562 },
9563 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9564 .result = ACCEPT,
9565 },
9566 {
9567 "calls: conditional call 5",
9568 .insns = {
9569 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9570 offsetof(struct __sk_buff, mark)),
9571 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9572 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9573 BPF_MOV64_IMM(BPF_REG_0, 1),
9574 BPF_EXIT_INSN(),
9575 BPF_MOV64_IMM(BPF_REG_0, 1),
9576 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9577 BPF_MOV64_IMM(BPF_REG_0, 3),
9578 BPF_EXIT_INSN(),
9579 },
9580 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9581 .errstr = "back-edge from insn",
9582 .result = REJECT,
9583 },
9584 {
9585 "calls: conditional call 6",
9586 .insns = {
9587 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9588 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
9589 BPF_EXIT_INSN(),
9590 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9591 offsetof(struct __sk_buff, mark)),
9592 BPF_EXIT_INSN(),
9593 },
9594 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9595 .errstr = "back-edge from insn",
9596 .result = REJECT,
9597 },
9598 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009599 "calls: using r0 returned by callee",
9600 .insns = {
9601 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9602 BPF_EXIT_INSN(),
9603 BPF_MOV64_IMM(BPF_REG_0, 2),
9604 BPF_EXIT_INSN(),
9605 },
9606 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9607 .result = ACCEPT,
9608 },
9609 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009610 "calls: using uninit r0 from callee",
9611 .insns = {
9612 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9613 BPF_EXIT_INSN(),
9614 BPF_EXIT_INSN(),
9615 },
9616 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9617 .errstr = "!read_ok",
9618 .result = REJECT,
9619 },
9620 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009621 "calls: callee is using r1",
9622 .insns = {
9623 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9624 BPF_EXIT_INSN(),
9625 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9626 offsetof(struct __sk_buff, len)),
9627 BPF_EXIT_INSN(),
9628 },
9629 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
9630 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009631 .retval = TEST_DATA_LEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009632 },
9633 {
9634 "calls: callee using args1",
9635 .insns = {
9636 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9637 BPF_EXIT_INSN(),
9638 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9639 BPF_EXIT_INSN(),
9640 },
9641 .errstr_unpriv = "allowed for root only",
9642 .result_unpriv = REJECT,
9643 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009644 .retval = POINTER_VALUE,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009645 },
9646 {
9647 "calls: callee using wrong args2",
9648 .insns = {
9649 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9650 BPF_EXIT_INSN(),
9651 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9652 BPF_EXIT_INSN(),
9653 },
9654 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9655 .errstr = "R2 !read_ok",
9656 .result = REJECT,
9657 },
9658 {
9659 "calls: callee using two args",
9660 .insns = {
9661 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9662 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
9663 offsetof(struct __sk_buff, len)),
9664 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
9665 offsetof(struct __sk_buff, len)),
9666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9667 BPF_EXIT_INSN(),
9668 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9669 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9670 BPF_EXIT_INSN(),
9671 },
9672 .errstr_unpriv = "allowed for root only",
9673 .result_unpriv = REJECT,
9674 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009675 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009676 },
9677 {
9678 "calls: callee changing pkt pointers",
9679 .insns = {
9680 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
9681 offsetof(struct xdp_md, data)),
9682 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
9683 offsetof(struct xdp_md, data_end)),
9684 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
9685 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
9686 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
9687 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9688 /* clear_all_pkt_pointers() has to walk all frames
9689 * to make sure that pkt pointers in the caller
9690 * are cleared when callee is calling a helper that
9691 * adjusts packet size
9692 */
9693 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
9694 BPF_MOV32_IMM(BPF_REG_0, 0),
9695 BPF_EXIT_INSN(),
9696 BPF_MOV64_IMM(BPF_REG_2, 0),
9697 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9698 BPF_FUNC_xdp_adjust_head),
9699 BPF_EXIT_INSN(),
9700 },
9701 .result = REJECT,
9702 .errstr = "R6 invalid mem access 'inv'",
9703 .prog_type = BPF_PROG_TYPE_XDP,
9704 },
9705 {
9706 "calls: two calls with args",
9707 .insns = {
9708 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9709 BPF_EXIT_INSN(),
9710 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9711 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9712 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9713 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9714 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9715 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9716 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9717 BPF_EXIT_INSN(),
9718 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9719 offsetof(struct __sk_buff, len)),
9720 BPF_EXIT_INSN(),
9721 },
9722 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9723 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009724 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009725 },
9726 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009727 "calls: calls with stack arith",
9728 .insns = {
9729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9731 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9732 BPF_EXIT_INSN(),
9733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9734 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9735 BPF_EXIT_INSN(),
9736 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9737 BPF_MOV64_IMM(BPF_REG_0, 42),
9738 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9739 BPF_EXIT_INSN(),
9740 },
9741 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9742 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009743 .retval = 42,
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009744 },
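	/* The next test repeats the pattern above under
	 * F_LOAD_WITH_STRICT_ALIGNMENT with the stack pointer offset by
	 * -63/-61, so the 8-byte store is no longer aligned and is rejected
	 * as "misaligned stack access".
	 */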
9745 {
9746 "calls: calls with misaligned stack access",
9747 .insns = {
9748 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9749 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9750 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9751 BPF_EXIT_INSN(),
9752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
9753 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9754 BPF_EXIT_INSN(),
9755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9756 BPF_MOV64_IMM(BPF_REG_0, 42),
9757 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9758 BPF_EXIT_INSN(),
9759 },
9760 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9761 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
9762 .errstr = "misaligned stack access",
9763 .result = REJECT,
9764 },
9765 {
9766 "calls: calls control flow, jump test",
9767 .insns = {
9768 BPF_MOV64_IMM(BPF_REG_0, 42),
9769 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9770 BPF_MOV64_IMM(BPF_REG_0, 43),
9771 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9772 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
9773 BPF_EXIT_INSN(),
9774 },
9775 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9776 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009777 .retval = 43,
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009778 },
9779 {
9780 "calls: calls control flow, jump test 2",
9781 .insns = {
9782 BPF_MOV64_IMM(BPF_REG_0, 42),
9783 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9784 BPF_MOV64_IMM(BPF_REG_0, 43),
9785 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9786 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9787 BPF_EXIT_INSN(),
9788 },
9789 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9790 .errstr = "jump out of range from insn 1 to 4",
9791 .result = REJECT,
9792 },
9793 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009794 "calls: two calls with bad jump",
9795 .insns = {
9796 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9797 BPF_EXIT_INSN(),
9798 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9799 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9800 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9801 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9802 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9803 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9804 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9805 BPF_EXIT_INSN(),
9806 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9807 offsetof(struct __sk_buff, len)),
9808 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
9809 BPF_EXIT_INSN(),
9810 },
9811 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9812 .errstr = "jump out of range from insn 11 to 9",
9813 .result = REJECT,
9814 },
9815 {
9816 "calls: recursive call. test1",
9817 .insns = {
9818 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9819 BPF_EXIT_INSN(),
9820 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9821 BPF_EXIT_INSN(),
9822 },
9823 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9824 .errstr = "back-edge",
9825 .result = REJECT,
9826 },
9827 {
9828 "calls: recursive call. test2",
9829 .insns = {
9830 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9831 BPF_EXIT_INSN(),
9832 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9833 BPF_EXIT_INSN(),
9834 },
9835 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9836 .errstr = "back-edge",
9837 .result = REJECT,
9838 },
9839 {
9840 "calls: unreachable code",
9841 .insns = {
9842 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9843 BPF_EXIT_INSN(),
9844 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9845 BPF_EXIT_INSN(),
9846 BPF_MOV64_IMM(BPF_REG_0, 0),
9847 BPF_EXIT_INSN(),
9848 BPF_MOV64_IMM(BPF_REG_0, 0),
9849 BPF_EXIT_INSN(),
9850 },
9851 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9852 .errstr = "unreachable insn 6",
9853 .result = REJECT,
9854 },
9855 {
9856 "calls: invalid call",
9857 .insns = {
9858 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9859 BPF_EXIT_INSN(),
9860 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
9861 BPF_EXIT_INSN(),
9862 },
9863 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9864 .errstr = "invalid destination",
9865 .result = REJECT,
9866 },
9867 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009868 "calls: invalid call 2",
9869 .insns = {
9870 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9871 BPF_EXIT_INSN(),
9872 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
9873 BPF_EXIT_INSN(),
9874 },
9875 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9876 .errstr = "invalid destination",
9877 .result = REJECT,
9878 },
9879 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009880 "calls: jumping across function bodies. test1",
9881 .insns = {
9882 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9883 BPF_MOV64_IMM(BPF_REG_0, 0),
9884 BPF_EXIT_INSN(),
9885 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
9886 BPF_EXIT_INSN(),
9887 },
9888 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9889 .errstr = "jump out of range",
9890 .result = REJECT,
9891 },
9892 {
9893 "calls: jumping across function bodies. test2",
9894 .insns = {
9895 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
9896 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9897 BPF_MOV64_IMM(BPF_REG_0, 0),
9898 BPF_EXIT_INSN(),
9899 BPF_EXIT_INSN(),
9900 },
9901 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9902 .errstr = "jump out of range",
9903 .result = REJECT,
9904 },
9905 {
9906 "calls: call without exit",
9907 .insns = {
9908 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9909 BPF_EXIT_INSN(),
9910 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9911 BPF_EXIT_INSN(),
9912 BPF_MOV64_IMM(BPF_REG_0, 0),
9913 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
9914 },
9915 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9916 .errstr = "not an exit",
9917 .result = REJECT,
9918 },
9919 {
9920 "calls: call into middle of ld_imm64",
9921 .insns = {
9922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9923 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9924 BPF_MOV64_IMM(BPF_REG_0, 0),
9925 BPF_EXIT_INSN(),
9926 BPF_LD_IMM64(BPF_REG_0, 0),
9927 BPF_EXIT_INSN(),
9928 },
9929 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9930 .errstr = "last insn",
9931 .result = REJECT,
9932 },
9933 {
9934 "calls: call into middle of other call",
9935 .insns = {
9936 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9937 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9938 BPF_MOV64_IMM(BPF_REG_0, 0),
9939 BPF_EXIT_INSN(),
9940 BPF_MOV64_IMM(BPF_REG_0, 0),
9941 BPF_MOV64_IMM(BPF_REG_0, 0),
9942 BPF_EXIT_INSN(),
9943 },
9944 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9945 .errstr = "last insn",
9946 .result = REJECT,
9947 },
9948 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009949 "calls: ld_abs with changing ctx data in callee",
9950 .insns = {
9951 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9952 BPF_LD_ABS(BPF_B, 0),
9953 BPF_LD_ABS(BPF_H, 0),
9954 BPF_LD_ABS(BPF_W, 0),
9955 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
9956 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
9957 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
9958 BPF_LD_ABS(BPF_B, 0),
9959 BPF_LD_ABS(BPF_H, 0),
9960 BPF_LD_ABS(BPF_W, 0),
9961 BPF_EXIT_INSN(),
9962 BPF_MOV64_IMM(BPF_REG_2, 1),
9963 BPF_MOV64_IMM(BPF_REG_3, 2),
9964 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9965 BPF_FUNC_skb_vlan_push),
9966 BPF_EXIT_INSN(),
9967 },
9968 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9969 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
9970 .result = REJECT,
9971 },
9972 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009973 "calls: two calls with bad fallthrough",
9974 .insns = {
9975 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9976 BPF_EXIT_INSN(),
9977 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9978 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9979 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9980 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9981 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9982 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9983 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9984 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
9985 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9986 offsetof(struct __sk_buff, len)),
9987 BPF_EXIT_INSN(),
9988 },
9989 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9990 .errstr = "not an exit",
9991 .result = REJECT,
9992 },
9993 {
9994 "calls: two calls with stack read",
9995 .insns = {
9996 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9997 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9999 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10000 BPF_EXIT_INSN(),
10001 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10002 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10003 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10004 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10005 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10006 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10007 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10008 BPF_EXIT_INSN(),
10009 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10010 BPF_EXIT_INSN(),
10011 },
10012 .prog_type = BPF_PROG_TYPE_XDP,
10013 .result = ACCEPT,
10014 },
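	/* Pointers into the caller's stack can be passed to a callee as
	 * arguments; the tests above and below read and write the caller's
	 * stack through such pointers from two levels of subprogs.
	 */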
10015 {
10016 "calls: two calls with stack write",
10017 .insns = {
10018 /* main prog */
10019 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10020 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10021 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10022 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10023 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10024 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10025 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10026 BPF_EXIT_INSN(),
10027
10028 /* subprog 1 */
10029 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10030 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
10032 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
10033 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10034 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10035 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
10036 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
10037 /* write into stack frame of main prog */
10038 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10039 BPF_EXIT_INSN(),
10040
10041 /* subprog 2 */
10042 /* read from stack frame of main prog */
10043 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10044 BPF_EXIT_INSN(),
10045 },
10046 .prog_type = BPF_PROG_TYPE_XDP,
10047 .result = ACCEPT,
10048 },
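	/* Stack usage is accounted across call frames: the deepest chain of
	 * frames may not use more than 512 bytes in total, otherwise the
	 * program is rejected with "combined stack size".
	 */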
10049 {
Jann Horn6b80ad22017-12-22 19:12:35 +010010050 "calls: stack overflow using two frames (pre-call access)",
10051 .insns = {
10052 /* prog 1 */
10053 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10054 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
10055 BPF_EXIT_INSN(),
10056
10057 /* prog 2 */
10058 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10059 BPF_MOV64_IMM(BPF_REG_0, 0),
10060 BPF_EXIT_INSN(),
10061 },
10062 .prog_type = BPF_PROG_TYPE_XDP,
10063 .errstr = "combined stack size",
10064 .result = REJECT,
10065 },
10066 {
10067 "calls: stack overflow using two frames (post-call access)",
10068 .insns = {
10069 /* prog 1 */
10070 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
10071 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10072 BPF_EXIT_INSN(),
10073
10074 /* prog 2 */
10075 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10076 BPF_MOV64_IMM(BPF_REG_0, 0),
10077 BPF_EXIT_INSN(),
10078 },
10079 .prog_type = BPF_PROG_TYPE_XDP,
10080 .errstr = "combined stack size",
10081 .result = REJECT,
10082 },
10083 {
Alexei Starovoitov6b86c422017-12-25 13:15:41 -080010084 "calls: stack depth check using three frames. test1",
10085 .insns = {
10086 /* main */
10087 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10088 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10089 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10090 BPF_MOV64_IMM(BPF_REG_0, 0),
10091 BPF_EXIT_INSN(),
10092 /* A */
10093 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10094 BPF_EXIT_INSN(),
10095 /* B */
10096 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10097 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10098 BPF_EXIT_INSN(),
10099 },
10100 .prog_type = BPF_PROG_TYPE_XDP,
10101 /* stack_main=32, stack_A=256, stack_B=64
10102 * and max(main+A, main+A+B) < 512
10103 */
10104 .result = ACCEPT,
10105 },
10106 {
10107 "calls: stack depth check using three frames. test2",
10108 .insns = {
10109 /* main */
10110 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10111 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10112 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10113 BPF_MOV64_IMM(BPF_REG_0, 0),
10114 BPF_EXIT_INSN(),
10115 /* A */
10116 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10117 BPF_EXIT_INSN(),
10118 /* B */
10119 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10120 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10121 BPF_EXIT_INSN(),
10122 },
10123 .prog_type = BPF_PROG_TYPE_XDP,
10124 /* stack_main=32, stack_A=64, stack_B=256
10125 * and max(main+A, main+A+B) < 512
10126 */
10127 .result = ACCEPT,
10128 },
10129 {
10130 "calls: stack depth check using three frames. test3",
10131 .insns = {
10132 /* main */
10133 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10134 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10135 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10136 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
10137 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
10138 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10139 BPF_MOV64_IMM(BPF_REG_0, 0),
10140 BPF_EXIT_INSN(),
10141 /* A */
10142 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
10143 BPF_EXIT_INSN(),
10144 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
10145 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10146 /* B */
10147 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
10148 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
10149 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10150 BPF_EXIT_INSN(),
10151 },
10152 .prog_type = BPF_PROG_TYPE_XDP,
10153 /* stack_main=64, stack_A=224, stack_B=256
10154 * and max(main+A, main+A+B) > 512
10155 */
10156 .errstr = "combined stack",
10157 .result = REJECT,
10158 },
10159 {
10160 "calls: stack depth check using three frames. test4",
10161 /* void main(void) {
10162 * func1(0);
10163 * func1(1);
10164 * func2(1);
10165 * }
10166 * void func1(int alloc_or_recurse) {
10167 * if (alloc_or_recurse) {
10168 * frame_pointer[-300] = 1;
10169 * } else {
10170 * func2(alloc_or_recurse);
10171 * }
10172 * }
10173 * void func2(int alloc_or_recurse) {
10174 * if (alloc_or_recurse) {
10175 * frame_pointer[-300] = 1;
10176 * }
10177 * }
10178 */
10179 .insns = {
10180 /* main */
10181 BPF_MOV64_IMM(BPF_REG_1, 0),
10182 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10183 BPF_MOV64_IMM(BPF_REG_1, 1),
10184 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10185 BPF_MOV64_IMM(BPF_REG_1, 1),
10186 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
10187 BPF_MOV64_IMM(BPF_REG_0, 0),
10188 BPF_EXIT_INSN(),
10189 /* A */
10190 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
10191 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10192 BPF_EXIT_INSN(),
10193 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10194 BPF_EXIT_INSN(),
10195 /* B */
10196 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10197 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10198 BPF_EXIT_INSN(),
10199 },
10200 .prog_type = BPF_PROG_TYPE_XDP,
10201 .result = REJECT,
10202 .errstr = "combined stack",
10203 },
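	/* Besides total stack size, the verifier also limits how deeply
	 * bpf-to-bpf calls may nest; the chain in test5 below (main plus
	 * eight callees A-H) exceeds that limit and fails with "call stack".
	 */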
10204 {
Alexei Starovoitovaada9ce2017-12-25 13:15:42 -080010205 "calls: stack depth check using three frames. test5",
10206 .insns = {
10207 /* main */
10208 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
10209 BPF_EXIT_INSN(),
10210 /* A */
10211 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10212 BPF_EXIT_INSN(),
10213 /* B */
10214 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
10215 BPF_EXIT_INSN(),
10216 /* C */
10217 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
10218 BPF_EXIT_INSN(),
10219 /* D */
10220 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
10221 BPF_EXIT_INSN(),
10222 /* E */
10223 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
10224 BPF_EXIT_INSN(),
10225 /* F */
10226 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
10227 BPF_EXIT_INSN(),
10228 /* G */
10229 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
10230 BPF_EXIT_INSN(),
10231 /* H */
10232 BPF_MOV64_IMM(BPF_REG_0, 0),
10233 BPF_EXIT_INSN(),
10234 },
10235 .prog_type = BPF_PROG_TYPE_XDP,
10236 .errstr = "call stack",
10237 .result = REJECT,
10238 },
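	/* The next tests check writes across frames: a callee may store
	 * scalars through a stack pointer handed in by the caller, but it
	 * cannot spill a pointer into the caller's frame ("cannot spill")
	 * and cannot return a pointer to its own stack ("cannot return
	 * stack pointer").
	 */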
10239 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010240 "calls: spill into caller stack frame",
10241 .insns = {
10242 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10243 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10245 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10246 BPF_EXIT_INSN(),
10247 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
10248 BPF_MOV64_IMM(BPF_REG_0, 0),
10249 BPF_EXIT_INSN(),
10250 },
10251 .prog_type = BPF_PROG_TYPE_XDP,
10252 .errstr = "cannot spill",
10253 .result = REJECT,
10254 },
10255 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010256 "calls: write into caller stack frame",
10257 .insns = {
10258 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10260 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10261 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10262 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10263 BPF_EXIT_INSN(),
10264 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
10265 BPF_MOV64_IMM(BPF_REG_0, 0),
10266 BPF_EXIT_INSN(),
10267 },
10268 .prog_type = BPF_PROG_TYPE_XDP,
10269 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010270 .retval = 42,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010271 },
10272 {
10273 "calls: write into callee stack frame",
10274 .insns = {
10275 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10276 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
10277 BPF_EXIT_INSN(),
10278 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
10279 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
10280 BPF_EXIT_INSN(),
10281 },
10282 .prog_type = BPF_PROG_TYPE_XDP,
10283 .errstr = "cannot return stack pointer",
10284 .result = REJECT,
10285 },
10286 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010287 "calls: two calls with stack write and void return",
10288 .insns = {
10289 /* main prog */
10290 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10291 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10292 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10293 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10295 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10296 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10297 BPF_EXIT_INSN(),
10298
10299 /* subprog 1 */
10300 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10301 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10302 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10303 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10304 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10305 BPF_EXIT_INSN(),
10306
10307 /* subprog 2 */
10308 /* write into stack frame of main prog */
10309 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
10310 BPF_EXIT_INSN(), /* void return */
10311 },
10312 .prog_type = BPF_PROG_TYPE_XDP,
10313 .result = ACCEPT,
10314 },
10315 {
10316 "calls: ambiguous return value",
10317 .insns = {
10318 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10319 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10320 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10321 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10322 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10323 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10324 BPF_EXIT_INSN(),
10325 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10326 BPF_MOV64_IMM(BPF_REG_0, 0),
10327 BPF_EXIT_INSN(),
10328 },
10329 .errstr_unpriv = "allowed for root only",
10330 .result_unpriv = REJECT,
10331 .errstr = "R0 !read_ok",
10332 .result = REJECT,
10333 },
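	/* In the map_value tests below a subprog does the map lookup and
	 * stores the result into the caller's stack.  The verifier has to
	 * track whether each slot holds a map value pointer or NULL across
	 * the call and only allow dereferences after the NULL check, hence
	 * the "invalid read from stack" rejection when the check is wrong.
	 */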
10334 {
10335 "calls: two calls that return map_value",
10336 .insns = {
10337 /* main prog */
10338 /* pass fp-16, fp-8 into a function */
10339 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10341 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10342 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10343 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10344
10345 /* fetch map_value_ptr from the stack of this function */
10346 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
10347 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10348 /* write into map value */
10349 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10350			/* fetch second map_value_ptr from the stack */
10351 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10352 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10353 /* write into map value */
10354 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10355 BPF_MOV64_IMM(BPF_REG_0, 0),
10356 BPF_EXIT_INSN(),
10357
10358 /* subprog 1 */
10359 /* call 3rd function twice */
10360 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10361 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10362 /* first time with fp-8 */
10363 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10364 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10365 /* second time with fp-16 */
10366 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10367 BPF_EXIT_INSN(),
10368
10369 /* subprog 2 */
10370 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10371 /* lookup from map */
10372 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10373 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10375 BPF_LD_MAP_FD(BPF_REG_1, 0),
10376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10377 BPF_FUNC_map_lookup_elem),
10378 /* write map_value_ptr into stack frame of main prog */
10379 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10380 BPF_MOV64_IMM(BPF_REG_0, 0),
10381 BPF_EXIT_INSN(), /* return 0 */
10382 },
10383 .prog_type = BPF_PROG_TYPE_XDP,
10384 .fixup_map1 = { 23 },
10385 .result = ACCEPT,
10386 },
10387 {
10388 "calls: two calls that return map_value with bool condition",
10389 .insns = {
10390 /* main prog */
10391 /* pass fp-16, fp-8 into a function */
10392 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10394 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10397 BPF_MOV64_IMM(BPF_REG_0, 0),
10398 BPF_EXIT_INSN(),
10399
10400 /* subprog 1 */
10401 /* call 3rd function twice */
10402 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10403 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10404 /* first time with fp-8 */
10405 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10406 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10407 /* fetch map_value_ptr from the stack of this function */
10408 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10409 /* write into map value */
10410 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10412 /* second time with fp-16 */
10413 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10414 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10415			/* fetch second map_value_ptr from the stack */
10416 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10417 /* write into map value */
10418 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10419 BPF_EXIT_INSN(),
10420
10421 /* subprog 2 */
10422 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10423 /* lookup from map */
10424 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10425 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10426 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10427 BPF_LD_MAP_FD(BPF_REG_1, 0),
10428 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10429 BPF_FUNC_map_lookup_elem),
10430 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10431 BPF_MOV64_IMM(BPF_REG_0, 0),
10432 BPF_EXIT_INSN(), /* return 0 */
10433 /* write map_value_ptr into stack frame of main prog */
10434 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10435 BPF_MOV64_IMM(BPF_REG_0, 1),
10436 BPF_EXIT_INSN(), /* return 1 */
10437 },
10438 .prog_type = BPF_PROG_TYPE_XDP,
10439 .fixup_map1 = { 23 },
10440 .result = ACCEPT,
10441 },
10442 {
10443 "calls: two calls that return map_value with incorrect bool check",
10444 .insns = {
10445 /* main prog */
10446 /* pass fp-16, fp-8 into a function */
10447 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10449 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10451 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10452 BPF_MOV64_IMM(BPF_REG_0, 0),
10453 BPF_EXIT_INSN(),
10454
10455 /* subprog 1 */
10456 /* call 3rd function twice */
10457 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10458 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10459 /* first time with fp-8 */
10460 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10461 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10462 /* fetch map_value_ptr from the stack of this function */
10463 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10464 /* write into map value */
10465 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10466 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10467 /* second time with fp-16 */
10468 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10469 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10470			/* fetch second map_value_ptr from the stack */
10471 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10472 /* write into map value */
10473 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10474 BPF_EXIT_INSN(),
10475
10476 /* subprog 2 */
10477 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10478 /* lookup from map */
10479 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10480 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10481 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10482 BPF_LD_MAP_FD(BPF_REG_1, 0),
10483 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10484 BPF_FUNC_map_lookup_elem),
10485 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10486 BPF_MOV64_IMM(BPF_REG_0, 0),
10487 BPF_EXIT_INSN(), /* return 0 */
10488 /* write map_value_ptr into stack frame of main prog */
10489 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10490 BPF_MOV64_IMM(BPF_REG_0, 1),
10491 BPF_EXIT_INSN(), /* return 1 */
10492 },
10493 .prog_type = BPF_PROG_TYPE_XDP,
10494 .fixup_map1 = { 23 },
10495 .result = REJECT,
10496 .errstr = "invalid read from stack off -16+0 size 8",
10497 },
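	/* Below, the spilled map value pointers are passed on to a third
	 * function together with 0/1 flags recording whether each lookup
	 * succeeded.  Stores must stay inside the 8-byte value, so the
	 * variants that do an 8-byte store at offset 2 are rejected with
	 * "invalid access to map value, value_size=8 off=2 size=8".
	 */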
10498 {
10499 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
10500 .insns = {
10501 /* main prog */
10502 /* pass fp-16, fp-8 into a function */
10503 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10505 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10508 BPF_MOV64_IMM(BPF_REG_0, 0),
10509 BPF_EXIT_INSN(),
10510
10511 /* subprog 1 */
10512 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10513 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10514 /* 1st lookup from map */
10515 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10516 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10518 BPF_LD_MAP_FD(BPF_REG_1, 0),
10519 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10520 BPF_FUNC_map_lookup_elem),
10521 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10522 BPF_MOV64_IMM(BPF_REG_8, 0),
10523 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10524 /* write map_value_ptr into stack frame of main prog at fp-8 */
10525 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10526 BPF_MOV64_IMM(BPF_REG_8, 1),
10527
10528 /* 2nd lookup from map */
10529 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10531 BPF_LD_MAP_FD(BPF_REG_1, 0),
10532 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10533 BPF_FUNC_map_lookup_elem),
10534 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10535 BPF_MOV64_IMM(BPF_REG_9, 0),
10536 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10537 /* write map_value_ptr into stack frame of main prog at fp-16 */
10538 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10539 BPF_MOV64_IMM(BPF_REG_9, 1),
10540
10541 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10542 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10543 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10544 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10545 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10546 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10547 BPF_EXIT_INSN(),
10548
10549 /* subprog 2 */
10550 /* if arg2 == 1 do *arg1 = 0 */
10551 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10552 /* fetch map_value_ptr from the stack of this function */
10553 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10554 /* write into map value */
10555 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10556
10557 /* if arg4 == 1 do *arg3 = 0 */
10558 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10559 /* fetch map_value_ptr from the stack of this function */
10560 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10561 /* write into map value */
10562 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10563 BPF_EXIT_INSN(),
10564 },
10565 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10566 .fixup_map1 = { 12, 22 },
10567 .result = REJECT,
10568 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10569 },
10570 {
10571 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
10572 .insns = {
10573 /* main prog */
10574 /* pass fp-16, fp-8 into a function */
10575 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10576 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10577 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10579 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10580 BPF_MOV64_IMM(BPF_REG_0, 0),
10581 BPF_EXIT_INSN(),
10582
10583 /* subprog 1 */
10584 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10585 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10586 /* 1st lookup from map */
10587 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10588 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10590 BPF_LD_MAP_FD(BPF_REG_1, 0),
10591 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10592 BPF_FUNC_map_lookup_elem),
10593 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10594 BPF_MOV64_IMM(BPF_REG_8, 0),
10595 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10596 /* write map_value_ptr into stack frame of main prog at fp-8 */
10597 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10598 BPF_MOV64_IMM(BPF_REG_8, 1),
10599
10600 /* 2nd lookup from map */
10601 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10602 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10603 BPF_LD_MAP_FD(BPF_REG_1, 0),
10604 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10605 BPF_FUNC_map_lookup_elem),
10606 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10607 BPF_MOV64_IMM(BPF_REG_9, 0),
10608 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10609 /* write map_value_ptr into stack frame of main prog at fp-16 */
10610 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10611 BPF_MOV64_IMM(BPF_REG_9, 1),
10612
10613 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10614 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10615 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10616 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10617 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10619 BPF_EXIT_INSN(),
10620
10621 /* subprog 2 */
10622 /* if arg2 == 1 do *arg1 = 0 */
10623 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10624 /* fetch map_value_ptr from the stack of this function */
10625 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10626 /* write into map value */
10627 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10628
10629 /* if arg4 == 1 do *arg3 = 0 */
10630 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10631 /* fetch map_value_ptr from the stack of this function */
10632 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10633 /* write into map value */
10634 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10635 BPF_EXIT_INSN(),
10636 },
10637 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10638 .fixup_map1 = { 12, 22 },
10639 .result = ACCEPT,
10640 },
10641 {
10642 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
10643 .insns = {
10644 /* main prog */
10645 /* pass fp-16, fp-8 into a function */
10646 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10648 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10649 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10650 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
10651 BPF_MOV64_IMM(BPF_REG_0, 0),
10652 BPF_EXIT_INSN(),
10653
10654 /* subprog 1 */
10655 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10656 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10657 /* 1st lookup from map */
10658 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
10659 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10661 BPF_LD_MAP_FD(BPF_REG_1, 0),
10662 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10663 BPF_FUNC_map_lookup_elem),
10664 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10665 BPF_MOV64_IMM(BPF_REG_8, 0),
10666 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10667 /* write map_value_ptr into stack frame of main prog at fp-8 */
10668 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10669 BPF_MOV64_IMM(BPF_REG_8, 1),
10670
10671 /* 2nd lookup from map */
10672 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10673 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10674 BPF_LD_MAP_FD(BPF_REG_1, 0),
10675 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10676 BPF_FUNC_map_lookup_elem),
10677 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10678 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
10679 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10680 /* write map_value_ptr into stack frame of main prog at fp-16 */
10681 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10682 BPF_MOV64_IMM(BPF_REG_9, 1),
10683
10684 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10685 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
10686 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10687 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10688 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10689 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
10690 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
10691
10692 /* subprog 2 */
10693 /* if arg2 == 1 do *arg1 = 0 */
10694 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10695 /* fetch map_value_ptr from the stack of this function */
10696 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10697 /* write into map value */
10698 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10699
10700 /* if arg4 == 1 do *arg3 = 0 */
10701 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10702 /* fetch map_value_ptr from the stack of this function */
10703 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10704 /* write into map value */
10705 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10706 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
10707 },
10708 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10709 .fixup_map1 = { 12, 22 },
10710 .result = REJECT,
10711 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10712 },
10713 {
10714 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
10715 .insns = {
10716 /* main prog */
10717 /* pass fp-16, fp-8 into a function */
10718 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10719 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10720 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10722 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10723 BPF_MOV64_IMM(BPF_REG_0, 0),
10724 BPF_EXIT_INSN(),
10725
10726 /* subprog 1 */
10727 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10728 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10729 /* 1st lookup from map */
10730 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10731 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10732 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10733 BPF_LD_MAP_FD(BPF_REG_1, 0),
10734 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10735 BPF_FUNC_map_lookup_elem),
10736 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10737 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10738 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10739 BPF_MOV64_IMM(BPF_REG_8, 0),
10740 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10741 BPF_MOV64_IMM(BPF_REG_8, 1),
10742
10743 /* 2nd lookup from map */
10744 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10746 BPF_LD_MAP_FD(BPF_REG_1, 0),
10747 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10748 BPF_FUNC_map_lookup_elem),
10749 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10750 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10751 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10752 BPF_MOV64_IMM(BPF_REG_9, 0),
10753 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10754 BPF_MOV64_IMM(BPF_REG_9, 1),
10755
10756 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10758 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10759 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10760 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10761 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10762 BPF_EXIT_INSN(),
10763
10764 /* subprog 2 */
10765 /* if arg2 == 1 do *arg1 = 0 */
10766 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10767 /* fetch map_value_ptr from the stack of this function */
10768 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10769 /* write into map value */
10770 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10771
10772 /* if arg4 == 1 do *arg3 = 0 */
10773 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10774 /* fetch map_value_ptr from the stack of this function */
10775 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10776 /* write into map value */
10777 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10778 BPF_EXIT_INSN(),
10779 },
10780 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10781 .fixup_map1 = { 12, 22 },
10782 .result = ACCEPT,
10783 },
10784 {
10785 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
10786 .insns = {
10787 /* main prog */
10788 /* pass fp-16, fp-8 into a function */
10789 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10790 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10791 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10792 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10793 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10794 BPF_MOV64_IMM(BPF_REG_0, 0),
10795 BPF_EXIT_INSN(),
10796
10797 /* subprog 1 */
10798 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10799 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10800 /* 1st lookup from map */
10801 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10802 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10803 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10804 BPF_LD_MAP_FD(BPF_REG_1, 0),
10805 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10806 BPF_FUNC_map_lookup_elem),
10807 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10808 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10809 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10810 BPF_MOV64_IMM(BPF_REG_8, 0),
10811 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10812 BPF_MOV64_IMM(BPF_REG_8, 1),
10813
10814 /* 2nd lookup from map */
10815 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10816 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10817 BPF_LD_MAP_FD(BPF_REG_1, 0),
10818 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10819 BPF_FUNC_map_lookup_elem),
10820 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10821 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10822 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10823 BPF_MOV64_IMM(BPF_REG_9, 0),
10824 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10825 BPF_MOV64_IMM(BPF_REG_9, 1),
10826
10827 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10828 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10829 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10830 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10831 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10832 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10833 BPF_EXIT_INSN(),
10834
10835 /* subprog 2 */
10836 /* if arg2 == 1 do *arg1 = 0 */
10837 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10838 /* fetch map_value_ptr from the stack of this function */
10839 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10840 /* write into map value */
10841 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10842
10843 /* if arg4 == 0 do *arg3 = 0 */
10844 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
10845 /* fetch map_value_ptr from the stack of this function */
10846 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10847 /* write into map value */
10848 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10849 BPF_EXIT_INSN(),
10850 },
10851 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10852 .fixup_map1 = { 12, 22 },
10853 .result = REJECT,
10854 .errstr = "R0 invalid mem access 'inv'",
10855 },
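	/* The "pkt_ptr spill into caller stack" series spills a packet
	 * pointer from the callee into the caller's stack frame, either
	 * before or after the data_end range check. The intent is that
	 * the verifier only permits packet writes through the spilled
	 * pointer on paths where the range check is known to hold.
	 */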
10856 {
10857 "calls: pkt_ptr spill into caller stack",
10858 .insns = {
10859 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10861 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10862 BPF_EXIT_INSN(),
10863
10864 /* subprog 1 */
10865 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10866 offsetof(struct __sk_buff, data)),
10867 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10868 offsetof(struct __sk_buff, data_end)),
10869 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10871 /* spill unchecked pkt_ptr into stack of caller */
10872 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10873 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10874 /* now the pkt range is verified, read pkt_ptr from stack */
10875 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10876 /* write 4 bytes into packet */
10877 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10878 BPF_EXIT_INSN(),
10879 },
10880 .result = ACCEPT,
10881 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10882		.retval = POINTER_VALUE,
10883	},
10884	{
10885		"calls: pkt_ptr spill into caller stack 2",
10886 .insns = {
10887 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10889 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10890 /* Marking is still kept, but not in all cases safe. */
10891 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10892 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10893 BPF_EXIT_INSN(),
10894
10895 /* subprog 1 */
10896 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10897 offsetof(struct __sk_buff, data)),
10898 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10899 offsetof(struct __sk_buff, data_end)),
10900 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10902 /* spill unchecked pkt_ptr into stack of caller */
10903 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10904 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10905 /* now the pkt range is verified, read pkt_ptr from stack */
10906 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10907 /* write 4 bytes into packet */
10908 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10909 BPF_EXIT_INSN(),
10910 },
10911 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10912 .errstr = "invalid access to packet",
10913 .result = REJECT,
10914 },
10915 {
10916 "calls: pkt_ptr spill into caller stack 3",
10917 .insns = {
10918 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10920 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10921 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
10922 /* Marking is still kept and safe here. */
10923 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10924 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10925 BPF_EXIT_INSN(),
10926
10927 /* subprog 1 */
10928 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10929 offsetof(struct __sk_buff, data)),
10930 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10931 offsetof(struct __sk_buff, data_end)),
10932 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10933 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10934 /* spill unchecked pkt_ptr into stack of caller */
10935 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10936 BPF_MOV64_IMM(BPF_REG_5, 0),
10937 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10938 BPF_MOV64_IMM(BPF_REG_5, 1),
10939 /* now the pkt range is verified, read pkt_ptr from stack */
10940 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10941 /* write 4 bytes into packet */
10942 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10943 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10944 BPF_EXIT_INSN(),
10945 },
10946 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10947 .result = ACCEPT,
10948		.retval = 1,
10949	},
10950 {
10951 "calls: pkt_ptr spill into caller stack 4",
10952 .insns = {
10953 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10954 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10955 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10956 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
10957 /* Check marking propagated. */
10958 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10959 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10960 BPF_EXIT_INSN(),
10961
10962 /* subprog 1 */
10963 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10964 offsetof(struct __sk_buff, data)),
10965 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10966 offsetof(struct __sk_buff, data_end)),
10967 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10969 /* spill unchecked pkt_ptr into stack of caller */
10970 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10971 BPF_MOV64_IMM(BPF_REG_5, 0),
10972 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10973 BPF_MOV64_IMM(BPF_REG_5, 1),
10974 /* don't read back pkt_ptr from stack here */
10975 /* write 4 bytes into packet */
10976 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10977 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10978 BPF_EXIT_INSN(),
10979 },
10980 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10981 .result = ACCEPT,
10982		.retval = 1,
10983	},
10984 {
10985 "calls: pkt_ptr spill into caller stack 5",
10986 .insns = {
10987 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10989 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
10990 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10991 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10992 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10993 BPF_EXIT_INSN(),
10994
10995 /* subprog 1 */
10996 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10997 offsetof(struct __sk_buff, data)),
10998 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10999 offsetof(struct __sk_buff, data_end)),
11000 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11001 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11002 BPF_MOV64_IMM(BPF_REG_5, 0),
11003 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11004 /* spill checked pkt_ptr into stack of caller */
11005 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11006 BPF_MOV64_IMM(BPF_REG_5, 1),
11007 /* don't read back pkt_ptr from stack here */
11008 /* write 4 bytes into packet */
11009 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11010 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11011 BPF_EXIT_INSN(),
11012 },
11013 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11014 .errstr = "same insn cannot be used with different",
11015 .result = REJECT,
11016 },
11017 {
11018 "calls: pkt_ptr spill into caller stack 6",
11019 .insns = {
11020 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11021 offsetof(struct __sk_buff, data_end)),
11022 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11023 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11024 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11025 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11026 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11027 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11028 BPF_EXIT_INSN(),
11029
11030 /* subprog 1 */
11031 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11032 offsetof(struct __sk_buff, data)),
11033 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11034 offsetof(struct __sk_buff, data_end)),
11035 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11037 BPF_MOV64_IMM(BPF_REG_5, 0),
11038 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11039 /* spill checked pkt_ptr into stack of caller */
11040 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11041 BPF_MOV64_IMM(BPF_REG_5, 1),
11042 /* don't read back pkt_ptr from stack here */
11043 /* write 4 bytes into packet */
11044 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11045 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11046 BPF_EXIT_INSN(),
11047 },
11048 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11049 .errstr = "R4 invalid mem access",
11050 .result = REJECT,
11051 },
11052 {
11053 "calls: pkt_ptr spill into caller stack 7",
11054 .insns = {
11055 BPF_MOV64_IMM(BPF_REG_2, 0),
11056 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11058 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11059 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11060 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11061 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11062 BPF_EXIT_INSN(),
11063
11064 /* subprog 1 */
11065 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11066 offsetof(struct __sk_buff, data)),
11067 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11068 offsetof(struct __sk_buff, data_end)),
11069 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11070 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11071 BPF_MOV64_IMM(BPF_REG_5, 0),
11072 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11073 /* spill checked pkt_ptr into stack of caller */
11074 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11075 BPF_MOV64_IMM(BPF_REG_5, 1),
11076 /* don't read back pkt_ptr from stack here */
11077 /* write 4 bytes into packet */
11078 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11079 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11080 BPF_EXIT_INSN(),
11081 },
11082 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11083 .errstr = "R4 invalid mem access",
11084 .result = REJECT,
11085 },
11086 {
11087 "calls: pkt_ptr spill into caller stack 8",
11088 .insns = {
11089 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11090 offsetof(struct __sk_buff, data)),
11091 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11092 offsetof(struct __sk_buff, data_end)),
11093 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11095 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11096 BPF_EXIT_INSN(),
11097 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11099 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11100 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11101 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11102 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11103 BPF_EXIT_INSN(),
11104
11105 /* subprog 1 */
11106 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11107 offsetof(struct __sk_buff, data)),
11108 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11109 offsetof(struct __sk_buff, data_end)),
11110 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11112 BPF_MOV64_IMM(BPF_REG_5, 0),
11113 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11114 /* spill checked pkt_ptr into stack of caller */
11115 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11116 BPF_MOV64_IMM(BPF_REG_5, 1),
11117 /* don't read back pkt_ptr from stack here */
11118 /* write 4 bytes into packet */
11119 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11120 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11121 BPF_EXIT_INSN(),
11122 },
11123 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11124 .result = ACCEPT,
11125 },
11126 {
11127 "calls: pkt_ptr spill into caller stack 9",
11128 .insns = {
11129 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11130 offsetof(struct __sk_buff, data)),
11131 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11132 offsetof(struct __sk_buff, data_end)),
11133 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11135 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11136 BPF_EXIT_INSN(),
11137 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11139 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11140 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11141 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11142 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11143 BPF_EXIT_INSN(),
11144
11145 /* subprog 1 */
11146 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11147 offsetof(struct __sk_buff, data)),
11148 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11149 offsetof(struct __sk_buff, data_end)),
11150 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11152 BPF_MOV64_IMM(BPF_REG_5, 0),
11153 /* spill unchecked pkt_ptr into stack of caller */
11154 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11155 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11156 BPF_MOV64_IMM(BPF_REG_5, 1),
11157 /* don't read back pkt_ptr from stack here */
11158 /* write 4 bytes into packet */
11159 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11160 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11161 BPF_EXIT_INSN(),
11162 },
11163 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11164 .errstr = "invalid access to packet",
11165 .result = REJECT,
11166 },
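	/* Caller slot is either left as the zero it was initialized to or
	 * overwritten by the callee with a map lookup result; both cases
	 * survive the NULL check in the caller, so this must be accepted.
	 */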
11167 {
11168		"calls: caller stack init to zero or map_value_or_null",
11169 .insns = {
11170 BPF_MOV64_IMM(BPF_REG_0, 0),
11171 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
11172 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11174 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11175 /* fetch map_value_or_null or const_zero from stack */
11176 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11177 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11178 /* store into map_value */
11179 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
11180 BPF_EXIT_INSN(),
11181
11182 /* subprog 1 */
11183 /* if (ctx == 0) return; */
11184 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
11185 /* else bpf_map_lookup() and *(fp - 8) = r0 */
11186 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
11187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11189 BPF_LD_MAP_FD(BPF_REG_1, 0),
11190 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11191 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11192 BPF_FUNC_map_lookup_elem),
11193 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11194 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11195 BPF_EXIT_INSN(),
11196 },
11197 .fixup_map1 = { 13 },
11198 .result = ACCEPT,
11199 .prog_type = BPF_PROG_TYPE_XDP,
11200 },
11201 {
11202 "calls: stack init to zero and pruning",
11203 .insns = {
11204			/* first make allocated_stack 16 bytes */
11205			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
11206			/* now fork the execution such that the false branch
11207			 * of JGT insn will be verified second and it skips zero
11208 * init of fp-8 stack slot. If stack liveness marking
11209 * is missing live_read marks from call map_lookup
11210 * processing then pruning will incorrectly assume
11211 * that fp-8 stack slot was unused in the fall-through
11212 * branch and will accept the program incorrectly
11213 */
11214 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
11215 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11216 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
11217 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11219 BPF_LD_MAP_FD(BPF_REG_1, 0),
11220 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11221 BPF_FUNC_map_lookup_elem),
11222 BPF_EXIT_INSN(),
11223 },
11224 .fixup_map2 = { 6 },
11225 .errstr = "invalid indirect read from stack off -8+0 size 8",
11226 .result = REJECT,
11227 .prog_type = BPF_PROG_TYPE_XDP,
11228 },
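	/* "search pruning" tests: the verifier must walk both sides of a
	 * conditional even when one side looks equivalent to an already
	 * verified state, otherwise an unsafe path gets pruned away and
	 * wrongly accepted.
	 */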
11229	{
11230 "search pruning: all branches should be verified (nop operation)",
11231 .insns = {
11232 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11234 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
11235 BPF_LD_MAP_FD(BPF_REG_1, 0),
11236 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
11237 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
11238 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
11239 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
11240 BPF_MOV64_IMM(BPF_REG_4, 0),
11241 BPF_JMP_A(1),
11242 BPF_MOV64_IMM(BPF_REG_4, 1),
11243 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
11244 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
11245 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
11246 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
11247 BPF_MOV64_IMM(BPF_REG_6, 0),
11248 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
11249 BPF_EXIT_INSN(),
11250 },
11251 .fixup_map1 = { 3 },
11252 .errstr = "R6 invalid mem access 'inv'",
11253 .result = REJECT,
11254 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11255 },
11256 {
11257 "search pruning: all branches should be verified (invalid stack access)",
11258 .insns = {
11259 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11261 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
11262 BPF_LD_MAP_FD(BPF_REG_1, 0),
11263 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
11264 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
11265 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
11266 BPF_MOV64_IMM(BPF_REG_4, 0),
11267 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
11268 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
11269 BPF_JMP_A(1),
11270 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
11271 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
11272 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
11273 BPF_EXIT_INSN(),
11274 },
11275 .fixup_map1 = { 3 },
11276 .errstr = "invalid read from stack off -16+0 size 8",
11277 .result = REJECT,
11278 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11279 },
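	/* "jit" tests are accept + retval tests: the interesting part is
	 * the value returned by bpf_prog_test_run(), which exercises
	 * shift, mov32-for-ldimm64 and multiply code generation.
	 */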
11280	{
11281 "jit: lsh, rsh, arsh by 1",
11282 .insns = {
11283 BPF_MOV64_IMM(BPF_REG_0, 1),
11284 BPF_MOV64_IMM(BPF_REG_1, 0xff),
11285 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
11286 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
11287 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
11288 BPF_EXIT_INSN(),
11289 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
11290 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
11291 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
11292 BPF_EXIT_INSN(),
11293 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
11294 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
11295 BPF_EXIT_INSN(),
11296 BPF_MOV64_IMM(BPF_REG_0, 2),
11297 BPF_EXIT_INSN(),
11298 },
11299 .result = ACCEPT,
11300 .retval = 2,
11301 },
11302 {
11303 "jit: mov32 for ldimm64, 1",
11304 .insns = {
11305 BPF_MOV64_IMM(BPF_REG_0, 2),
11306 BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
11307 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
11308 BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
11309 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
11310 BPF_MOV64_IMM(BPF_REG_0, 1),
11311 BPF_EXIT_INSN(),
11312 },
11313 .result = ACCEPT,
11314 .retval = 2,
11315 },
11316 {
11317 "jit: mov32 for ldimm64, 2",
11318 .insns = {
11319 BPF_MOV64_IMM(BPF_REG_0, 1),
11320 BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
11321 BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
11322 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
11323 BPF_MOV64_IMM(BPF_REG_0, 2),
11324 BPF_EXIT_INSN(),
11325 },
11326 .result = ACCEPT,
11327 .retval = 2,
11328 },
11329 {
11330 "jit: various mul tests",
11331 .insns = {
11332 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
11333 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
11334 BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
11335 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
11336 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
11337 BPF_MOV64_IMM(BPF_REG_0, 1),
11338 BPF_EXIT_INSN(),
11339 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
11340 BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
11341 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
11342 BPF_MOV64_IMM(BPF_REG_0, 1),
11343 BPF_EXIT_INSN(),
11344 BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
11345 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
11346 BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
11347 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
11348 BPF_MOV64_IMM(BPF_REG_0, 1),
11349 BPF_EXIT_INSN(),
11350 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
11351 BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
11352 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
11353 BPF_MOV64_IMM(BPF_REG_0, 1),
11354 BPF_EXIT_INSN(),
11355 BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
11356 BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
11357 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
11358 BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
11359 BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
11360 BPF_MOV64_IMM(BPF_REG_0, 1),
11361 BPF_EXIT_INSN(),
11362 BPF_MOV64_IMM(BPF_REG_0, 2),
11363 BPF_EXIT_INSN(),
11364 },
11365 .result = ACCEPT,
11366 .retval = 2,
11367 },
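	/* "xadd" tests: BPF_XADD must be rejected on misaligned stack and
	 * map value offsets, and refused outright on packet memory.
	 */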
11368	{
11369		"xadd/w check unaligned stack",
11370 .insns = {
11371 BPF_MOV64_IMM(BPF_REG_0, 1),
11372 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
11373 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
11374 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11375 BPF_EXIT_INSN(),
11376 },
11377 .result = REJECT,
11378 .errstr = "misaligned stack access off",
11379 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11380 },
11381 {
11382 "xadd/w check unaligned map",
11383 .insns = {
11384 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11385 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11387 BPF_LD_MAP_FD(BPF_REG_1, 0),
11388 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11389 BPF_FUNC_map_lookup_elem),
11390 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
11391 BPF_EXIT_INSN(),
11392 BPF_MOV64_IMM(BPF_REG_1, 1),
11393 BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
11394 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
11395 BPF_EXIT_INSN(),
11396 },
11397 .fixup_map1 = { 3 },
11398 .result = REJECT,
11399 .errstr = "misaligned value access off",
11400 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11401 },
11402 {
11403 "xadd/w check unaligned pkt",
11404 .insns = {
11405 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11406 offsetof(struct xdp_md, data)),
11407 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11408 offsetof(struct xdp_md, data_end)),
11409 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11411 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
11412 BPF_MOV64_IMM(BPF_REG_0, 99),
11413 BPF_JMP_IMM(BPF_JA, 0, 0, 6),
11414 BPF_MOV64_IMM(BPF_REG_0, 1),
11415 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11416 BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
11417 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
11418 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
11419 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
11420 BPF_EXIT_INSN(),
11421 },
11422 .result = REJECT,
11423 .errstr = "BPF_XADD stores into R2 packet",
11424 .prog_type = BPF_PROG_TYPE_XDP,
11425 },
11426};
11427
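/* Determine a test program's length by scanning backwards from MAX_INSNS
 * for the last non-zero instruction.
 */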
11428static int probe_filter_length(const struct bpf_insn *fp)
11429{
11430	int len;
11431
11432 for (len = MAX_INSNS - 1; len > 0; --len)
11433 if (fp[len].code != 0 || fp[len].imm != 0)
11434 break;
11435	return len + 1;
11436}
11437
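/* Hash map with an 8-byte key for the fixup_map1/fixup_map2 fixups; only
 * the value size matters since the tests never do a runtime lookup.
 */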
11438static int create_map(uint32_t size_value, uint32_t max_elem)
11439{
11440	int fd;
11441
11442	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
11443			    size_value, max_elem, BPF_F_NO_PREALLOC);
11444 if (fd < 0)
11445 printf("Failed to create hash map '%s'!\n", strerror(errno));
11446
11447	return fd;
11448}
11449
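/* Dummy programs and a prog array for the tail-call (fixup_prog) fixups:
 * slot 0 simply returns 42, slot 1 tail-calls back into the same array.
 */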
11450static int create_prog_dummy1(void)
11451{
11452 struct bpf_insn prog[] = {
11453 BPF_MOV64_IMM(BPF_REG_0, 42),
11454 BPF_EXIT_INSN(),
11455 };
11456
11457 return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
11458 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
11459}
11460
11461static int create_prog_dummy2(int mfd, int idx)
11462{
11463 struct bpf_insn prog[] = {
11464 BPF_MOV64_IMM(BPF_REG_3, idx),
11465 BPF_LD_MAP_FD(BPF_REG_2, mfd),
11466 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11467 BPF_FUNC_tail_call),
11468 BPF_MOV64_IMM(BPF_REG_0, 41),
11469 BPF_EXIT_INSN(),
11470 };
11471
11472 return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
11473 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
11474}
11475
11476static int create_prog_array(void)
11477{
11478	int p1key = 0, p2key = 1;
11479	int mfd, p1fd, p2fd;
11480
11481	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
11482			     sizeof(int), 4, 0);
11483	if (mfd < 0) {
11484		printf("Failed to create prog array '%s'!\n", strerror(errno));
11485		return -1;
11486	}
11487
11488	p1fd = create_prog_dummy1();
11489 p2fd = create_prog_dummy2(mfd, p2key);
11490 if (p1fd < 0 || p2fd < 0)
11491 goto out;
11492 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
11493 goto out;
11494 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
11495 goto out;
11496 close(p2fd);
11497 close(p1fd);
11498
11499 return mfd;
11500out:
11501 close(p2fd);
11502 close(p1fd);
11503 close(mfd);
11504 return -1;
11505}
11506
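/* Map-in-map fixture for the fixup_map_in_map fixups: a one-element array
 * of maps whose inner map is a one-element int array.
 */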
11507static int create_map_in_map(void)
11508{
11509 int inner_map_fd, outer_map_fd;
11510
11511 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
11512 sizeof(int), 1, 0);
11513 if (inner_map_fd < 0) {
11514 printf("Failed to create array '%s'!\n", strerror(errno));
11515 return inner_map_fd;
11516 }
11517
11518	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
11519					     sizeof(int), inner_map_fd, 1, 0);
11520 if (outer_map_fd < 0)
11521 printf("Failed to create array of maps '%s'!\n",
11522 strerror(errno));
11523
11524 close(inner_map_fd);
11525
11526 return outer_map_fd;
11527}
11528
11529static char bpf_vlog[32768];
11530
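/* Patch the test program in place: each index listed in a fixup_* array
 * points at a map-fd load whose imm field is rewritten with a freshly
 * created map, prog array or map-in-map fd before the program is loaded.
 */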
11531static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
11532			  int *map_fds)
11533{
11534	int *fixup_map1 = test->fixup_map1;
11535 int *fixup_map2 = test->fixup_map2;
11536 int *fixup_prog = test->fixup_prog;
11537	int *fixup_map_in_map = test->fixup_map_in_map;
11538
11539 /* Allocating HTs with 1 elem is fine here, since we only test
11540 * for verifier and not do a runtime lookup, so the only thing
11541 * that really matters is value size in this case.
11542 */
11543 if (*fixup_map1) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011544 map_fds[0] = create_map(sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011545 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011546 prog[*fixup_map1].imm = map_fds[0];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011547 fixup_map1++;
11548 } while (*fixup_map1);
11549 }
11550
11551 if (*fixup_map2) {
11552		map_fds[1] = create_map(sizeof(struct test_val), 1);
11553		do {
11554			prog[*fixup_map2].imm = map_fds[1];
11555			fixup_map2++;
11556 } while (*fixup_map2);
11557 }
11558
11559 if (*fixup_prog) {
11560		map_fds[2] = create_prog_array();
11561		do {
11562			prog[*fixup_prog].imm = map_fds[2];
11563			fixup_prog++;
11564 } while (*fixup_prog);
11565 }
11566
11567 if (*fixup_map_in_map) {
11568 map_fds[3] = create_map_in_map();
11569 do {
11570 prog[*fixup_map_in_map].imm = map_fds[3];
11571 fixup_map_in_map++;
11572 } while (*fixup_map_in_map);
11573 }
11574}
11575
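/* Load a single test program, compare the verifier verdict and log against
 * the expected (privileged or unprivileged) result, and if the load
 * succeeded, run it once via bpf_prog_test_run() to check the return value.
 */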
11576static void do_test_single(struct bpf_test *test, bool unpriv,
11577 int *passes, int *errors)
11578{
11579	int fd_prog, expected_ret, reject_from_alignment;
11580	struct bpf_insn *prog = test->insns;
11581	int prog_len = probe_filter_length(prog);
11582	char data_in[TEST_DATA_LEN] = {};
11583	int prog_type = test->prog_type;
11584	int map_fds[MAX_NR_MAPS];
11585	const char *expected_err;
11586	uint32_t retval;
11587 int i, err;
11588
11589	for (i = 0; i < MAX_NR_MAPS; i++)
11590 map_fds[i] = -1;
11591
11592 do_test_fixup(test, prog, map_fds);
11593
11594	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
11595				     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
11596				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
11597
11598 expected_ret = unpriv && test->result_unpriv != UNDEF ?
11599 test->result_unpriv : test->result;
11600 expected_err = unpriv && test->errstr_unpriv ?
11601 test->errstr_unpriv : test->errstr;
11602
11603 reject_from_alignment = fd_prog < 0 &&
11604 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
11605 strstr(bpf_vlog, "Unknown alignment.");
11606#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
11607 if (reject_from_alignment) {
11608 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
11609 strerror(errno));
11610 goto fail_log;
11611 }
11612#endif
11613	if (expected_ret == ACCEPT) {
11614		if (fd_prog < 0 && !reject_from_alignment) {
11615			printf("FAIL\nFailed to load prog '%s'!\n",
11616 strerror(errno));
11617 goto fail_log;
11618 }
11619 } else {
11620 if (fd_prog >= 0) {
11621 printf("FAIL\nUnexpected success to load!\n");
11622 goto fail_log;
11623 }
11624		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
11625			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
11626			       expected_err, bpf_vlog);
11627			goto fail_log;
11628 }
11629 }
11630
11631	if (fd_prog >= 0) {
11632 err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
11633 NULL, NULL, &retval, NULL);
11634 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
11635 printf("Unexpected bpf_prog_test_run error\n");
11636 goto fail_log;
11637 }
11638 if (!err && retval != test->retval &&
11639 test->retval != POINTER_VALUE) {
11640 printf("FAIL retval %d != %d\n", retval, test->retval);
11641 goto fail_log;
11642 }
11643 }
11644	(*passes)++;
11645	printf("OK%s\n", reject_from_alignment ?
11646 " (NOTE: reject due to unknown alignment)" : "");
11647close_fds:
11648 close(fd_prog);
11649	for (i = 0; i < MAX_NR_MAPS; i++)
11650 close(map_fds[i]);
11651	sched_yield();
11652 return;
11653fail_log:
11654 (*errors)++;
11655 printf("%s", bpf_vlog);
11656 goto close_fds;
11657}
11658
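/* Capability helpers: the unprivileged flavour of a test is run with
 * CAP_SYS_ADMIN temporarily dropped from the effective set (see
 * set_admin() below).
 */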
11659static bool is_admin(void)
11660{
11661 cap_t caps;
11662 cap_flag_value_t sysadmin = CAP_CLEAR;
11663 const cap_value_t cap_val = CAP_SYS_ADMIN;
11664
11665#ifdef CAP_IS_SUPPORTED
11666	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
11667 perror("cap_get_flag");
11668 return false;
11669 }
11670#endif
11671	caps = cap_get_proc();
11672 if (!caps) {
11673 perror("cap_get_proc");
11674 return false;
11675 }
11676 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
11677 perror("cap_get_flag");
11678 if (cap_free(caps))
11679 perror("cap_free");
11680 return (sysadmin == CAP_SET);
11681}
11682
11683static int set_admin(bool admin)
11684{
11685 cap_t caps;
11686 const cap_value_t cap_val = CAP_SYS_ADMIN;
11687 int ret = -1;
11688
11689 caps = cap_get_proc();
11690 if (!caps) {
11691 perror("cap_get_proc");
11692 return -1;
11693 }
11694 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
11695 admin ? CAP_SET : CAP_CLEAR)) {
11696 perror("cap_set_flag");
11697 goto out;
11698 }
11699 if (cap_set_proc(caps)) {
11700 perror("cap_set_proc");
11701 goto out;
11702 }
11703 ret = 0;
11704out:
11705 if (cap_free(caps))
11706 perror("cap_free");
11707 return ret;
11708}
11709
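/* Read kernel.unprivileged_bpf_disabled so unprivileged runs can be
 * skipped when the sysctl forbids them.
 */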
11710static void get_unpriv_disabled(void)
11711{
11712 char buf[2];
11713 FILE *fd;
11714
11715	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) /* sysctl may be absent; treat as not disabled */
		return;
11716	if (fgets(buf, 2, fd) == buf && atoi(buf))
11717		unpriv_disabled = true;
11718	fclose(fd);
11719}
11720
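/* Run the selected range of tests: the unprivileged pass only happens for
 * tests without a specific prog_type (and only if the sysctl allows it),
 * and the privileged pass is skipped when the suite itself runs as
 * non-root.
 */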
11721static int do_test(bool unpriv, unsigned int from, unsigned int to)
11722{
11723	int i, passes = 0, errors = 0, skips = 0;
11724
11725 for (i = from; i < to; i++) {
11726 struct bpf_test *test = &tests[i];
11727
11728		/* Program types that are not supported by non-root are
11729		 * skipped right away.
11730 */
11731		if (!test->prog_type && unpriv_disabled) {
11732 printf("#%d/u %s SKIP\n", i, test->descr);
11733 skips++;
11734 } else if (!test->prog_type) {
11735			if (!unpriv)
11736 set_admin(false);
11737 printf("#%d/u %s ", i, test->descr);
11738 do_test_single(test, true, &passes, &errors);
11739 if (!unpriv)
11740 set_admin(true);
11741 }
11742
11743		if (unpriv) {
11744 printf("#%d/p %s SKIP\n", i, test->descr);
11745 skips++;
11746 } else {
11747			printf("#%d/p %s ", i, test->descr);
11748 do_test_single(test, false, &passes, &errors);
11749 }
11750	}
11751
11752	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
11753 skips, errors);
11754	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
11755}
11756
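/* Usage: test_verifier [from [to]] - one argument runs a single test,
 * two arguments run the inclusive range [from, to], no arguments run
 * the whole suite.
 */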
11757int main(int argc, char **argv)
11758{
11759	unsigned int from = 0, to = ARRAY_SIZE(tests);
11760	bool unpriv = !is_admin();
11761
11762	if (argc == 3) {
11763 unsigned int l = atoi(argv[argc - 2]);
11764 unsigned int u = atoi(argv[argc - 1]);
11765
11766		if (l < to && u < to) {
11767 from = l;
11768 to = u + 1;
11769		}
11770	} else if (argc == 2) {
11771 unsigned int t = atoi(argv[argc - 1]);
11772
11773		if (t < to) {
11774 from = t;
11775 to = t + 1;
11776		}
11777	}
11778
11779	get_unpriv_disabled();
11780 if (unpriv && unpriv_disabled) {
11781 printf("Cannot run as unprivileged user with sysctl %s.\n",
11782 UNPRIV_SYSCTL);
11783 return EXIT_FAILURE;
11784 }
11785
11786	return do_test(unpriv, from, to);
11787}