/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>

#include <sys/capability.h>
#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>

#include <bpf/bpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif

#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	4
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t retval;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
};

/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

static struct bpf_test tests[] = {
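	/* A short summary of how the fields above are used (inferred from the
	 * test runner further down in this file): the .fixup_* arrays list
	 * instruction indices where a dummy BPF_LD_MAP_FD(..., 0) gets patched
	 * with the fd of a map created at run time, .errstr/.errstr_unpriv are
	 * substrings expected in the verifier log of the privileged and
	 * unprivileged loads, and .retval is compared against what an ACCEPTed
	 * program returns when it is test-run.
	 */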
90 {
91 "add+sub+mul",
92 .insns = {
93 BPF_MOV64_IMM(BPF_REG_1, 1),
94 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
95 BPF_MOV64_IMM(BPF_REG_2, 3),
96 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
97 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
98 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
99 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
100 BPF_EXIT_INSN(),
101 },
102 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800103 .retval = -3,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700104 },
105 {
Daniel Borkmann87c17932018-01-20 01:24:32 +0100106 "DIV32 by 0, zero check 1",
107 .insns = {
108 BPF_MOV32_IMM(BPF_REG_0, 42),
109 BPF_MOV32_IMM(BPF_REG_1, 0),
110 BPF_MOV32_IMM(BPF_REG_2, 1),
111 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
112 BPF_EXIT_INSN(),
113 },
114 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100115 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100116 },
117 {
118 "DIV32 by 0, zero check 2",
119 .insns = {
120 BPF_MOV32_IMM(BPF_REG_0, 42),
121 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
122 BPF_MOV32_IMM(BPF_REG_2, 1),
123 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
124 BPF_EXIT_INSN(),
125 },
126 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100127 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100128 },
129 {
130 "DIV64 by 0, zero check",
131 .insns = {
132 BPF_MOV32_IMM(BPF_REG_0, 42),
133 BPF_MOV32_IMM(BPF_REG_1, 0),
134 BPF_MOV32_IMM(BPF_REG_2, 1),
135 BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
136 BPF_EXIT_INSN(),
137 },
138 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100139 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100140 },
141 {
142 "MOD32 by 0, zero check 1",
143 .insns = {
144 BPF_MOV32_IMM(BPF_REG_0, 42),
145 BPF_MOV32_IMM(BPF_REG_1, 0),
146 BPF_MOV32_IMM(BPF_REG_2, 1),
147 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
148 BPF_EXIT_INSN(),
149 },
150 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100151 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100152 },
153 {
154 "MOD32 by 0, zero check 2",
155 .insns = {
156 BPF_MOV32_IMM(BPF_REG_0, 42),
157 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
158 BPF_MOV32_IMM(BPF_REG_2, 1),
159 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
160 BPF_EXIT_INSN(),
161 },
162 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100163 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100164 },
165 {
166 "MOD64 by 0, zero check",
167 .insns = {
168 BPF_MOV32_IMM(BPF_REG_0, 42),
169 BPF_MOV32_IMM(BPF_REG_1, 0),
170 BPF_MOV32_IMM(BPF_REG_2, 1),
171 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
172 BPF_EXIT_INSN(),
173 },
174 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100175 .retval = 42,
176 },
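	/* The SCHED_CLS variants below also check the run-time result that the
	 * expected .retval values encode: a division by zero yields 0, while a
	 * modulo by zero leaves the destination register unchanged.
	 */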
177 {
178 "DIV32 by 0, zero check ok, cls",
179 .insns = {
180 BPF_MOV32_IMM(BPF_REG_0, 42),
181 BPF_MOV32_IMM(BPF_REG_1, 2),
182 BPF_MOV32_IMM(BPF_REG_2, 16),
183 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
184 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
185 BPF_EXIT_INSN(),
186 },
187 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
188 .result = ACCEPT,
189 .retval = 8,
190 },
191 {
192 "DIV32 by 0, zero check 1, cls",
193 .insns = {
194 BPF_MOV32_IMM(BPF_REG_1, 0),
195 BPF_MOV32_IMM(BPF_REG_0, 1),
196 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
197 BPF_EXIT_INSN(),
198 },
199 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
200 .result = ACCEPT,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100201 .retval = 0,
202 },
203 {
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100204 "DIV32 by 0, zero check 2, cls",
205 .insns = {
206 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
207 BPF_MOV32_IMM(BPF_REG_0, 1),
208 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
209 BPF_EXIT_INSN(),
210 },
211 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
212 .result = ACCEPT,
213 .retval = 0,
214 },
215 {
216 "DIV64 by 0, zero check, cls",
217 .insns = {
218 BPF_MOV32_IMM(BPF_REG_1, 0),
219 BPF_MOV32_IMM(BPF_REG_0, 1),
220 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
221 BPF_EXIT_INSN(),
222 },
223 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
224 .result = ACCEPT,
225 .retval = 0,
226 },
227 {
228 "MOD32 by 0, zero check ok, cls",
229 .insns = {
230 BPF_MOV32_IMM(BPF_REG_0, 42),
231 BPF_MOV32_IMM(BPF_REG_1, 3),
232 BPF_MOV32_IMM(BPF_REG_2, 5),
233 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
234 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
235 BPF_EXIT_INSN(),
236 },
237 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
238 .result = ACCEPT,
239 .retval = 2,
240 },
241 {
242 "MOD32 by 0, zero check 1, cls",
243 .insns = {
244 BPF_MOV32_IMM(BPF_REG_1, 0),
245 BPF_MOV32_IMM(BPF_REG_0, 1),
246 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
247 BPF_EXIT_INSN(),
248 },
249 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
250 .result = ACCEPT,
251 .retval = 1,
252 },
253 {
254 "MOD32 by 0, zero check 2, cls",
255 .insns = {
256 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
257 BPF_MOV32_IMM(BPF_REG_0, 1),
258 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
259 BPF_EXIT_INSN(),
260 },
261 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
262 .result = ACCEPT,
263 .retval = 1,
264 },
265 {
266 "MOD64 by 0, zero check 1, cls",
267 .insns = {
268 BPF_MOV32_IMM(BPF_REG_1, 0),
269 BPF_MOV32_IMM(BPF_REG_0, 2),
270 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
271 BPF_EXIT_INSN(),
272 },
273 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
274 .result = ACCEPT,
275 .retval = 2,
276 },
277 {
278 "MOD64 by 0, zero check 2, cls",
279 .insns = {
280 BPF_MOV32_IMM(BPF_REG_1, 0),
281 BPF_MOV32_IMM(BPF_REG_0, -1),
282 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
283 BPF_EXIT_INSN(),
284 },
285 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
286 .result = ACCEPT,
287 .retval = -1,
288 },
289 /* Just make sure that JITs use udiv/umod, as otherwise we would get
290 * an exception from INT_MIN/-1 overflow, similar to the one from a
291 * division by zero.
292 */
293 {
294 "DIV32 overflow, check 1",
295 .insns = {
296 BPF_MOV32_IMM(BPF_REG_1, -1),
297 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
298 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
299 BPF_EXIT_INSN(),
300 },
301 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
302 .result = ACCEPT,
303 .retval = 0,
304 },
305 {
306 "DIV32 overflow, check 2",
307 .insns = {
308 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
309 BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
310 BPF_EXIT_INSN(),
311 },
312 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
313 .result = ACCEPT,
314 .retval = 0,
315 },
316 {
317 "DIV64 overflow, check 1",
318 .insns = {
319 BPF_MOV64_IMM(BPF_REG_1, -1),
320 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
321 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
322 BPF_EXIT_INSN(),
323 },
324 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
325 .result = ACCEPT,
326 .retval = 0,
327 },
328 {
329 "DIV64 overflow, check 2",
330 .insns = {
331 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
332 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
333 BPF_EXIT_INSN(),
334 },
335 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
336 .result = ACCEPT,
337 .retval = 0,
338 },
339 {
340 "MOD32 overflow, check 1",
341 .insns = {
342 BPF_MOV32_IMM(BPF_REG_1, -1),
343 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
344 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
345 BPF_EXIT_INSN(),
346 },
347 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
348 .result = ACCEPT,
349 .retval = INT_MIN,
350 },
351 {
352 "MOD32 overflow, check 2",
353 .insns = {
354 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
355 BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
356 BPF_EXIT_INSN(),
357 },
358 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
359 .result = ACCEPT,
360 .retval = INT_MIN,
361 },
362 {
363 "MOD64 overflow, check 1",
364 .insns = {
365 BPF_MOV64_IMM(BPF_REG_1, -1),
366 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
367 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
368 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
369 BPF_MOV32_IMM(BPF_REG_0, 0),
370 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
371 BPF_MOV32_IMM(BPF_REG_0, 1),
372 BPF_EXIT_INSN(),
373 },
374 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
375 .result = ACCEPT,
376 .retval = 1,
377 },
378 {
379 "MOD64 overflow, check 2",
380 .insns = {
381 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
382 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
383 BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
384 BPF_MOV32_IMM(BPF_REG_0, 0),
385 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
386 BPF_MOV32_IMM(BPF_REG_0, 1),
387 BPF_EXIT_INSN(),
388 },
389 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
390 .result = ACCEPT,
391 .retval = 1,
392 },
393 {
394 "xor32 zero extend check",
395 .insns = {
396 BPF_MOV32_IMM(BPF_REG_2, -1),
397 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
398 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
399 BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
400 BPF_MOV32_IMM(BPF_REG_0, 2),
401 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
402 BPF_MOV32_IMM(BPF_REG_0, 1),
403 BPF_EXIT_INSN(),
404 },
405 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
406 .result = ACCEPT,
407 .retval = 1,
408 },
409 {
Daniel Borkmann87c17932018-01-20 01:24:32 +0100410 "empty prog",
411 .insns = {
412 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100413 .errstr = "unknown opcode 00",
Daniel Borkmann87c17932018-01-20 01:24:32 +0100414 .result = REJECT,
415 },
416 {
417 "only exit insn",
418 .insns = {
419 BPF_EXIT_INSN(),
420 },
421 .errstr = "R0 !read_ok",
422 .result = REJECT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700423 },
424 {
425 "unreachable",
426 .insns = {
427 BPF_EXIT_INSN(),
428 BPF_EXIT_INSN(),
429 },
430 .errstr = "unreachable",
431 .result = REJECT,
432 },
433 {
434 "unreachable2",
435 .insns = {
436 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
437 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
438 BPF_EXIT_INSN(),
439 },
440 .errstr = "unreachable",
441 .result = REJECT,
442 },
443 {
444 "out of range jump",
445 .insns = {
446 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
447 BPF_EXIT_INSN(),
448 },
449 .errstr = "jump out of range",
450 .result = REJECT,
451 },
452 {
453 "out of range jump2",
454 .insns = {
455 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
456 BPF_EXIT_INSN(),
457 },
458 .errstr = "jump out of range",
459 .result = REJECT,
460 },
461 {
462 "test1 ld_imm64",
463 .insns = {
464 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
465 BPF_LD_IMM64(BPF_REG_0, 0),
466 BPF_LD_IMM64(BPF_REG_0, 0),
467 BPF_LD_IMM64(BPF_REG_0, 1),
468 BPF_LD_IMM64(BPF_REG_0, 1),
469 BPF_MOV64_IMM(BPF_REG_0, 2),
470 BPF_EXIT_INSN(),
471 },
472 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700473 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700474 .result = REJECT,
475 },
476 {
477 "test2 ld_imm64",
478 .insns = {
479 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
480 BPF_LD_IMM64(BPF_REG_0, 0),
481 BPF_LD_IMM64(BPF_REG_0, 0),
482 BPF_LD_IMM64(BPF_REG_0, 1),
483 BPF_LD_IMM64(BPF_REG_0, 1),
484 BPF_EXIT_INSN(),
485 },
486 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700487 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700488 .result = REJECT,
489 },
490 {
491 "test3 ld_imm64",
492 .insns = {
493 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
494 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
495 BPF_LD_IMM64(BPF_REG_0, 0),
496 BPF_LD_IMM64(BPF_REG_0, 0),
497 BPF_LD_IMM64(BPF_REG_0, 1),
498 BPF_LD_IMM64(BPF_REG_0, 1),
499 BPF_EXIT_INSN(),
500 },
501 .errstr = "invalid bpf_ld_imm64 insn",
502 .result = REJECT,
503 },
504 {
505 "test4 ld_imm64",
506 .insns = {
507 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
508 BPF_EXIT_INSN(),
509 },
510 .errstr = "invalid bpf_ld_imm64 insn",
511 .result = REJECT,
512 },
513 {
514 "test5 ld_imm64",
515 .insns = {
516 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
517 },
518 .errstr = "invalid bpf_ld_imm64 insn",
519 .result = REJECT,
520 },
521 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200522 "test6 ld_imm64",
523 .insns = {
524 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
525 BPF_RAW_INSN(0, 0, 0, 0, 0),
526 BPF_EXIT_INSN(),
527 },
528 .result = ACCEPT,
529 },
530 {
531 "test7 ld_imm64",
532 .insns = {
533 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
534 BPF_RAW_INSN(0, 0, 0, 0, 1),
535 BPF_EXIT_INSN(),
536 },
537 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800538 .retval = 1,
Daniel Borkmann728a8532017-04-27 01:39:32 +0200539 },
540 {
541 "test8 ld_imm64",
542 .insns = {
543 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
544 BPF_RAW_INSN(0, 0, 0, 0, 1),
545 BPF_EXIT_INSN(),
546 },
547 .errstr = "uses reserved fields",
548 .result = REJECT,
549 },
550 {
551 "test9 ld_imm64",
552 .insns = {
553 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
554 BPF_RAW_INSN(0, 0, 0, 1, 1),
555 BPF_EXIT_INSN(),
556 },
557 .errstr = "invalid bpf_ld_imm64 insn",
558 .result = REJECT,
559 },
560 {
561 "test10 ld_imm64",
562 .insns = {
563 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
564 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
565 BPF_EXIT_INSN(),
566 },
567 .errstr = "invalid bpf_ld_imm64 insn",
568 .result = REJECT,
569 },
570 {
571 "test11 ld_imm64",
572 .insns = {
573 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
574 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
575 BPF_EXIT_INSN(),
576 },
577 .errstr = "invalid bpf_ld_imm64 insn",
578 .result = REJECT,
579 },
580 {
581 "test12 ld_imm64",
582 .insns = {
583 BPF_MOV64_IMM(BPF_REG_1, 0),
584 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
585 BPF_RAW_INSN(0, 0, 0, 0, 1),
586 BPF_EXIT_INSN(),
587 },
588 .errstr = "not pointing to valid bpf_map",
589 .result = REJECT,
590 },
591 {
592 "test13 ld_imm64",
593 .insns = {
594 BPF_MOV64_IMM(BPF_REG_1, 0),
595 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
596 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
597 BPF_EXIT_INSN(),
598 },
599 .errstr = "invalid bpf_ld_imm64 insn",
600 .result = REJECT,
601 },
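	/* 32-bit BPF_ARSH is not a valid eBPF opcode here: the two arsh32
	 * tests below are expected to be rejected as unknown opcodes c4/cc,
	 * while the 64-bit forms that follow are accepted.
	 */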
602 {
Daniel Borkmann7891a872018-01-10 20:04:37 +0100603 "arsh32 on imm",
604 .insns = {
605 BPF_MOV64_IMM(BPF_REG_0, 1),
606 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
607 BPF_EXIT_INSN(),
608 },
609 .result = REJECT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100610 .errstr = "unknown opcode c4",
Daniel Borkmann7891a872018-01-10 20:04:37 +0100611 },
612 {
613 "arsh32 on reg",
614 .insns = {
615 BPF_MOV64_IMM(BPF_REG_0, 1),
616 BPF_MOV64_IMM(BPF_REG_1, 5),
617 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
618 BPF_EXIT_INSN(),
619 },
620 .result = REJECT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100621 .errstr = "unknown opcode cc",
Daniel Borkmann7891a872018-01-10 20:04:37 +0100622 },
623 {
624 "arsh64 on imm",
625 .insns = {
626 BPF_MOV64_IMM(BPF_REG_0, 1),
627 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
628 BPF_EXIT_INSN(),
629 },
630 .result = ACCEPT,
631 },
632 {
633 "arsh64 on reg",
634 .insns = {
635 BPF_MOV64_IMM(BPF_REG_0, 1),
636 BPF_MOV64_IMM(BPF_REG_1, 5),
637 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
638 BPF_EXIT_INSN(),
639 },
640 .result = ACCEPT,
641 },
642 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700643 "no bpf_exit",
644 .insns = {
645 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
646 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -0800647 .errstr = "not an exit",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700648 .result = REJECT,
649 },
650 {
651 "loop (back-edge)",
652 .insns = {
653 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
654 BPF_EXIT_INSN(),
655 },
656 .errstr = "back-edge",
657 .result = REJECT,
658 },
659 {
660 "loop2 (back-edge)",
661 .insns = {
662 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
663 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
664 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
665 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
666 BPF_EXIT_INSN(),
667 },
668 .errstr = "back-edge",
669 .result = REJECT,
670 },
671 {
672 "conditional loop",
673 .insns = {
674 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
675 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
676 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
677 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
678 BPF_EXIT_INSN(),
679 },
680 .errstr = "back-edge",
681 .result = REJECT,
682 },
683 {
684 "read uninitialized register",
685 .insns = {
686 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
687 BPF_EXIT_INSN(),
688 },
689 .errstr = "R2 !read_ok",
690 .result = REJECT,
691 },
692 {
693 "read invalid register",
694 .insns = {
695 BPF_MOV64_REG(BPF_REG_0, -1),
696 BPF_EXIT_INSN(),
697 },
698 .errstr = "R15 is invalid",
699 .result = REJECT,
700 },
701 {
702 "program doesn't init R0 before exit",
703 .insns = {
704 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
705 BPF_EXIT_INSN(),
706 },
707 .errstr = "R0 !read_ok",
708 .result = REJECT,
709 },
710 {
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700711 "program doesn't init R0 before exit in all branches",
712 .insns = {
713 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
714 BPF_MOV64_IMM(BPF_REG_0, 1),
715 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
716 BPF_EXIT_INSN(),
717 },
718 .errstr = "R0 !read_ok",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700719 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700720 .result = REJECT,
721 },
722 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700723 "stack out of bounds",
724 .insns = {
725 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
726 BPF_EXIT_INSN(),
727 },
728 .errstr = "invalid stack",
729 .result = REJECT,
730 },
731 {
732 "invalid call insn1",
733 .insns = {
734 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
735 BPF_EXIT_INSN(),
736 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100737 .errstr = "unknown opcode 8d",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700738 .result = REJECT,
739 },
740 {
741 "invalid call insn2",
742 .insns = {
743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
744 BPF_EXIT_INSN(),
745 },
746 .errstr = "BPF_CALL uses reserved",
747 .result = REJECT,
748 },
749 {
750 "invalid function call",
751 .insns = {
752 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
753 BPF_EXIT_INSN(),
754 },
Daniel Borkmanne00c7b22016-11-26 01:28:09 +0100755 .errstr = "invalid func unknown#1234567",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700756 .result = REJECT,
757 },
758 {
759 "uninitialized stack1",
760 .insns = {
761 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
763 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
765 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700766 BPF_EXIT_INSN(),
767 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200768 .fixup_map1 = { 2 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700769 .errstr = "invalid indirect read from stack",
770 .result = REJECT,
771 },
772 {
773 "uninitialized stack2",
774 .insns = {
775 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
776 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
777 BPF_EXIT_INSN(),
778 },
779 .errstr = "invalid read from stack",
780 .result = REJECT,
781 },
782 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200783 "invalid fp arithmetic",
784 /* If this gets ever changed, make sure JITs can deal with it. */
785 .insns = {
786 BPF_MOV64_IMM(BPF_REG_0, 0),
787 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
788 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
789 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
790 BPF_EXIT_INSN(),
791 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -0800792 .errstr = "R1 subtraction from stack pointer",
Daniel Borkmann728a8532017-04-27 01:39:32 +0200793 .result = REJECT,
794 },
795 {
796 "non-invalid fp arithmetic",
797 .insns = {
798 BPF_MOV64_IMM(BPF_REG_0, 0),
799 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
800 BPF_EXIT_INSN(),
801 },
802 .result = ACCEPT,
803 },
804 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200805 "invalid argument register",
806 .insns = {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200807 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
808 BPF_FUNC_get_cgroup_classid),
809 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
810 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200811 BPF_EXIT_INSN(),
812 },
813 .errstr = "R1 !read_ok",
814 .result = REJECT,
815 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
816 },
817 {
818 "non-invalid argument register",
819 .insns = {
820 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200821 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
822 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200823 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
825 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200826 BPF_EXIT_INSN(),
827 },
828 .result = ACCEPT,
829 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
830 },
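	/* Spill/fill: storing a register to the stack and loading it back must
	 * preserve what the verifier knows about it.  Spilling the ctx pointer
	 * and returning it is accepted for privileged loads, but unprivileged
	 * ones reject it as an address leak ("R0 leaks addr").
	 */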
831 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700832 "check valid spill/fill",
833 .insns = {
834 /* spill R1(ctx) into stack */
835 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700836 /* fill it back into R2 */
837 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700838 /* should be able to access R0 = *(R2 + 8) */
Daniel Borkmannf91fe172015-03-01 12:31:41 +0100839 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
840 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700841 BPF_EXIT_INSN(),
842 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700843 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700844 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700845 .result_unpriv = REJECT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800846 .retval = POINTER_VALUE,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700847 },
848 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +0200849 "check valid spill/fill, skb mark",
850 .insns = {
851 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
852 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
853 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
854 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
855 offsetof(struct __sk_buff, mark)),
856 BPF_EXIT_INSN(),
857 },
858 .result = ACCEPT,
859 .result_unpriv = ACCEPT,
860 },
861 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700862 "check corrupted spill/fill",
863 .insns = {
864 /* spill R1(ctx) into stack */
865 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700866 /* mess up with R1 pointer on stack */
867 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700868 /* fill back into R0 should fail */
869 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700870 BPF_EXIT_INSN(),
871 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700872 .errstr_unpriv = "attempt to corrupt spilled",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700873 .errstr = "corrupted spill",
874 .result = REJECT,
875 },
876 {
877 "invalid src register in STX",
878 .insns = {
879 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
880 BPF_EXIT_INSN(),
881 },
882 .errstr = "R15 is invalid",
883 .result = REJECT,
884 },
885 {
886 "invalid dst register in STX",
887 .insns = {
888 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
889 BPF_EXIT_INSN(),
890 },
891 .errstr = "R14 is invalid",
892 .result = REJECT,
893 },
894 {
895 "invalid dst register in ST",
896 .insns = {
897 BPF_ST_MEM(BPF_B, 14, -1, -1),
898 BPF_EXIT_INSN(),
899 },
900 .errstr = "R14 is invalid",
901 .result = REJECT,
902 },
903 {
904 "invalid src register in LDX",
905 .insns = {
906 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
907 BPF_EXIT_INSN(),
908 },
909 .errstr = "R12 is invalid",
910 .result = REJECT,
911 },
912 {
913 "invalid dst register in LDX",
914 .insns = {
915 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
916 BPF_EXIT_INSN(),
917 },
918 .errstr = "R11 is invalid",
919 .result = REJECT,
920 },
921 {
922 "junk insn",
923 .insns = {
924 BPF_RAW_INSN(0, 0, 0, 0, 0),
925 BPF_EXIT_INSN(),
926 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100927 .errstr = "unknown opcode 00",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700928 .result = REJECT,
929 },
930 {
931 "junk insn2",
932 .insns = {
933 BPF_RAW_INSN(1, 0, 0, 0, 0),
934 BPF_EXIT_INSN(),
935 },
936 .errstr = "BPF_LDX uses reserved fields",
937 .result = REJECT,
938 },
939 {
940 "junk insn3",
941 .insns = {
942 BPF_RAW_INSN(-1, 0, 0, 0, 0),
943 BPF_EXIT_INSN(),
944 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100945 .errstr = "unknown opcode ff",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700946 .result = REJECT,
947 },
948 {
949 "junk insn4",
950 .insns = {
951 BPF_RAW_INSN(-1, -1, -1, -1, -1),
952 BPF_EXIT_INSN(),
953 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100954 .errstr = "unknown opcode ff",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700955 .result = REJECT,
956 },
957 {
958 "junk insn5",
959 .insns = {
960 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
961 BPF_EXIT_INSN(),
962 },
963 .errstr = "BPF_ALU uses reserved fields",
964 .result = REJECT,
965 },
966 {
967 "misaligned read from stack",
968 .insns = {
969 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
970 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
971 BPF_EXIT_INSN(),
972 },
Edward Creef65b1842017-08-07 15:27:12 +0100973 .errstr = "misaligned stack access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700974 .result = REJECT,
975 },
976 {
977 "invalid map_fd for function call",
978 .insns = {
979 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
980 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
982 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200983 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
984 BPF_FUNC_map_delete_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700985 BPF_EXIT_INSN(),
986 },
987 .errstr = "fd 0 is not pointing to valid bpf_map",
988 .result = REJECT,
989 },
990 {
991 "don't check return value before access",
992 .insns = {
993 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
994 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
995 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
996 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200997 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
998 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700999 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1000 BPF_EXIT_INSN(),
1001 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001002 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001003 .errstr = "R0 invalid mem access 'map_value_or_null'",
1004 .result = REJECT,
1005 },
1006 {
1007 "access memory with incorrect alignment",
1008 .insns = {
1009 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1010 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1012 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001013 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1014 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001015 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1016 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1017 BPF_EXIT_INSN(),
1018 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001019 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01001020 .errstr = "misaligned value access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001021 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001022 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001023 },
1024 {
1025 "sometimes access memory with incorrect alignment",
1026 .insns = {
1027 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1028 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1029 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1030 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1032 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001033 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1034 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1035 BPF_EXIT_INSN(),
1036 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1037 BPF_EXIT_INSN(),
1038 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001039 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001040 .errstr = "R0 invalid mem access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001041 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001042 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001043 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001044 },
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001045 {
1046 "jump test 1",
1047 .insns = {
1048 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1049 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1050 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1051 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1052 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1053 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1054 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1055 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1057 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1058 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1059 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1060 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1061 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1062 BPF_MOV64_IMM(BPF_REG_0, 0),
1063 BPF_EXIT_INSN(),
1064 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001065 .errstr_unpriv = "R1 pointer comparison",
1066 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001067 .result = ACCEPT,
1068 },
1069 {
1070 "jump test 2",
1071 .insns = {
1072 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1073 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1074 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1075 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1076 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1077 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1078 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1079 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1080 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1081 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1082 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1083 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1084 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1085 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1086 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1087 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1088 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1089 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1090 BPF_MOV64_IMM(BPF_REG_0, 0),
1091 BPF_EXIT_INSN(),
1092 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001093 .errstr_unpriv = "R1 pointer comparison",
1094 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001095 .result = ACCEPT,
1096 },
1097 {
1098 "jump test 3",
1099 .insns = {
1100 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1101 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1102 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1104 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1105 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1106 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1108 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1109 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1110 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1112 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1114 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1116 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1117 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1118 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1120 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1121 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1122 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1124 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001125 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1126 BPF_FUNC_map_delete_elem),
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001127 BPF_EXIT_INSN(),
1128 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001129 .fixup_map1 = { 24 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001130 .errstr_unpriv = "R1 pointer comparison",
1131 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001132 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08001133 .retval = -ENOENT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001134 },
1135 {
1136 "jump test 4",
1137 .insns = {
1138 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1139 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1140 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1141 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1143 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1144 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1145 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1146 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1147 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1148 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1149 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1150 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1151 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1154 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1156 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1157 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1158 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1159 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1160 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1161 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1162 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1163 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1164 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1168 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1169 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1170 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1171 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1172 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1173 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1175 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1177 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1178 BPF_MOV64_IMM(BPF_REG_0, 0),
1179 BPF_EXIT_INSN(),
1180 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001181 .errstr_unpriv = "R1 pointer comparison",
1182 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001183 .result = ACCEPT,
1184 },
Alexei Starovoitov342ded42014-10-28 15:11:42 -07001185 {
1186 "jump test 5",
1187 .insns = {
1188 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1189 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1190 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1191 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1192 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1193 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1194 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1195 BPF_MOV64_IMM(BPF_REG_0, 0),
1196 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1197 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1198 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1199 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1200 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1201 BPF_MOV64_IMM(BPF_REG_0, 0),
1202 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1203 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1204 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1205 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1206 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1207 BPF_MOV64_IMM(BPF_REG_0, 0),
1208 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1209 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1210 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1211 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1212 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1213 BPF_MOV64_IMM(BPF_REG_0, 0),
1214 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1215 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1216 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1217 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1218 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1219 BPF_MOV64_IMM(BPF_REG_0, 0),
1220 BPF_EXIT_INSN(),
1221 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001222 .errstr_unpriv = "R1 pointer comparison",
1223 .result_unpriv = REJECT,
Alexei Starovoitov342ded42014-10-28 15:11:42 -07001224 .result = ACCEPT,
1225 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001226 {
1227 "access skb fields ok",
1228 .insns = {
1229 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1230 offsetof(struct __sk_buff, len)),
1231 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1232 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1233 offsetof(struct __sk_buff, mark)),
1234 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1235 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1236 offsetof(struct __sk_buff, pkt_type)),
1237 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1238 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1239 offsetof(struct __sk_buff, queue_mapping)),
1240 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitovc2497392015-03-16 18:06:02 -07001241 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1242 offsetof(struct __sk_buff, protocol)),
1243 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1244 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1245 offsetof(struct __sk_buff, vlan_present)),
1246 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1247 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1248 offsetof(struct __sk_buff, vlan_tci)),
1249 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Daniel Borkmannb1d9fc42017-04-19 23:01:17 +02001250 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1251 offsetof(struct __sk_buff, napi_id)),
1252 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001253 BPF_EXIT_INSN(),
1254 },
1255 .result = ACCEPT,
1256 },
1257 {
1258 "access skb fields bad1",
1259 .insns = {
1260 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1261 BPF_EXIT_INSN(),
1262 },
1263 .errstr = "invalid bpf_context access",
1264 .result = REJECT,
1265 },
1266 {
1267 "access skb fields bad2",
1268 .insns = {
1269 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1270 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1273 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1275 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001276 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1277 BPF_EXIT_INSN(),
1278 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1279 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1280 offsetof(struct __sk_buff, pkt_type)),
1281 BPF_EXIT_INSN(),
1282 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001283 .fixup_map1 = { 4 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001284 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001285 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001286 .result = REJECT,
1287 },
1288 {
1289 "access skb fields bad3",
1290 .insns = {
1291 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1292 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1293 offsetof(struct __sk_buff, pkt_type)),
1294 BPF_EXIT_INSN(),
1295 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1296 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1298 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001299 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1300 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001301 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1302 BPF_EXIT_INSN(),
1303 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1304 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1305 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001306 .fixup_map1 = { 6 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001307 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001308 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001309 .result = REJECT,
1310 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001311 {
1312 "access skb fields bad4",
1313 .insns = {
1314 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1315 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1316 offsetof(struct __sk_buff, len)),
1317 BPF_MOV64_IMM(BPF_REG_0, 0),
1318 BPF_EXIT_INSN(),
1319 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1320 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1321 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1322 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001323 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1324 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001325 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1326 BPF_EXIT_INSN(),
1327 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1328 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1329 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001330 .fixup_map1 = { 7 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001331 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001332 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001333 .result = REJECT,
1334 },
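	/* The pairs below check that the socket address/port fields of
	 * struct __sk_buff (family, remote/local IPs and ports) are rejected
	 * for the default socket filter program type but readable from
	 * BPF_PROG_TYPE_SK_SKB programs.
	 */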
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001335 {
John Fastabend41bc94f2017-08-15 22:33:56 -07001336 "invalid access __sk_buff family",
1337 .insns = {
1338 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1339 offsetof(struct __sk_buff, family)),
1340 BPF_EXIT_INSN(),
1341 },
1342 .errstr = "invalid bpf_context access",
1343 .result = REJECT,
1344 },
1345 {
1346 "invalid access __sk_buff remote_ip4",
1347 .insns = {
1348 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1349 offsetof(struct __sk_buff, remote_ip4)),
1350 BPF_EXIT_INSN(),
1351 },
1352 .errstr = "invalid bpf_context access",
1353 .result = REJECT,
1354 },
1355 {
1356 "invalid access __sk_buff local_ip4",
1357 .insns = {
1358 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1359 offsetof(struct __sk_buff, local_ip4)),
1360 BPF_EXIT_INSN(),
1361 },
1362 .errstr = "invalid bpf_context access",
1363 .result = REJECT,
1364 },
1365 {
1366 "invalid access __sk_buff remote_ip6",
1367 .insns = {
1368 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1369 offsetof(struct __sk_buff, remote_ip6)),
1370 BPF_EXIT_INSN(),
1371 },
1372 .errstr = "invalid bpf_context access",
1373 .result = REJECT,
1374 },
1375 {
1376 "invalid access __sk_buff local_ip6",
1377 .insns = {
1378 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1379 offsetof(struct __sk_buff, local_ip6)),
1380 BPF_EXIT_INSN(),
1381 },
1382 .errstr = "invalid bpf_context access",
1383 .result = REJECT,
1384 },
1385 {
1386 "invalid access __sk_buff remote_port",
1387 .insns = {
1388 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1389 offsetof(struct __sk_buff, remote_port)),
1390 BPF_EXIT_INSN(),
1391 },
1392 .errstr = "invalid bpf_context access",
1393 .result = REJECT,
1394 },
1395 {
1396 "invalid access __sk_buff remote_port",
1397 .insns = {
1398 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1399 offsetof(struct __sk_buff, local_port)),
1400 BPF_EXIT_INSN(),
1401 },
1402 .errstr = "invalid bpf_context access",
1403 .result = REJECT,
1404 },
1405 {
1406 "valid access __sk_buff family",
1407 .insns = {
1408 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1409 offsetof(struct __sk_buff, family)),
1410 BPF_EXIT_INSN(),
1411 },
1412 .result = ACCEPT,
1413 .prog_type = BPF_PROG_TYPE_SK_SKB,
1414 },
1415 {
1416 "valid access __sk_buff remote_ip4",
1417 .insns = {
1418 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1419 offsetof(struct __sk_buff, remote_ip4)),
1420 BPF_EXIT_INSN(),
1421 },
1422 .result = ACCEPT,
1423 .prog_type = BPF_PROG_TYPE_SK_SKB,
1424 },
1425 {
1426 "valid access __sk_buff local_ip4",
1427 .insns = {
1428 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1429 offsetof(struct __sk_buff, local_ip4)),
1430 BPF_EXIT_INSN(),
1431 },
1432 .result = ACCEPT,
1433 .prog_type = BPF_PROG_TYPE_SK_SKB,
1434 },
1435 {
1436 "valid access __sk_buff remote_ip6",
1437 .insns = {
1438 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1439 offsetof(struct __sk_buff, remote_ip6[0])),
1440 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1441 offsetof(struct __sk_buff, remote_ip6[1])),
1442 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1443 offsetof(struct __sk_buff, remote_ip6[2])),
1444 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1445 offsetof(struct __sk_buff, remote_ip6[3])),
1446 BPF_EXIT_INSN(),
1447 },
1448 .result = ACCEPT,
1449 .prog_type = BPF_PROG_TYPE_SK_SKB,
1450 },
1451 {
1452 "valid access __sk_buff local_ip6",
1453 .insns = {
1454 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1455 offsetof(struct __sk_buff, local_ip6[0])),
1456 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1457 offsetof(struct __sk_buff, local_ip6[1])),
1458 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1459 offsetof(struct __sk_buff, local_ip6[2])),
1460 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1461 offsetof(struct __sk_buff, local_ip6[3])),
1462 BPF_EXIT_INSN(),
1463 },
1464 .result = ACCEPT,
1465 .prog_type = BPF_PROG_TYPE_SK_SKB,
1466 },
1467 {
1468 "valid access __sk_buff remote_port",
1469 .insns = {
1470 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1471 offsetof(struct __sk_buff, remote_port)),
1472 BPF_EXIT_INSN(),
1473 },
1474 .result = ACCEPT,
1475 .prog_type = BPF_PROG_TYPE_SK_SKB,
1476 },
1477 {
1478 "valid access __sk_buff remote_port",
1479 .insns = {
1480 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1481 offsetof(struct __sk_buff, local_port)),
1482 BPF_EXIT_INSN(),
1483 },
1484 .result = ACCEPT,
1485 .prog_type = BPF_PROG_TYPE_SK_SKB,
1486 },
1487 {
John Fastabended850542017-08-28 07:11:24 -07001488 "invalid access of tc_classid for SK_SKB",
1489 .insns = {
1490 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1491 offsetof(struct __sk_buff, tc_classid)),
1492 BPF_EXIT_INSN(),
1493 },
1494 .result = REJECT,
1495 .prog_type = BPF_PROG_TYPE_SK_SKB,
1496 .errstr = "invalid bpf_context access",
1497 },
1498 {
John Fastabendf7e9cb12017-10-18 07:10:58 -07001499 "invalid access of skb->mark for SK_SKB",
1500 .insns = {
1501 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1502 offsetof(struct __sk_buff, mark)),
1503 BPF_EXIT_INSN(),
1504 },
1505 .result = REJECT,
1506 .prog_type = BPF_PROG_TYPE_SK_SKB,
1507 .errstr = "invalid bpf_context access",
1508 },
1509 {
1510 "check skb->mark is not writeable by SK_SKB",
John Fastabended850542017-08-28 07:11:24 -07001511 .insns = {
1512 BPF_MOV64_IMM(BPF_REG_0, 0),
1513 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1514 offsetof(struct __sk_buff, mark)),
1515 BPF_EXIT_INSN(),
1516 },
John Fastabendf7e9cb12017-10-18 07:10:58 -07001517 .result = REJECT,
John Fastabended850542017-08-28 07:11:24 -07001518 .prog_type = BPF_PROG_TYPE_SK_SKB,
John Fastabendf7e9cb12017-10-18 07:10:58 -07001519 .errstr = "invalid bpf_context access",
John Fastabended850542017-08-28 07:11:24 -07001520 },
1521 {
1522 "check skb->tc_index is writeable by SK_SKB",
1523 .insns = {
1524 BPF_MOV64_IMM(BPF_REG_0, 0),
1525 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1526 offsetof(struct __sk_buff, tc_index)),
1527 BPF_EXIT_INSN(),
1528 },
1529 .result = ACCEPT,
1530 .prog_type = BPF_PROG_TYPE_SK_SKB,
1531 },
1532 {
1533 "check skb->priority is writeable by SK_SKB",
1534 .insns = {
1535 BPF_MOV64_IMM(BPF_REG_0, 0),
1536 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1537 offsetof(struct __sk_buff, priority)),
1538 BPF_EXIT_INSN(),
1539 },
1540 .result = ACCEPT,
1541 .prog_type = BPF_PROG_TYPE_SK_SKB,
1542 },
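	/* Direct packet access for SK_SKB follows the usual pattern: load
	 * skb->data and skb->data_end, advance a copy of data by the number of
	 * bytes about to be touched, and only dereference it when the bounds
	 * check against data_end succeeds.
	 */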
1543 {
1544 "direct packet read for SK_SKB",
1545 .insns = {
1546 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1547 offsetof(struct __sk_buff, data)),
1548 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1549 offsetof(struct __sk_buff, data_end)),
1550 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1552 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1553 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1554 BPF_MOV64_IMM(BPF_REG_0, 0),
1555 BPF_EXIT_INSN(),
1556 },
1557 .result = ACCEPT,
1558 .prog_type = BPF_PROG_TYPE_SK_SKB,
1559 },
1560 {
1561 "direct packet write for SK_SKB",
1562 .insns = {
1563 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1564 offsetof(struct __sk_buff, data)),
1565 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1566 offsetof(struct __sk_buff, data_end)),
1567 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1569 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1570 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1571 BPF_MOV64_IMM(BPF_REG_0, 0),
1572 BPF_EXIT_INSN(),
1573 },
1574 .result = ACCEPT,
1575 .prog_type = BPF_PROG_TYPE_SK_SKB,
1576 },
1577 {
1578 "overlapping checks for direct packet access SK_SKB",
1579 .insns = {
1580 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1581 offsetof(struct __sk_buff, data)),
1582 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1583 offsetof(struct __sk_buff, data_end)),
1584 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1586 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1587 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1589 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1590 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1591 BPF_MOV64_IMM(BPF_REG_0, 0),
1592 BPF_EXIT_INSN(),
1593 },
1594 .result = ACCEPT,
1595 .prog_type = BPF_PROG_TYPE_SK_SKB,
1596 },
1597 {
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001598 "check skb->mark is not writeable by sockets",
1599 .insns = {
1600 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1601 offsetof(struct __sk_buff, mark)),
1602 BPF_EXIT_INSN(),
1603 },
1604 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001605 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001606 .result = REJECT,
1607 },
1608 {
1609 "check skb->tc_index is not writeable by sockets",
1610 .insns = {
1611 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1612 offsetof(struct __sk_buff, tc_index)),
1613 BPF_EXIT_INSN(),
1614 },
1615 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001616 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001617 .result = REJECT,
1618 },
1619 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001620 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001621 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001622 BPF_MOV64_IMM(BPF_REG_0, 0),
1623 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1624 offsetof(struct __sk_buff, cb[0])),
1625 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1626 offsetof(struct __sk_buff, cb[0]) + 1),
1627 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1628 offsetof(struct __sk_buff, cb[0]) + 2),
1629 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1630 offsetof(struct __sk_buff, cb[0]) + 3),
1631 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1632 offsetof(struct __sk_buff, cb[1])),
1633 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1634 offsetof(struct __sk_buff, cb[1]) + 1),
1635 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1636 offsetof(struct __sk_buff, cb[1]) + 2),
1637 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1638 offsetof(struct __sk_buff, cb[1]) + 3),
1639 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1640 offsetof(struct __sk_buff, cb[2])),
1641 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1642 offsetof(struct __sk_buff, cb[2]) + 1),
1643 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1644 offsetof(struct __sk_buff, cb[2]) + 2),
1645 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1646 offsetof(struct __sk_buff, cb[2]) + 3),
1647 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1648 offsetof(struct __sk_buff, cb[3])),
1649 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1650 offsetof(struct __sk_buff, cb[3]) + 1),
1651 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1652 offsetof(struct __sk_buff, cb[3]) + 2),
1653 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1654 offsetof(struct __sk_buff, cb[3]) + 3),
1655 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1656 offsetof(struct __sk_buff, cb[4])),
1657 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1658 offsetof(struct __sk_buff, cb[4]) + 1),
1659 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1660 offsetof(struct __sk_buff, cb[4]) + 2),
1661 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1662 offsetof(struct __sk_buff, cb[4]) + 3),
1663 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1664 offsetof(struct __sk_buff, cb[0])),
1665 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1666 offsetof(struct __sk_buff, cb[0]) + 1),
1667 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1668 offsetof(struct __sk_buff, cb[0]) + 2),
1669 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1670 offsetof(struct __sk_buff, cb[0]) + 3),
1671 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1672 offsetof(struct __sk_buff, cb[1])),
1673 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1674 offsetof(struct __sk_buff, cb[1]) + 1),
1675 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1676 offsetof(struct __sk_buff, cb[1]) + 2),
1677 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1678 offsetof(struct __sk_buff, cb[1]) + 3),
1679 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1680 offsetof(struct __sk_buff, cb[2])),
1681 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1682 offsetof(struct __sk_buff, cb[2]) + 1),
1683 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1684 offsetof(struct __sk_buff, cb[2]) + 2),
1685 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1686 offsetof(struct __sk_buff, cb[2]) + 3),
1687 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1688 offsetof(struct __sk_buff, cb[3])),
1689 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1690 offsetof(struct __sk_buff, cb[3]) + 1),
1691 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1692 offsetof(struct __sk_buff, cb[3]) + 2),
1693 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1694 offsetof(struct __sk_buff, cb[3]) + 3),
1695 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1696 offsetof(struct __sk_buff, cb[4])),
1697 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1698 offsetof(struct __sk_buff, cb[4]) + 1),
1699 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1700 offsetof(struct __sk_buff, cb[4]) + 2),
1701 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1702 offsetof(struct __sk_buff, cb[4]) + 3),
1703 BPF_EXIT_INSN(),
1704 },
1705 .result = ACCEPT,
1706 },
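	/* skb->hash is read-only: stores of any size are rejected, and
	 * the tests expect narrow loads to be accepted only at the
	 * least-significant end of the field. The __BYTE_ORDER
	 * conditionals below pick the offset of that byte/half on
	 * little- vs big-endian hosts.
	 */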
1707 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001708 "__sk_buff->hash, offset 0, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001709 .insns = {
1710 BPF_MOV64_IMM(BPF_REG_0, 0),
1711 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001712 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001713 BPF_EXIT_INSN(),
1714 },
1715 .errstr = "invalid bpf_context access",
1716 .result = REJECT,
1717 },
1718 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001719 "__sk_buff->tc_index, offset 3, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001720 .insns = {
1721 BPF_MOV64_IMM(BPF_REG_0, 0),
1722 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001723 offsetof(struct __sk_buff, tc_index) + 3),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001724 BPF_EXIT_INSN(),
1725 },
1726 .errstr = "invalid bpf_context access",
1727 .result = REJECT,
1728 },
1729 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001730 "check skb->hash byte load permitted",
1731 .insns = {
1732 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001733#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001734 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1735 offsetof(struct __sk_buff, hash)),
1736#else
1737 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1738 offsetof(struct __sk_buff, hash) + 3),
1739#endif
1740 BPF_EXIT_INSN(),
1741 },
1742 .result = ACCEPT,
1743 },
1744 {
1745 "check skb->hash byte load not permitted 1",
1746 .insns = {
1747 BPF_MOV64_IMM(BPF_REG_0, 0),
1748 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1749 offsetof(struct __sk_buff, hash) + 1),
1750 BPF_EXIT_INSN(),
1751 },
1752 .errstr = "invalid bpf_context access",
1753 .result = REJECT,
1754 },
1755 {
1756 "check skb->hash byte load not permitted 2",
1757 .insns = {
1758 BPF_MOV64_IMM(BPF_REG_0, 0),
1759 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1760 offsetof(struct __sk_buff, hash) + 2),
1761 BPF_EXIT_INSN(),
1762 },
1763 .errstr = "invalid bpf_context access",
1764 .result = REJECT,
1765 },
1766 {
1767 "check skb->hash byte load not permitted 3",
1768 .insns = {
1769 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001770#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001771 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1772 offsetof(struct __sk_buff, hash) + 3),
1773#else
1774 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1775 offsetof(struct __sk_buff, hash)),
1776#endif
1777 BPF_EXIT_INSN(),
1778 },
1779 .errstr = "invalid bpf_context access",
1780 .result = REJECT,
1781 },
1782 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001783 "check cb access: byte, wrong type",
1784 .insns = {
1785 BPF_MOV64_IMM(BPF_REG_0, 0),
1786 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001787 offsetof(struct __sk_buff, cb[0])),
1788 BPF_EXIT_INSN(),
1789 },
1790 .errstr = "invalid bpf_context access",
1791 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001792 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1793 },
1794 {
1795 "check cb access: half",
1796 .insns = {
1797 BPF_MOV64_IMM(BPF_REG_0, 0),
1798 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1799 offsetof(struct __sk_buff, cb[0])),
1800 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1801 offsetof(struct __sk_buff, cb[0]) + 2),
1802 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1803 offsetof(struct __sk_buff, cb[1])),
1804 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1805 offsetof(struct __sk_buff, cb[1]) + 2),
1806 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1807 offsetof(struct __sk_buff, cb[2])),
1808 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1809 offsetof(struct __sk_buff, cb[2]) + 2),
1810 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1811 offsetof(struct __sk_buff, cb[3])),
1812 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1813 offsetof(struct __sk_buff, cb[3]) + 2),
1814 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1815 offsetof(struct __sk_buff, cb[4])),
1816 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1817 offsetof(struct __sk_buff, cb[4]) + 2),
1818 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1819 offsetof(struct __sk_buff, cb[0])),
1820 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1821 offsetof(struct __sk_buff, cb[0]) + 2),
1822 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1823 offsetof(struct __sk_buff, cb[1])),
1824 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1825 offsetof(struct __sk_buff, cb[1]) + 2),
1826 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1827 offsetof(struct __sk_buff, cb[2])),
1828 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1829 offsetof(struct __sk_buff, cb[2]) + 2),
1830 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1831 offsetof(struct __sk_buff, cb[3])),
1832 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1833 offsetof(struct __sk_buff, cb[3]) + 2),
1834 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1835 offsetof(struct __sk_buff, cb[4])),
1836 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1837 offsetof(struct __sk_buff, cb[4]) + 2),
1838 BPF_EXIT_INSN(),
1839 },
1840 .result = ACCEPT,
1841 },
1842 {
1843 "check cb access: half, unaligned",
1844 .insns = {
1845 BPF_MOV64_IMM(BPF_REG_0, 0),
1846 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1847 offsetof(struct __sk_buff, cb[0]) + 1),
1848 BPF_EXIT_INSN(),
1849 },
Edward Creef65b1842017-08-07 15:27:12 +01001850 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001851 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001852 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001853 },
1854 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001855 "check __sk_buff->hash, offset 0, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001856 .insns = {
1857 BPF_MOV64_IMM(BPF_REG_0, 0),
1858 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001859 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001860 BPF_EXIT_INSN(),
1861 },
1862 .errstr = "invalid bpf_context access",
1863 .result = REJECT,
1864 },
1865 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001866 "check __sk_buff->tc_index, offset 2, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001867 .insns = {
1868 BPF_MOV64_IMM(BPF_REG_0, 0),
1869 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001870 offsetof(struct __sk_buff, tc_index) + 2),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001871 BPF_EXIT_INSN(),
1872 },
1873 .errstr = "invalid bpf_context access",
1874 .result = REJECT,
1875 },
1876 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001877 "check skb->hash half load permitted",
1878 .insns = {
1879 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001880#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001881 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1882 offsetof(struct __sk_buff, hash)),
1883#else
1884 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1885 offsetof(struct __sk_buff, hash) + 2),
1886#endif
1887 BPF_EXIT_INSN(),
1888 },
1889 .result = ACCEPT,
1890 },
1891 {
1892 "check skb->hash half load not permitted",
1893 .insns = {
1894 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001895#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001896 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1897 offsetof(struct __sk_buff, hash) + 2),
1898#else
1899 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1900 offsetof(struct __sk_buff, hash)),
1901#endif
1902 BPF_EXIT_INSN(),
1903 },
1904 .errstr = "invalid bpf_context access",
1905 .result = REJECT,
1906 },
1907 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001908 "check cb access: half, wrong type",
1909 .insns = {
1910 BPF_MOV64_IMM(BPF_REG_0, 0),
1911 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1912 offsetof(struct __sk_buff, cb[0])),
1913 BPF_EXIT_INSN(),
1914 },
1915 .errstr = "invalid bpf_context access",
1916 .result = REJECT,
1917 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1918 },
1919 {
1920 "check cb access: word",
1921 .insns = {
1922 BPF_MOV64_IMM(BPF_REG_0, 0),
1923 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1924 offsetof(struct __sk_buff, cb[0])),
1925 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1926 offsetof(struct __sk_buff, cb[1])),
1927 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1928 offsetof(struct __sk_buff, cb[2])),
1929 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1930 offsetof(struct __sk_buff, cb[3])),
1931 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1932 offsetof(struct __sk_buff, cb[4])),
1933 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1934 offsetof(struct __sk_buff, cb[0])),
1935 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1936 offsetof(struct __sk_buff, cb[1])),
1937 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1938 offsetof(struct __sk_buff, cb[2])),
1939 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1940 offsetof(struct __sk_buff, cb[3])),
1941 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1942 offsetof(struct __sk_buff, cb[4])),
1943 BPF_EXIT_INSN(),
1944 },
1945 .result = ACCEPT,
1946 },
1947 {
1948 "check cb access: word, unaligned 1",
1949 .insns = {
1950 BPF_MOV64_IMM(BPF_REG_0, 0),
1951 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1952 offsetof(struct __sk_buff, cb[0]) + 2),
1953 BPF_EXIT_INSN(),
1954 },
Edward Creef65b1842017-08-07 15:27:12 +01001955 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001956 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001957 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001958 },
1959 {
1960 "check cb access: word, unaligned 2",
1961 .insns = {
1962 BPF_MOV64_IMM(BPF_REG_0, 0),
1963 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1964 offsetof(struct __sk_buff, cb[4]) + 1),
1965 BPF_EXIT_INSN(),
1966 },
Edward Creef65b1842017-08-07 15:27:12 +01001967 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001968 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001969 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001970 },
1971 {
1972 "check cb access: word, unaligned 3",
1973 .insns = {
1974 BPF_MOV64_IMM(BPF_REG_0, 0),
1975 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1976 offsetof(struct __sk_buff, cb[4]) + 2),
1977 BPF_EXIT_INSN(),
1978 },
Edward Creef65b1842017-08-07 15:27:12 +01001979 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001980 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001981 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001982 },
1983 {
1984 "check cb access: word, unaligned 4",
1985 .insns = {
1986 BPF_MOV64_IMM(BPF_REG_0, 0),
1987 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1988 offsetof(struct __sk_buff, cb[4]) + 3),
1989 BPF_EXIT_INSN(),
1990 },
Edward Creef65b1842017-08-07 15:27:12 +01001991 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001992 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001993 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001994 },
1995 {
1996 "check cb access: double",
1997 .insns = {
1998 BPF_MOV64_IMM(BPF_REG_0, 0),
1999 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2000 offsetof(struct __sk_buff, cb[0])),
2001 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2002 offsetof(struct __sk_buff, cb[2])),
2003 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2004 offsetof(struct __sk_buff, cb[0])),
2005 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2006 offsetof(struct __sk_buff, cb[2])),
2007 BPF_EXIT_INSN(),
2008 },
2009 .result = ACCEPT,
2010 },
2011 {
2012 "check cb access: double, unaligned 1",
2013 .insns = {
2014 BPF_MOV64_IMM(BPF_REG_0, 0),
2015 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2016 offsetof(struct __sk_buff, cb[1])),
2017 BPF_EXIT_INSN(),
2018 },
Edward Creef65b1842017-08-07 15:27:12 +01002019 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002020 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002021 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002022 },
2023 {
2024 "check cb access: double, unaligned 2",
2025 .insns = {
2026 BPF_MOV64_IMM(BPF_REG_0, 0),
2027 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2028 offsetof(struct __sk_buff, cb[3])),
2029 BPF_EXIT_INSN(),
2030 },
Edward Creef65b1842017-08-07 15:27:12 +01002031 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002032 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002033 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002034 },
2035 {
2036 "check cb access: double, oob 1",
2037 .insns = {
2038 BPF_MOV64_IMM(BPF_REG_0, 0),
2039 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2040 offsetof(struct __sk_buff, cb[4])),
2041 BPF_EXIT_INSN(),
2042 },
2043 .errstr = "invalid bpf_context access",
2044 .result = REJECT,
2045 },
2046 {
2047 "check cb access: double, oob 2",
2048 .insns = {
2049 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002050 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2051 offsetof(struct __sk_buff, cb[4])),
2052 BPF_EXIT_INSN(),
2053 },
2054 .errstr = "invalid bpf_context access",
2055 .result = REJECT,
2056 },
2057 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002058 "check __sk_buff->ifindex dw store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002059 .insns = {
2060 BPF_MOV64_IMM(BPF_REG_0, 0),
Yonghong Song31fd8582017-06-13 15:52:13 -07002061 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2062 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002063 BPF_EXIT_INSN(),
2064 },
2065 .errstr = "invalid bpf_context access",
2066 .result = REJECT,
2067 },
2068 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002069 "check __sk_buff->ifindex dw load not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002070 .insns = {
2071 BPF_MOV64_IMM(BPF_REG_0, 0),
2072 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
Yonghong Song31fd8582017-06-13 15:52:13 -07002073 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002074 BPF_EXIT_INSN(),
2075 },
2076 .errstr = "invalid bpf_context access",
2077 .result = REJECT,
2078 },
2079 {
2080 "check cb access: double, wrong type",
2081 .insns = {
2082 BPF_MOV64_IMM(BPF_REG_0, 0),
2083 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2084 offsetof(struct __sk_buff, cb[0])),
2085 BPF_EXIT_INSN(),
2086 },
2087 .errstr = "invalid bpf_context access",
2088 .result = REJECT,
2089 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002090 },
2091 {
2092 "check out of range skb->cb access",
2093 .insns = {
2094 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002095 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002096 BPF_EXIT_INSN(),
2097 },
2098 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002099 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002100 .result = REJECT,
2101 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2102 },
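	/* Which __sk_buff fields may be written depends on the program
	 * type: socket filters can write cb[] but not mark or tc_index,
	 * while tc programs (SCHED_CLS/SCHED_ACT) may write all three.
	 * Both write tests below are further expected to fail when the
	 * load is unprivileged.
	 */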
2103 {
2104 "write skb fields from socket prog",
2105 .insns = {
2106 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2107 offsetof(struct __sk_buff, cb[4])),
2108 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2109 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2110 offsetof(struct __sk_buff, mark)),
2111 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2112 offsetof(struct __sk_buff, tc_index)),
2113 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2114 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2115 offsetof(struct __sk_buff, cb[0])),
2116 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2117 offsetof(struct __sk_buff, cb[2])),
2118 BPF_EXIT_INSN(),
2119 },
2120 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002121 .errstr_unpriv = "R1 leaks addr",
2122 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002123 },
2124 {
2125 "write skb fields from tc_cls_act prog",
2126 .insns = {
2127 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2128 offsetof(struct __sk_buff, cb[0])),
2129 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2130 offsetof(struct __sk_buff, mark)),
2131 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2132 offsetof(struct __sk_buff, tc_index)),
2133 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2134 offsetof(struct __sk_buff, tc_index)),
2135 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2136 offsetof(struct __sk_buff, cb[3])),
2137 BPF_EXIT_INSN(),
2138 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002139 .errstr_unpriv = "",
2140 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002141 .result = ACCEPT,
2142 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2143 },
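	/* PTR_TO_STACK tests: dereferences through a copy of the frame
	 * pointer must stay inside the stack (MAX_BPF_STACK bytes below
	 * R10) and be aligned to the access size, as the errstr patterns
	 * below spell out.
	 */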
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002144 {
2145 "PTR_TO_STACK store/load",
2146 .insns = {
2147 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2149 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2150 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2151 BPF_EXIT_INSN(),
2152 },
2153 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002154 .retval = 0xfaceb00c,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002155 },
2156 {
2157 "PTR_TO_STACK store/load - bad alignment on off",
2158 .insns = {
2159 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2160 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2161 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2162 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2163 BPF_EXIT_INSN(),
2164 },
2165 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002166 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002167 },
2168 {
2169 "PTR_TO_STACK store/load - bad alignment on reg",
2170 .insns = {
2171 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2173 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2174 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2175 BPF_EXIT_INSN(),
2176 },
2177 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002178 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002179 },
2180 {
2181 "PTR_TO_STACK store/load - out of bounds low",
2182 .insns = {
2183 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2185 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2186 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2187 BPF_EXIT_INSN(),
2188 },
2189 .result = REJECT,
2190 .errstr = "invalid stack off=-79992 size=8",
2191 },
2192 {
2193 "PTR_TO_STACK store/load - out of bounds high",
2194 .insns = {
2195 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2197 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2198 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2199 BPF_EXIT_INSN(),
2200 },
2201 .result = REJECT,
2202 .errstr = "invalid stack off=0 size=8",
2203 },
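	/* The "unpriv:" tests cover the extra checks applied when the
	 * harness re-runs a program unprivileged: returning or storing
	 * kernel pointers, pointer comparisons and most pointer
	 * arithmetic must all be refused, as captured by the
	 * errstr_unpriv/result_unpriv fields.
	 */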
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002204 {
2205 "unpriv: return pointer",
2206 .insns = {
2207 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2208 BPF_EXIT_INSN(),
2209 },
2210 .result = ACCEPT,
2211 .result_unpriv = REJECT,
2212 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002213 .retval = POINTER_VALUE,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002214 },
2215 {
2216 "unpriv: add const to pointer",
2217 .insns = {
2218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2219 BPF_MOV64_IMM(BPF_REG_0, 0),
2220 BPF_EXIT_INSN(),
2221 },
2222 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002223 },
2224 {
2225 "unpriv: add pointer to pointer",
2226 .insns = {
2227 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2228 BPF_MOV64_IMM(BPF_REG_0, 0),
2229 BPF_EXIT_INSN(),
2230 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08002231 .result = REJECT,
2232 .errstr = "R1 pointer += pointer",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002233 },
2234 {
2235 "unpriv: neg pointer",
2236 .insns = {
2237 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2238 BPF_MOV64_IMM(BPF_REG_0, 0),
2239 BPF_EXIT_INSN(),
2240 },
2241 .result = ACCEPT,
2242 .result_unpriv = REJECT,
2243 .errstr_unpriv = "R1 pointer arithmetic",
2244 },
2245 {
2246 "unpriv: cmp pointer with const",
2247 .insns = {
2248 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2249 BPF_MOV64_IMM(BPF_REG_0, 0),
2250 BPF_EXIT_INSN(),
2251 },
2252 .result = ACCEPT,
2253 .result_unpriv = REJECT,
2254 .errstr_unpriv = "R1 pointer comparison",
2255 },
2256 {
2257 "unpriv: cmp pointer with pointer",
2258 .insns = {
2259 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2260 BPF_MOV64_IMM(BPF_REG_0, 0),
2261 BPF_EXIT_INSN(),
2262 },
2263 .result = ACCEPT,
2264 .result_unpriv = REJECT,
2265 .errstr_unpriv = "R10 pointer comparison",
2266 },
2267 {
2268 "unpriv: check that printk is disallowed",
2269 .insns = {
2270 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2271 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2273 BPF_MOV64_IMM(BPF_REG_2, 8),
2274 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002275 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2276 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002277 BPF_MOV64_IMM(BPF_REG_0, 0),
2278 BPF_EXIT_INSN(),
2279 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01002280 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002281 .result_unpriv = REJECT,
2282 .result = ACCEPT,
2283 },
2284 {
2285 "unpriv: pass pointer to helper function",
2286 .insns = {
2287 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2288 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2290 BPF_LD_MAP_FD(BPF_REG_1, 0),
2291 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2292 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2294 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002295 BPF_MOV64_IMM(BPF_REG_0, 0),
2296 BPF_EXIT_INSN(),
2297 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002298 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002299 .errstr_unpriv = "R4 leaks addr",
2300 .result_unpriv = REJECT,
2301 .result = ACCEPT,
2302 },
2303 {
2304 "unpriv: indirectly pass pointer on stack to helper function",
2305 .insns = {
2306 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2307 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2309 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002310 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2311 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002312 BPF_MOV64_IMM(BPF_REG_0, 0),
2313 BPF_EXIT_INSN(),
2314 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002315 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002316 .errstr = "invalid indirect read from stack off -8+0 size 8",
2317 .result = REJECT,
2318 },
2319 {
2320 "unpriv: mangle pointer on stack 1",
2321 .insns = {
2322 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2323 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2324 BPF_MOV64_IMM(BPF_REG_0, 0),
2325 BPF_EXIT_INSN(),
2326 },
2327 .errstr_unpriv = "attempt to corrupt spilled",
2328 .result_unpriv = REJECT,
2329 .result = ACCEPT,
2330 },
2331 {
2332 "unpriv: mangle pointer on stack 2",
2333 .insns = {
2334 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2335 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2336 BPF_MOV64_IMM(BPF_REG_0, 0),
2337 BPF_EXIT_INSN(),
2338 },
2339 .errstr_unpriv = "attempt to corrupt spilled",
2340 .result_unpriv = REJECT,
2341 .result = ACCEPT,
2342 },
2343 {
2344 "unpriv: read pointer from stack in small chunks",
2345 .insns = {
2346 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2347 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2348 BPF_MOV64_IMM(BPF_REG_0, 0),
2349 BPF_EXIT_INSN(),
2350 },
2351 .errstr = "invalid size",
2352 .result = REJECT,
2353 },
2354 {
2355 "unpriv: write pointer into ctx",
2356 .insns = {
2357 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2358 BPF_MOV64_IMM(BPF_REG_0, 0),
2359 BPF_EXIT_INSN(),
2360 },
2361 .errstr_unpriv = "R1 leaks addr",
2362 .result_unpriv = REJECT,
2363 .errstr = "invalid bpf_context access",
2364 .result = REJECT,
2365 },
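	/* Spilling the ctx pointer and filling it back must preserve its
	 * register type; the "spill/fill of ctx 3/4" variants overwrite
	 * or XADD the spill slot and expect the refill to be downgraded
	 * (type=fp resp. type=inv) and the helper call rejected.
	 */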
2366 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002367 "unpriv: spill/fill of ctx",
2368 .insns = {
2369 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2370 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2371 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2372 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2373 BPF_MOV64_IMM(BPF_REG_0, 0),
2374 BPF_EXIT_INSN(),
2375 },
2376 .result = ACCEPT,
2377 },
2378 {
2379 "unpriv: spill/fill of ctx 2",
2380 .insns = {
2381 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2383 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2384 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2386 BPF_FUNC_get_hash_recalc),
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002387 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002388 BPF_EXIT_INSN(),
2389 },
2390 .result = ACCEPT,
2391 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2392 },
2393 {
2394 "unpriv: spill/fill of ctx 3",
2395 .insns = {
2396 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2397 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2398 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2399 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2400 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002401 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2402 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002403 BPF_EXIT_INSN(),
2404 },
2405 .result = REJECT,
2406 .errstr = "R1 type=fp expected=ctx",
2407 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2408 },
2409 {
2410 "unpriv: spill/fill of ctx 4",
2411 .insns = {
2412 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2414 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2415 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002416 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2417 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002418 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002419 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2420 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002421 BPF_EXIT_INSN(),
2422 },
2423 .result = REJECT,
2424 .errstr = "R1 type=inv expected=ctx",
2425 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2426 },
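	/* Reaching the same load through two paths that spilled different
	 * pointer types would make one instruction operate on both a
	 * stack and a ctx (or perf event) pointer, which the verifier
	 * refuses: "same insn cannot be used with different pointers".
	 */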
2427 {
2428 "unpriv: spill/fill of different pointers stx",
2429 .insns = {
2430 BPF_MOV64_IMM(BPF_REG_3, 42),
2431 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2433 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2436 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2437 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2438 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2439 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2440 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2441 offsetof(struct __sk_buff, mark)),
2442 BPF_MOV64_IMM(BPF_REG_0, 0),
2443 BPF_EXIT_INSN(),
2444 },
2445 .result = REJECT,
2446 .errstr = "same insn cannot be used with different pointers",
2447 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2448 },
2449 {
2450 "unpriv: spill/fill of different pointers ldx",
2451 .insns = {
2452 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2454 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2457 -(__s32)offsetof(struct bpf_perf_event_data,
2458 sample_period) - 8),
2459 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2460 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2461 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2462 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2463 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2464 offsetof(struct bpf_perf_event_data,
2465 sample_period)),
2466 BPF_MOV64_IMM(BPF_REG_0, 0),
2467 BPF_EXIT_INSN(),
2468 },
2469 .result = REJECT,
2470 .errstr = "same insn cannot be used with different pointers",
2471 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2472 },
2473 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002474 "unpriv: write pointer into map elem value",
2475 .insns = {
2476 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2477 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2479 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002480 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2481 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002482 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2483 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2484 BPF_EXIT_INSN(),
2485 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002486 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002487 .errstr_unpriv = "R0 leaks addr",
2488 .result_unpriv = REJECT,
2489 .result = ACCEPT,
2490 },
2491 {
2492 "unpriv: partial copy of pointer",
2493 .insns = {
2494 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2495 BPF_MOV64_IMM(BPF_REG_0, 0),
2496 BPF_EXIT_INSN(),
2497 },
2498 .errstr_unpriv = "R10 partial copy",
2499 .result_unpriv = REJECT,
2500 .result = ACCEPT,
2501 },
2502 {
2503 "unpriv: pass pointer to tail_call",
2504 .insns = {
2505 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2506 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2508 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002509 BPF_MOV64_IMM(BPF_REG_0, 0),
2510 BPF_EXIT_INSN(),
2511 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002512 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002513 .errstr_unpriv = "R3 leaks addr into helper",
2514 .result_unpriv = REJECT,
2515 .result = ACCEPT,
2516 },
2517 {
2518 "unpriv: cmp map pointer with zero",
2519 .insns = {
2520 BPF_MOV64_IMM(BPF_REG_1, 0),
2521 BPF_LD_MAP_FD(BPF_REG_1, 0),
2522 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2523 BPF_MOV64_IMM(BPF_REG_0, 0),
2524 BPF_EXIT_INSN(),
2525 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002526 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002527 .errstr_unpriv = "R1 pointer comparison",
2528 .result_unpriv = REJECT,
2529 .result = ACCEPT,
2530 },
2531 {
2532 "unpriv: write into frame pointer",
2533 .insns = {
2534 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2535 BPF_MOV64_IMM(BPF_REG_0, 0),
2536 BPF_EXIT_INSN(),
2537 },
2538 .errstr = "frame pointer is read only",
2539 .result = REJECT,
2540 },
2541 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002542 "unpriv: spill/fill frame pointer",
2543 .insns = {
2544 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2546 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2547 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2548 BPF_MOV64_IMM(BPF_REG_0, 0),
2549 BPF_EXIT_INSN(),
2550 },
2551 .errstr = "frame pointer is read only",
2552 .result = REJECT,
2553 },
2554 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002555 "unpriv: cmp of frame pointer",
2556 .insns = {
2557 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2558 BPF_MOV64_IMM(BPF_REG_0, 0),
2559 BPF_EXIT_INSN(),
2560 },
2561 .errstr_unpriv = "R10 pointer comparison",
2562 .result_unpriv = REJECT,
2563 .result = ACCEPT,
2564 },
2565 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002566 "unpriv: adding of fp",
2567 .insns = {
2568 BPF_MOV64_IMM(BPF_REG_0, 0),
2569 BPF_MOV64_IMM(BPF_REG_1, 0),
2570 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2571 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2572 BPF_EXIT_INSN(),
2573 },
Edward Creef65b1842017-08-07 15:27:12 +01002574 .result = ACCEPT,
Daniel Borkmann728a8532017-04-27 01:39:32 +02002575 },
2576 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002577 "unpriv: cmp of stack pointer",
2578 .insns = {
2579 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2580 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2581 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2582 BPF_MOV64_IMM(BPF_REG_0, 0),
2583 BPF_EXIT_INSN(),
2584 },
2585 .errstr_unpriv = "R2 pointer comparison",
2586 .result_unpriv = REJECT,
2587 .result = ACCEPT,
2588 },
2589 {
Yonghong Song332270f2017-04-29 22:52:42 -07002590 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002591 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07002592 BPF_MOV64_IMM(BPF_REG_1, 4),
2593 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2594 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2597 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2598 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2599 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2600 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2602 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002603 BPF_MOV64_IMM(BPF_REG_0, 0),
2604 BPF_EXIT_INSN(),
2605 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002606 .result = ACCEPT,
2607 },
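	/* raw_stack tests: bpf_skb_load_bytes() may write into
	 * uninitialized stack memory (see the accepted "no init" case),
	 * but the buffer must still be a valid, non-zero-sized region
	 * inside the stack and the length must be provably bounded.
	 */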
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002608 {
2609 "raw_stack: no skb_load_bytes",
2610 .insns = {
2611 BPF_MOV64_IMM(BPF_REG_2, 4),
2612 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2614 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2615 BPF_MOV64_IMM(BPF_REG_4, 8),
2616 /* Call to skb_load_bytes() omitted. */
2617 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2618 BPF_EXIT_INSN(),
2619 },
2620 .result = REJECT,
2621 .errstr = "invalid read from stack off -8+0 size 8",
2622 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2623 },
2624 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002625 "raw_stack: skb_load_bytes, negative len",
2626 .insns = {
2627 BPF_MOV64_IMM(BPF_REG_2, 4),
2628 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2629 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2630 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2631 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002632 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2633 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002634 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2635 BPF_EXIT_INSN(),
2636 },
2637 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002638 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002639 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2640 },
2641 {
2642 "raw_stack: skb_load_bytes, negative len 2",
2643 .insns = {
2644 BPF_MOV64_IMM(BPF_REG_2, 4),
2645 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2647 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2648 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002649 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2650 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002651 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2652 BPF_EXIT_INSN(),
2653 },
2654 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002655 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002656 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2657 },
2658 {
2659 "raw_stack: skb_load_bytes, zero len",
2660 .insns = {
2661 BPF_MOV64_IMM(BPF_REG_2, 4),
2662 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2664 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2665 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2667 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002668 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2669 BPF_EXIT_INSN(),
2670 },
2671 .result = REJECT,
2672 .errstr = "invalid stack type R3",
2673 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2674 },
2675 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002676 "raw_stack: skb_load_bytes, no init",
2677 .insns = {
2678 BPF_MOV64_IMM(BPF_REG_2, 4),
2679 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2681 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2682 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002683 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2684 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2686 BPF_EXIT_INSN(),
2687 },
2688 .result = ACCEPT,
2689 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2690 },
2691 {
2692 "raw_stack: skb_load_bytes, init",
2693 .insns = {
2694 BPF_MOV64_IMM(BPF_REG_2, 4),
2695 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2697 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2698 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2699 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002700 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2701 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002702 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2703 BPF_EXIT_INSN(),
2704 },
2705 .result = ACCEPT,
2706 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2707 },
2708 {
2709 "raw_stack: skb_load_bytes, spilled regs around bounds",
2710 .insns = {
2711 BPF_MOV64_IMM(BPF_REG_2, 4),
2712 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002714 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2715 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002716 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2717 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002718 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2719 BPF_FUNC_skb_load_bytes),
2720 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2721 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002722 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2723 offsetof(struct __sk_buff, mark)),
2724 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2725 offsetof(struct __sk_buff, priority)),
2726 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2727 BPF_EXIT_INSN(),
2728 },
2729 .result = ACCEPT,
2730 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2731 },
2732 {
2733 "raw_stack: skb_load_bytes, spilled regs corruption",
2734 .insns = {
2735 BPF_MOV64_IMM(BPF_REG_2, 4),
2736 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2737 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002738 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002739 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2740 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002741 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2742 BPF_FUNC_skb_load_bytes),
2743 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002744 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2745 offsetof(struct __sk_buff, mark)),
2746 BPF_EXIT_INSN(),
2747 },
2748 .result = REJECT,
2749 .errstr = "R0 invalid mem access 'inv'",
2750 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2751 },
2752 {
2753 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2754 .insns = {
2755 BPF_MOV64_IMM(BPF_REG_2, 4),
2756 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002758 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2759 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2760 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002761 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2762 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002763 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2764 BPF_FUNC_skb_load_bytes),
2765 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2766 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2767 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002768 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2769 offsetof(struct __sk_buff, mark)),
2770 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2771 offsetof(struct __sk_buff, priority)),
2772 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2773 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2774 offsetof(struct __sk_buff, pkt_type)),
2775 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2776 BPF_EXIT_INSN(),
2777 },
2778 .result = REJECT,
2779 .errstr = "R3 invalid mem access 'inv'",
2780 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2781 },
2782 {
2783 "raw_stack: skb_load_bytes, spilled regs + data",
2784 .insns = {
2785 BPF_MOV64_IMM(BPF_REG_2, 4),
2786 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002788 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2789 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2790 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002791 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2792 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002793 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2794 BPF_FUNC_skb_load_bytes),
2795 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2796 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2797 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002798 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2799 offsetof(struct __sk_buff, mark)),
2800 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2801 offsetof(struct __sk_buff, priority)),
2802 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2803 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2804 BPF_EXIT_INSN(),
2805 },
2806 .result = ACCEPT,
2807 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2808 },
2809 {
2810 "raw_stack: skb_load_bytes, invalid access 1",
2811 .insns = {
2812 BPF_MOV64_IMM(BPF_REG_2, 4),
2813 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2815 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2816 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002817 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2818 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002819 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2820 BPF_EXIT_INSN(),
2821 },
2822 .result = REJECT,
2823 .errstr = "invalid stack type R3 off=-513 access_size=8",
2824 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2825 },
2826 {
2827 "raw_stack: skb_load_bytes, invalid access 2",
2828 .insns = {
2829 BPF_MOV64_IMM(BPF_REG_2, 4),
2830 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2832 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2833 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002834 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2835 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002836 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2837 BPF_EXIT_INSN(),
2838 },
2839 .result = REJECT,
2840 .errstr = "invalid stack type R3 off=-1 access_size=8",
2841 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2842 },
2843 {
2844 "raw_stack: skb_load_bytes, invalid access 3",
2845 .insns = {
2846 BPF_MOV64_IMM(BPF_REG_2, 4),
2847 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2849 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2850 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002851 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2852 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002853 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2854 BPF_EXIT_INSN(),
2855 },
2856 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002857 .errstr = "R4 min value is negative",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002858 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2859 },
2860 {
2861 "raw_stack: skb_load_bytes, invalid access 4",
2862 .insns = {
2863 BPF_MOV64_IMM(BPF_REG_2, 4),
2864 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2866 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2867 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002868 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2869 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002870 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2871 BPF_EXIT_INSN(),
2872 },
2873 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002874 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002875 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2876 },
2877 {
2878 "raw_stack: skb_load_bytes, invalid access 5",
2879 .insns = {
2880 BPF_MOV64_IMM(BPF_REG_2, 4),
2881 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2882 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2883 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2884 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002885 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2886 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002887 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2888 BPF_EXIT_INSN(),
2889 },
2890 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002891 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002892 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2893 },
2894 {
2895 "raw_stack: skb_load_bytes, invalid access 6",
2896 .insns = {
2897 BPF_MOV64_IMM(BPF_REG_2, 4),
2898 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2900 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2901 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002902 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2903 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002904 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2905 BPF_EXIT_INSN(),
2906 },
2907 .result = REJECT,
2908 .errstr = "invalid stack type R3 off=-512 access_size=0",
2909 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2910 },
2911 {
2912 "raw_stack: skb_load_bytes, large access",
2913 .insns = {
2914 BPF_MOV64_IMM(BPF_REG_2, 4),
2915 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2917 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2918 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002919 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2920 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002921 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2922 BPF_EXIT_INSN(),
2923 },
2924 .result = ACCEPT,
2925 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2926 },
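	/* Stores into the context must come from a register (BPF_STX):
	 * BPF_ST immediate stores and BPF_XADD read-modify-write on a
	 * ctx pointer are rejected even for otherwise writable fields
	 * such as skb->mark.
	 */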
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002927 {
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01002928 "context stores via ST",
2929 .insns = {
2930 BPF_MOV64_IMM(BPF_REG_0, 0),
2931 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
2932 BPF_EXIT_INSN(),
2933 },
2934 .errstr = "BPF_ST stores into R1 context is not allowed",
2935 .result = REJECT,
2936 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2937 },
2938 {
2939 "context stores via XADD",
2940 .insns = {
2941 BPF_MOV64_IMM(BPF_REG_0, 0),
2942 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
2943 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
2944 BPF_EXIT_INSN(),
2945 },
2946 .errstr = "BPF_XADD stores into R1 context is not allowed",
2947 .result = REJECT,
2948 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2949 },
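	/* Direct packet access tests: after loading skb->data and
	 * skb->data_end, branching on "data + N > data_end" and touching
	 * the packet only on the path where the bound holds is what makes
	 * loads/stores within N bytes legal; the later variants poke at
	 * the bounds tracking through shifts, masks, branches and copied
	 * pointers.
	 */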
2950 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002951 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002952 .insns = {
2953 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2954 offsetof(struct __sk_buff, data)),
2955 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2956 offsetof(struct __sk_buff, data_end)),
2957 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2959 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2960 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2961 BPF_MOV64_IMM(BPF_REG_0, 0),
2962 BPF_EXIT_INSN(),
2963 },
2964 .result = ACCEPT,
2965 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2966 },
2967 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002968 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002969 .insns = {
2970 BPF_MOV64_IMM(BPF_REG_0, 1),
2971 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2972 offsetof(struct __sk_buff, data_end)),
2973 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2974 offsetof(struct __sk_buff, data)),
2975 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2977 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2978 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2979 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2980 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2981 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2982 offsetof(struct __sk_buff, data)),
2983 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08002984 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2985 offsetof(struct __sk_buff, len)),
Edward Cree1f9ab382017-08-07 15:29:11 +01002986 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2987 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002988 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2989 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2991 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2992 offsetof(struct __sk_buff, data_end)),
2993 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2994 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2995 BPF_MOV64_IMM(BPF_REG_0, 0),
2996 BPF_EXIT_INSN(),
2997 },
2998 .result = ACCEPT,
2999 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3000 },
3001 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003002 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003003 .insns = {
3004 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3005 offsetof(struct __sk_buff, data)),
3006 BPF_MOV64_IMM(BPF_REG_0, 0),
3007 BPF_EXIT_INSN(),
3008 },
3009 .errstr = "invalid bpf_context access off=76",
3010 .result = REJECT,
3011 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3012 },
3013 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003014 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003015 .insns = {
3016 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3017 offsetof(struct __sk_buff, data)),
3018 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3019 offsetof(struct __sk_buff, data_end)),
3020 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3021 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3022 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3023 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3024 BPF_MOV64_IMM(BPF_REG_0, 0),
3025 BPF_EXIT_INSN(),
3026 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003027 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003028 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3029 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003030 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02003031 "direct packet access: test5 (pkt_end >= reg, good access)",
3032 .insns = {
3033 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3034 offsetof(struct __sk_buff, data)),
3035 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3036 offsetof(struct __sk_buff, data_end)),
3037 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3038 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3039 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3040 BPF_MOV64_IMM(BPF_REG_0, 1),
3041 BPF_EXIT_INSN(),
3042 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3043 BPF_MOV64_IMM(BPF_REG_0, 0),
3044 BPF_EXIT_INSN(),
3045 },
3046 .result = ACCEPT,
3047 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3048 },
3049 {
3050 "direct packet access: test6 (pkt_end >= reg, bad access)",
3051 .insns = {
3052 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3053 offsetof(struct __sk_buff, data)),
3054 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3055 offsetof(struct __sk_buff, data_end)),
3056 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3058 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3059 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3060 BPF_MOV64_IMM(BPF_REG_0, 1),
3061 BPF_EXIT_INSN(),
3062 BPF_MOV64_IMM(BPF_REG_0, 0),
3063 BPF_EXIT_INSN(),
3064 },
3065 .errstr = "invalid access to packet",
3066 .result = REJECT,
3067 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3068 },
3069 {
3070 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3071 .insns = {
3072 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3073 offsetof(struct __sk_buff, data)),
3074 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3075 offsetof(struct __sk_buff, data_end)),
3076 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3078 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3079 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3080 BPF_MOV64_IMM(BPF_REG_0, 1),
3081 BPF_EXIT_INSN(),
3082 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3083 BPF_MOV64_IMM(BPF_REG_0, 0),
3084 BPF_EXIT_INSN(),
3085 },
3086 .errstr = "invalid access to packet",
3087 .result = REJECT,
3088 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3089 },
3090 {
3091 "direct packet access: test8 (double test, variant 1)",
3092 .insns = {
3093 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3094 offsetof(struct __sk_buff, data)),
3095 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3096 offsetof(struct __sk_buff, data_end)),
3097 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3099 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3100 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3101 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3102 BPF_MOV64_IMM(BPF_REG_0, 1),
3103 BPF_EXIT_INSN(),
3104 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3105 BPF_MOV64_IMM(BPF_REG_0, 0),
3106 BPF_EXIT_INSN(),
3107 },
3108 .result = ACCEPT,
3109 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3110 },
3111 {
3112 "direct packet access: test9 (double test, variant 2)",
3113 .insns = {
3114 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3115 offsetof(struct __sk_buff, data)),
3116 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3117 offsetof(struct __sk_buff, data_end)),
3118 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3120 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3121 BPF_MOV64_IMM(BPF_REG_0, 1),
3122 BPF_EXIT_INSN(),
3123 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3124 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3125 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3126 BPF_MOV64_IMM(BPF_REG_0, 0),
3127 BPF_EXIT_INSN(),
3128 },
3129 .result = ACCEPT,
3130 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3131 },
3132 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003133 "direct packet access: test10 (write invalid)",
3134 .insns = {
3135 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3136 offsetof(struct __sk_buff, data)),
3137 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3138 offsetof(struct __sk_buff, data_end)),
3139 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3141 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3142 BPF_MOV64_IMM(BPF_REG_0, 0),
3143 BPF_EXIT_INSN(),
3144 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3145 BPF_MOV64_IMM(BPF_REG_0, 0),
3146 BPF_EXIT_INSN(),
3147 },
3148 .errstr = "invalid access to packet",
3149 .result = REJECT,
3150 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3151 },
3152 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003153 "direct packet access: test11 (shift, good access)",
3154 .insns = {
3155 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3156 offsetof(struct __sk_buff, data)),
3157 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3158 offsetof(struct __sk_buff, data_end)),
3159 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3160 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3161 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3162 BPF_MOV64_IMM(BPF_REG_3, 144),
3163 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3164 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3165 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3166 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3167 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3168 BPF_MOV64_IMM(BPF_REG_0, 1),
3169 BPF_EXIT_INSN(),
3170 BPF_MOV64_IMM(BPF_REG_0, 0),
3171 BPF_EXIT_INSN(),
3172 },
3173 .result = ACCEPT,
3174 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003175 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003176 },
3177 {
3178 "direct packet access: test12 (and, good access)",
3179 .insns = {
3180 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3181 offsetof(struct __sk_buff, data)),
3182 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3183 offsetof(struct __sk_buff, data_end)),
3184 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3185 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3186 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3187 BPF_MOV64_IMM(BPF_REG_3, 144),
3188 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3190 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3191 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3192 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3193 BPF_MOV64_IMM(BPF_REG_0, 1),
3194 BPF_EXIT_INSN(),
3195 BPF_MOV64_IMM(BPF_REG_0, 0),
3196 BPF_EXIT_INSN(),
3197 },
3198 .result = ACCEPT,
3199 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003200 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003201 },
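	/* Tests 11 and 12 exercise bound tracking through ALU ops: after
	 * (144 + 23) >> 3 resp. (144 + 23) & 15 the scalar in R5 has a known
	 * maximum (20 resp. 15), so adding it to the packet pointer in R6 is
	 * well-formed, bounded pointer arithmetic.  Neither test dereferences
	 * R6; they only verify that forming such a pointer is accepted.
	 */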
3202 {
3203 "direct packet access: test13 (branches, good access)",
3204 .insns = {
3205 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3206 offsetof(struct __sk_buff, data)),
3207 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3208 offsetof(struct __sk_buff, data_end)),
3209 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3211 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3212 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3213 offsetof(struct __sk_buff, mark)),
3214 BPF_MOV64_IMM(BPF_REG_4, 1),
3215 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3216 BPF_MOV64_IMM(BPF_REG_3, 14),
3217 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3218 BPF_MOV64_IMM(BPF_REG_3, 24),
3219 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3221 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3222 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3223 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3224 BPF_MOV64_IMM(BPF_REG_0, 1),
3225 BPF_EXIT_INSN(),
3226 BPF_MOV64_IMM(BPF_REG_0, 0),
3227 BPF_EXIT_INSN(),
3228 },
3229 .result = ACCEPT,
3230 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003231 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003232 },
3233 {
William Tu63dfef72017-02-04 08:37:29 -08003234 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3235 .insns = {
3236 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3237 offsetof(struct __sk_buff, data)),
3238 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3239 offsetof(struct __sk_buff, data_end)),
3240 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3242 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3243 BPF_MOV64_IMM(BPF_REG_5, 12),
3244 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3245 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3246 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3247 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3248 BPF_MOV64_IMM(BPF_REG_0, 1),
3249 BPF_EXIT_INSN(),
3250 BPF_MOV64_IMM(BPF_REG_0, 0),
3251 BPF_EXIT_INSN(),
3252 },
3253 .result = ACCEPT,
3254 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003255 .retval = 1,
William Tu63dfef72017-02-04 08:37:29 -08003256 },
3257 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003258 "direct packet access: test15 (spill with xadd)",
3259 .insns = {
3260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3261 offsetof(struct __sk_buff, data)),
3262 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3263 offsetof(struct __sk_buff, data_end)),
3264 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3266 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3267 BPF_MOV64_IMM(BPF_REG_5, 4096),
3268 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3270 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3271 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3272 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3273 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3274 BPF_MOV64_IMM(BPF_REG_0, 0),
3275 BPF_EXIT_INSN(),
3276 },
3277 .errstr = "R2 invalid mem access 'inv'",
3278 .result = REJECT,
3279 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3280 },
3281 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02003282 "direct packet access: test16 (arith on data_end)",
3283 .insns = {
3284 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3285 offsetof(struct __sk_buff, data)),
3286 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3287 offsetof(struct __sk_buff, data_end)),
3288 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3291 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3292 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3293 BPF_MOV64_IMM(BPF_REG_0, 0),
3294 BPF_EXIT_INSN(),
3295 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08003296 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
Daniel Borkmann728a8532017-04-27 01:39:32 +02003297 .result = REJECT,
3298 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3299 },
3300 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003301 "direct packet access: test17 (pruning, alignment)",
3302 .insns = {
3303 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3304 offsetof(struct __sk_buff, data)),
3305 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3306 offsetof(struct __sk_buff, data_end)),
3307 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3308 offsetof(struct __sk_buff, mark)),
3309 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3311 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3312 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3313 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3314 BPF_MOV64_IMM(BPF_REG_0, 0),
3315 BPF_EXIT_INSN(),
3316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3317 BPF_JMP_A(-6),
3318 },
Edward Creef65b1842017-08-07 15:27:12 +01003319 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003320 .result = REJECT,
3321 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3322 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3323 },
3324 {
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003325 "direct packet access: test18 (imm += pkt_ptr, 1)",
3326 .insns = {
3327 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3328 offsetof(struct __sk_buff, data)),
3329 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3330 offsetof(struct __sk_buff, data_end)),
3331 BPF_MOV64_IMM(BPF_REG_0, 8),
3332 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3333 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3334 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3335 BPF_MOV64_IMM(BPF_REG_0, 0),
3336 BPF_EXIT_INSN(),
3337 },
3338 .result = ACCEPT,
3339 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3340 },
3341 {
3342 "direct packet access: test19 (imm += pkt_ptr, 2)",
3343 .insns = {
3344 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3345 offsetof(struct __sk_buff, data)),
3346 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3347 offsetof(struct __sk_buff, data_end)),
3348 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3350 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3351 BPF_MOV64_IMM(BPF_REG_4, 4),
3352 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3353 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3354 BPF_MOV64_IMM(BPF_REG_0, 0),
3355 BPF_EXIT_INSN(),
3356 },
3357 .result = ACCEPT,
3358 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3359 },
3360 {
3361 "direct packet access: test20 (x += pkt_ptr, 1)",
3362 .insns = {
3363 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3364 offsetof(struct __sk_buff, data)),
3365 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3366 offsetof(struct __sk_buff, data_end)),
3367 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3368 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3369 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003370 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003371 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3372 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3373 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003375 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3376 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3377 BPF_MOV64_IMM(BPF_REG_0, 0),
3378 BPF_EXIT_INSN(),
3379 },
3380 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3381 .result = ACCEPT,
3382 },
3383 {
3384 "direct packet access: test21 (x += pkt_ptr, 2)",
3385 .insns = {
3386 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3387 offsetof(struct __sk_buff, data)),
3388 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3389 offsetof(struct __sk_buff, data_end)),
3390 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3391 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3392 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3393 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3394 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3395 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003396 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003397 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3398 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003400 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3401 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3402 BPF_MOV64_IMM(BPF_REG_0, 0),
3403 BPF_EXIT_INSN(),
3404 },
3405 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3406 .result = ACCEPT,
3407 },
3408 {
3409 "direct packet access: test22 (x += pkt_ptr, 3)",
3410 .insns = {
3411 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3412 offsetof(struct __sk_buff, data)),
3413 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3414 offsetof(struct __sk_buff, data_end)),
3415 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3417 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3418 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3419 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3420 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3421 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3422 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3423 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3424 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003425 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003426 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3427 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3429 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3430 BPF_MOV64_IMM(BPF_REG_2, 1),
3431 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3432 BPF_MOV64_IMM(BPF_REG_0, 0),
3433 BPF_EXIT_INSN(),
3434 },
3435 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3436 .result = ACCEPT,
3437 },
3438 {
3439 "direct packet access: test23 (x += pkt_ptr, 4)",
3440 .insns = {
3441 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3442 offsetof(struct __sk_buff, data)),
3443 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3444 offsetof(struct __sk_buff, data_end)),
3445 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3446 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3447 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3448 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3449 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3450 BPF_MOV64_IMM(BPF_REG_0, 31),
3451 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3452 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3453 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3455 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3456 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3457 BPF_MOV64_IMM(BPF_REG_0, 0),
3458 BPF_EXIT_INSN(),
3459 },
3460 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3461 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003462 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003463 },
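	/* Test23 differs from tests 20, 21 and 24 only in how large the
	 * variable part may get: with the 0xffff mask, plus the constant 31
	 * and the 0xffff - 1 added for the comparison, the candidate offset
	 * can exceed the largest packet offset the verifier is prepared to
	 * track, so no range is recorded for R5 (r=0 in the error string)
	 * and the 8-byte store is rejected despite the bounds check.
	 */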
3464 {
3465 "direct packet access: test24 (x += pkt_ptr, 5)",
3466 .insns = {
3467 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3468 offsetof(struct __sk_buff, data)),
3469 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3470 offsetof(struct __sk_buff, data_end)),
3471 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3472 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3473 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3474 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3475 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3476 BPF_MOV64_IMM(BPF_REG_0, 64),
3477 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3478 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3479 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
Edward Cree1f9ab382017-08-07 15:29:11 +01003480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003481 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3482 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3483 BPF_MOV64_IMM(BPF_REG_0, 0),
3484 BPF_EXIT_INSN(),
3485 },
3486 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3487 .result = ACCEPT,
3488 },
3489 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003490 "direct packet access: test25 (marking on <, good access)",
3491 .insns = {
3492 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3493 offsetof(struct __sk_buff, data)),
3494 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3495 offsetof(struct __sk_buff, data_end)),
3496 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3497 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3498 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3499 BPF_MOV64_IMM(BPF_REG_0, 0),
3500 BPF_EXIT_INSN(),
3501 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3502 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3503 },
3504 .result = ACCEPT,
3505 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3506 },
3507 {
3508 "direct packet access: test26 (marking on <, bad access)",
3509 .insns = {
3510 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3511 offsetof(struct __sk_buff, data)),
3512 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3513 offsetof(struct __sk_buff, data_end)),
3514 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3516 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3517 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3518 BPF_MOV64_IMM(BPF_REG_0, 0),
3519 BPF_EXIT_INSN(),
3520 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3521 },
3522 .result = REJECT,
3523 .errstr = "invalid access to packet",
3524 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3525 },
3526 {
3527 "direct packet access: test27 (marking on <=, good access)",
3528 .insns = {
3529 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3530 offsetof(struct __sk_buff, data)),
3531 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3532 offsetof(struct __sk_buff, data_end)),
3533 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3535 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3536 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3537 BPF_MOV64_IMM(BPF_REG_0, 1),
3538 BPF_EXIT_INSN(),
3539 },
3540 .result = ACCEPT,
3541 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003542 .retval = 1,
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003543 },
3544 {
3545 "direct packet access: test28 (marking on <=, bad access)",
3546 .insns = {
3547 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3548 offsetof(struct __sk_buff, data)),
3549 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3550 offsetof(struct __sk_buff, data_end)),
3551 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3553 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3554 BPF_MOV64_IMM(BPF_REG_0, 1),
3555 BPF_EXIT_INSN(),
3556 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3557 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3558 },
3559 .result = REJECT,
3560 .errstr = "invalid access to packet",
3561 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3562 },
3563 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003564 "helper access to packet: test1, valid packet_ptr range",
3565 .insns = {
3566 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3567 offsetof(struct xdp_md, data)),
3568 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3569 offsetof(struct xdp_md, data_end)),
3570 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3571 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3572 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3573 BPF_LD_MAP_FD(BPF_REG_1, 0),
3574 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3575 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003576 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3577 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003578 BPF_MOV64_IMM(BPF_REG_0, 0),
3579 BPF_EXIT_INSN(),
3580 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003581 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003582 .result_unpriv = ACCEPT,
3583 .result = ACCEPT,
3584 .prog_type = BPF_PROG_TYPE_XDP,
3585 },
3586 {
3587 "helper access to packet: test2, unchecked packet_ptr",
3588 .insns = {
3589 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3590 offsetof(struct xdp_md, data)),
3591 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003592 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3593 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003594 BPF_MOV64_IMM(BPF_REG_0, 0),
3595 BPF_EXIT_INSN(),
3596 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003597 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003598 .result = REJECT,
3599 .errstr = "invalid access to packet",
3600 .prog_type = BPF_PROG_TYPE_XDP,
3601 },
3602 {
3603 "helper access to packet: test3, variable add",
3604 .insns = {
3605 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3606 offsetof(struct xdp_md, data)),
3607 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3608 offsetof(struct xdp_md, data_end)),
3609 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3610 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3611 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3612 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3613 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3614 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3615 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3617 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3618 BPF_LD_MAP_FD(BPF_REG_1, 0),
3619 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3621 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003622 BPF_MOV64_IMM(BPF_REG_0, 0),
3623 BPF_EXIT_INSN(),
3624 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003625 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003626 .result = ACCEPT,
3627 .prog_type = BPF_PROG_TYPE_XDP,
3628 },
3629 {
3630 "helper access to packet: test4, packet_ptr with bad range",
3631 .insns = {
3632 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3633 offsetof(struct xdp_md, data)),
3634 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3635 offsetof(struct xdp_md, data_end)),
3636 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3638 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3639 BPF_MOV64_IMM(BPF_REG_0, 0),
3640 BPF_EXIT_INSN(),
3641 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003642 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3643 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003644 BPF_MOV64_IMM(BPF_REG_0, 0),
3645 BPF_EXIT_INSN(),
3646 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003647 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003648 .result = REJECT,
3649 .errstr = "invalid access to packet",
3650 .prog_type = BPF_PROG_TYPE_XDP,
3651 },
3652 {
3653 "helper access to packet: test5, packet_ptr with too short range",
3654 .insns = {
3655 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3656 offsetof(struct xdp_md, data)),
3657 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3658 offsetof(struct xdp_md, data_end)),
3659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3660 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3662 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3663 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3665 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003666 BPF_MOV64_IMM(BPF_REG_0, 0),
3667 BPF_EXIT_INSN(),
3668 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003669 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003670 .result = REJECT,
3671 .errstr = "invalid access to packet",
3672 .prog_type = BPF_PROG_TYPE_XDP,
3673 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003674 {
3675 "helper access to packet: test6, cls valid packet_ptr range",
3676 .insns = {
3677 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3678 offsetof(struct __sk_buff, data)),
3679 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3680 offsetof(struct __sk_buff, data_end)),
3681 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3683 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3684 BPF_LD_MAP_FD(BPF_REG_1, 0),
3685 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3686 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003687 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3688 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003689 BPF_MOV64_IMM(BPF_REG_0, 0),
3690 BPF_EXIT_INSN(),
3691 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003692 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003693 .result = ACCEPT,
3694 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3695 },
3696 {
3697 "helper access to packet: test7, cls unchecked packet_ptr",
3698 .insns = {
3699 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3700 offsetof(struct __sk_buff, data)),
3701 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003702 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3703 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003704 BPF_MOV64_IMM(BPF_REG_0, 0),
3705 BPF_EXIT_INSN(),
3706 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003707 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003708 .result = REJECT,
3709 .errstr = "invalid access to packet",
3710 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3711 },
3712 {
3713 "helper access to packet: test8, cls variable add",
3714 .insns = {
3715 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3716 offsetof(struct __sk_buff, data)),
3717 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3718 offsetof(struct __sk_buff, data_end)),
3719 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3721 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3722 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3723 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3724 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3725 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3727 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3728 BPF_LD_MAP_FD(BPF_REG_1, 0),
3729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003730 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3731 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003732 BPF_MOV64_IMM(BPF_REG_0, 0),
3733 BPF_EXIT_INSN(),
3734 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003735 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003736 .result = ACCEPT,
3737 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3738 },
3739 {
3740 "helper access to packet: test9, cls packet_ptr with bad range",
3741 .insns = {
3742 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3743 offsetof(struct __sk_buff, data)),
3744 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3745 offsetof(struct __sk_buff, data_end)),
3746 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3747 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3748 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3749 BPF_MOV64_IMM(BPF_REG_0, 0),
3750 BPF_EXIT_INSN(),
3751 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003752 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3753 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003754 BPF_MOV64_IMM(BPF_REG_0, 0),
3755 BPF_EXIT_INSN(),
3756 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003757 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003758 .result = REJECT,
3759 .errstr = "invalid access to packet",
3760 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3761 },
3762 {
3763 "helper access to packet: test10, cls packet_ptr with too short range",
3764 .insns = {
3765 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3766 offsetof(struct __sk_buff, data)),
3767 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3768 offsetof(struct __sk_buff, data_end)),
3769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3770 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3772 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3773 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003774 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3775 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003776 BPF_MOV64_IMM(BPF_REG_0, 0),
3777 BPF_EXIT_INSN(),
3778 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003779 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003780 .result = REJECT,
3781 .errstr = "invalid access to packet",
3782 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3783 },
3784 {
3785 "helper access to packet: test11, cls unsuitable helper 1",
3786 .insns = {
3787 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3788 offsetof(struct __sk_buff, data)),
3789 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3790 offsetof(struct __sk_buff, data_end)),
3791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3792 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3794 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3795 BPF_MOV64_IMM(BPF_REG_2, 0),
3796 BPF_MOV64_IMM(BPF_REG_4, 42),
3797 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003798 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3799 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003800 BPF_MOV64_IMM(BPF_REG_0, 0),
3801 BPF_EXIT_INSN(),
3802 },
3803 .result = REJECT,
3804 .errstr = "helper access to the packet",
3805 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3806 },
3807 {
3808 "helper access to packet: test12, cls unsuitable helper 2",
3809 .insns = {
3810 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3811 offsetof(struct __sk_buff, data)),
3812 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3813 offsetof(struct __sk_buff, data_end)),
3814 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3816 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3817 BPF_MOV64_IMM(BPF_REG_2, 0),
3818 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003819 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3820 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003821 BPF_MOV64_IMM(BPF_REG_0, 0),
3822 BPF_EXIT_INSN(),
3823 },
3824 .result = REJECT,
3825 .errstr = "helper access to the packet",
3826 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3827 },
3828 {
3829 "helper access to packet: test13, cls helper ok",
3830 .insns = {
3831 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3832 offsetof(struct __sk_buff, data)),
3833 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3834 offsetof(struct __sk_buff, data_end)),
3835 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3836 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3838 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3839 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3840 BPF_MOV64_IMM(BPF_REG_2, 4),
3841 BPF_MOV64_IMM(BPF_REG_3, 0),
3842 BPF_MOV64_IMM(BPF_REG_4, 0),
3843 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003844 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3845 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003846 BPF_MOV64_IMM(BPF_REG_0, 0),
3847 BPF_EXIT_INSN(),
3848 },
3849 .result = ACCEPT,
3850 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3851 },
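	/* Helper tests 11-13: not every helper may be given packet memory
	 * directly.  bpf_csum_diff() is marked as packet-access capable and
	 * takes a bounds-checked packet pointer (test13), while
	 * bpf_skb_store_bytes() and bpf_skb_load_bytes() are not, so they
	 * fail with "helper access to the packet ..." even though the
	 * pointer itself was range checked (tests 11 and 12).
	 */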
3852 {
Edward Creef65b1842017-08-07 15:27:12 +01003853 "helper access to packet: test14, cls helper ok sub",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003854 .insns = {
3855 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3856 offsetof(struct __sk_buff, data)),
3857 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3858 offsetof(struct __sk_buff, data_end)),
3859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3860 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3862 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3863 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3864 BPF_MOV64_IMM(BPF_REG_2, 4),
3865 BPF_MOV64_IMM(BPF_REG_3, 0),
3866 BPF_MOV64_IMM(BPF_REG_4, 0),
3867 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003868 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3869 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003870 BPF_MOV64_IMM(BPF_REG_0, 0),
3871 BPF_EXIT_INSN(),
3872 },
Edward Creef65b1842017-08-07 15:27:12 +01003873 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003874 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3875 },
3876 {
Edward Creef65b1842017-08-07 15:27:12 +01003877 "helper access to packet: test15, cls helper fail sub",
3878 .insns = {
3879 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3880 offsetof(struct __sk_buff, data)),
3881 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3882 offsetof(struct __sk_buff, data_end)),
3883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3884 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3885 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3886 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3887 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3888 BPF_MOV64_IMM(BPF_REG_2, 4),
3889 BPF_MOV64_IMM(BPF_REG_3, 0),
3890 BPF_MOV64_IMM(BPF_REG_4, 0),
3891 BPF_MOV64_IMM(BPF_REG_5, 0),
3892 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3893 BPF_FUNC_csum_diff),
3894 BPF_MOV64_IMM(BPF_REG_0, 0),
3895 BPF_EXIT_INSN(),
3896 },
3897 .result = REJECT,
3898 .errstr = "invalid access to packet",
3899 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3900 },
3901 {
3902 "helper access to packet: test16, cls helper fail range 1",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003903 .insns = {
3904 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3905 offsetof(struct __sk_buff, data)),
3906 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3907 offsetof(struct __sk_buff, data_end)),
3908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3909 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3911 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3912 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3913 BPF_MOV64_IMM(BPF_REG_2, 8),
3914 BPF_MOV64_IMM(BPF_REG_3, 0),
3915 BPF_MOV64_IMM(BPF_REG_4, 0),
3916 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003917 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3918 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003919 BPF_MOV64_IMM(BPF_REG_0, 0),
3920 BPF_EXIT_INSN(),
3921 },
3922 .result = REJECT,
3923 .errstr = "invalid access to packet",
3924 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3925 },
3926 {
Edward Creef65b1842017-08-07 15:27:12 +01003927 "helper access to packet: test17, cls helper fail range 2",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003928 .insns = {
3929 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3930 offsetof(struct __sk_buff, data)),
3931 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3932 offsetof(struct __sk_buff, data_end)),
3933 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3934 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3935 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3936 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3937 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3938 BPF_MOV64_IMM(BPF_REG_2, -9),
3939 BPF_MOV64_IMM(BPF_REG_3, 0),
3940 BPF_MOV64_IMM(BPF_REG_4, 0),
3941 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003942 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3943 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003944 BPF_MOV64_IMM(BPF_REG_0, 0),
3945 BPF_EXIT_INSN(),
3946 },
3947 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003948 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003949 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3950 },
3951 {
Edward Creef65b1842017-08-07 15:27:12 +01003952 "helper access to packet: test18, cls helper fail range 3",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003953 .insns = {
3954 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3955 offsetof(struct __sk_buff, data)),
3956 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3957 offsetof(struct __sk_buff, data_end)),
3958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3959 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3961 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3962 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3963 BPF_MOV64_IMM(BPF_REG_2, ~0),
3964 BPF_MOV64_IMM(BPF_REG_3, 0),
3965 BPF_MOV64_IMM(BPF_REG_4, 0),
3966 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003967 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3968 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003969 BPF_MOV64_IMM(BPF_REG_0, 0),
3970 BPF_EXIT_INSN(),
3971 },
3972 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003973 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003974 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3975 },
3976 {
Yonghong Songb6ff6392017-11-12 14:49:11 -08003977 "helper access to packet: test19, cls helper range zero",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003978 .insns = {
3979 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3980 offsetof(struct __sk_buff, data)),
3981 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3982 offsetof(struct __sk_buff, data_end)),
3983 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3984 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3985 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3986 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3987 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3988 BPF_MOV64_IMM(BPF_REG_2, 0),
3989 BPF_MOV64_IMM(BPF_REG_3, 0),
3990 BPF_MOV64_IMM(BPF_REG_4, 0),
3991 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003992 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3993 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003994 BPF_MOV64_IMM(BPF_REG_0, 0),
3995 BPF_EXIT_INSN(),
3996 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08003997 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003998 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3999 },
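	/* Helper tests 14-19 vary the pointer/size pair handed to
	 * bpf_csum_diff(): subtracting within the checked region still
	 * passes (test14), subtracting past the start of it or asking for
	 * more bytes than were proven fails with "invalid access to packet"
	 * (tests 15 and 16), a size whose minimum may be negative fails with
	 * "R2 min value is negative" (tests 17 and 18), and a zero size is
	 * accepted because the helper tolerates an empty buffer (test19).
	 */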
4000 {
Edward Creef65b1842017-08-07 15:27:12 +01004001 "helper access to packet: test20, pkt end as input",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004002 .insns = {
4003 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4004 offsetof(struct __sk_buff, data)),
4005 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4006 offsetof(struct __sk_buff, data_end)),
4007 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4008 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4010 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4011 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4012 BPF_MOV64_IMM(BPF_REG_2, 4),
4013 BPF_MOV64_IMM(BPF_REG_3, 0),
4014 BPF_MOV64_IMM(BPF_REG_4, 0),
4015 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004016 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4017 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004018 BPF_MOV64_IMM(BPF_REG_0, 0),
4019 BPF_EXIT_INSN(),
4020 },
4021 .result = REJECT,
4022 .errstr = "R1 type=pkt_end expected=fp",
4023 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4024 },
4025 {
Edward Creef65b1842017-08-07 15:27:12 +01004026 "helper access to packet: test21, wrong reg",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004027 .insns = {
4028 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4029 offsetof(struct __sk_buff, data)),
4030 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4031 offsetof(struct __sk_buff, data_end)),
4032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4033 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4035 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4036 BPF_MOV64_IMM(BPF_REG_2, 4),
4037 BPF_MOV64_IMM(BPF_REG_3, 0),
4038 BPF_MOV64_IMM(BPF_REG_4, 0),
4039 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004040 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4041 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004042 BPF_MOV64_IMM(BPF_REG_0, 0),
4043 BPF_EXIT_INSN(),
4044 },
4045 .result = REJECT,
4046 .errstr = "invalid access to packet",
4047 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4048 },
Josef Bacik48461132016-09-28 10:54:32 -04004049 {
4050 "valid map access into an array with a constant",
4051 .insns = {
4052 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4053 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4055 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004056 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4057 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004058 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004059 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4060 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004061 BPF_EXIT_INSN(),
4062 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004063 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004064 .errstr_unpriv = "R0 leaks addr",
4065 .result_unpriv = REJECT,
4066 .result = ACCEPT,
4067 },
4068 {
4069 "valid map access into an array with a register",
4070 .insns = {
4071 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4072 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4074 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004075 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4076 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004077 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4078 BPF_MOV64_IMM(BPF_REG_1, 4),
4079 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4080 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004081 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4082 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004083 BPF_EXIT_INSN(),
4084 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004085 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004086 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004087 .result_unpriv = REJECT,
4088 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004089 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004090 },
4091 {
4092 "valid map access into an array with a variable",
4093 .insns = {
4094 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4095 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4096 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4097 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004098 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4099 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004100 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4101 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4102 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4103 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4104 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004105 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4106 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004107 BPF_EXIT_INSN(),
4108 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004109 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004110 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004111 .result_unpriv = REJECT,
4112 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004113 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004114 },
4115 {
4116 "valid map access into an array with a signed variable",
4117 .insns = {
4118 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4119 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4121 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004122 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4123 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004124 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4125 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4126 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4127 BPF_MOV32_IMM(BPF_REG_1, 0),
4128 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4129 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4130 BPF_MOV32_IMM(BPF_REG_1, 0),
4131 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4132 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004133 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4134 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004135 BPF_EXIT_INSN(),
4136 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004137 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004138 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004139 .result_unpriv = REJECT,
4140 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004141 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004142 },
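	/* The four "valid map access" tests above are the instruction-level
	 * form of the usual array-map pattern.  A rough restricted-C sketch
	 * (illustrative; it assumes struct test_val carries an index field
	 * followed by a foo[] array of ints, which is what the loads and the
	 * << 2 scaling suggest):
	 *
	 *	int key = 0;
	 *	unsigned int idx;
	 *	struct test_val *val = bpf_map_lookup_elem(&map, &key);
	 *
	 *	if (!val)
	 *		return 0;
	 *	idx = val->index;		 untrusted value
	 *	if (idx >= MAX_ENTRIES)		 bound it before use
	 *		return 0;
	 *	val->foo[idx] = 1;
	 *
	 * Unprivileged loads still fail with "R0 leaks addr" because the
	 * programs return with the map-value pointer left in R0.
	 */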
4143 {
4144 "invalid map access into an array with a constant",
4145 .insns = {
4146 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4147 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4149 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4151 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4153 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4154 offsetof(struct test_val, foo)),
4155 BPF_EXIT_INSN(),
4156 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004157 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004158 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4159 .result = REJECT,
4160 },
4161 {
4162 "invalid map access into an array with a register",
4163 .insns = {
4164 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4165 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4166 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4167 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004168 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4169 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004170 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4171 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4172 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4173 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004174 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4175 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004176 BPF_EXIT_INSN(),
4177 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004178 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004179 .errstr = "R0 min value is outside of the array range",
4180 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004181 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004182 },
4183 {
4184 "invalid map access into an array with a variable",
4185 .insns = {
4186 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4189 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4191 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4193 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4194 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4195 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004196 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4197 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004198 BPF_EXIT_INSN(),
4199 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004200 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004201 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
Josef Bacik48461132016-09-28 10:54:32 -04004202 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004203 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004204 },
4205 {
4206 "invalid map access into an array with no floor check",
4207 .insns = {
4208 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4211 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004212 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4213 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004214 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
Edward Creef65b1842017-08-07 15:27:12 +01004215 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik48461132016-09-28 10:54:32 -04004216 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4217 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4218 BPF_MOV32_IMM(BPF_REG_1, 0),
4219 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4220 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004221 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4222 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004223 BPF_EXIT_INSN(),
4224 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004225 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004226 .errstr_unpriv = "R0 leaks addr",
4227 .errstr = "R0 unbounded memory access",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004228 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004229 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004230 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004231 },
4232 {
4233	"invalid map access into an array with an invalid max check",
4234 .insns = {
4235 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4236 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4238 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004239 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4240 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004241 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4242 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4243 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4244 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4245 BPF_MOV32_IMM(BPF_REG_1, 0),
4246 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4247 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004248 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4249 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004250 BPF_EXIT_INSN(),
4251 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004252 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004253 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004254 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004255 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004256 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004257 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004258 },
4259 {
4260	"invalid map access into an array with an invalid max check",
4261 .insns = {
4262 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4263 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4265 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004266 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4267 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004268 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4269 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4270 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4273 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4275 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004276 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4277 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004278 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4279 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004280 BPF_EXIT_INSN(),
4281 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004282 .fixup_map2 = { 3, 11 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004283 .errstr = "R0 pointer += pointer",
Josef Bacik48461132016-09-28 10:54:32 -04004284 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004285 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004286 },
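	/* The "invalid map access" variants above each drop one piece of
	 * that pattern: a constant offset one element past the end (off=48
	 * on a 48-byte value), a constant index out of range, a completely
	 * unchecked index, a signed upper bound with no floor (the index may
	 * still be negative), an off-by-one upper bound that lets the 8-byte
	 * store run past the value (off=44 size=8, value_size=48), and
	 * finally adding one map-value pointer to another, which is refused
	 * as "R0 pointer += pointer".
	 */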
Thomas Graf57a09bf2016-10-18 19:51:19 +02004287 {
4288 "multiple registers share map_lookup_elem result",
4289 .insns = {
4290 BPF_MOV64_IMM(BPF_REG_1, 10),
4291 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4292 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4294 BPF_LD_MAP_FD(BPF_REG_1, 0),
4295 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4296 BPF_FUNC_map_lookup_elem),
4297 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4298 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4299 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4300 BPF_EXIT_INSN(),
4301 },
4302 .fixup_map1 = { 4 },
4303 .result = ACCEPT,
4304 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4305 },
4306 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004307 "alu ops on ptr_to_map_value_or_null, 1",
4308 .insns = {
4309 BPF_MOV64_IMM(BPF_REG_1, 10),
4310 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4313 BPF_LD_MAP_FD(BPF_REG_1, 0),
4314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4315 BPF_FUNC_map_lookup_elem),
4316 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
4318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
4319 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4320 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4321 BPF_EXIT_INSN(),
4322 },
4323 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004324 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004325 .result = REJECT,
4326 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4327 },
4328 {
4329 "alu ops on ptr_to_map_value_or_null, 2",
4330 .insns = {
4331 BPF_MOV64_IMM(BPF_REG_1, 10),
4332 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4333 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4335 BPF_LD_MAP_FD(BPF_REG_1, 0),
4336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4337 BPF_FUNC_map_lookup_elem),
4338 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4339 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
4340 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4341 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4342 BPF_EXIT_INSN(),
4343 },
4344 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004345 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004346 .result = REJECT,
4347 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4348 },
4349 {
4350 "alu ops on ptr_to_map_value_or_null, 3",
4351 .insns = {
4352 BPF_MOV64_IMM(BPF_REG_1, 10),
4353 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4354 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4356 BPF_LD_MAP_FD(BPF_REG_1, 0),
4357 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4358 BPF_FUNC_map_lookup_elem),
4359 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4360 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4361 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4362 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4363 BPF_EXIT_INSN(),
4364 },
4365 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004366 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004367 .result = REJECT,
4368 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4369 },
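	/* The three "alu ops on ptr_to_map_value_or_null" tests show the
	 * flip side: arithmetic on the maybe-NULL pointer is refused
	 * outright, even when it nets out to zero (-2 then +2), is an AND
	 * with -1, or a shift.  The pointer has to be NULL checked first;
	 * only then may offsets be applied to it.
	 */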
4370 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02004371 "invalid memory access with multiple map_lookup_elem calls",
4372 .insns = {
4373 BPF_MOV64_IMM(BPF_REG_1, 10),
4374 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4375 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4377 BPF_LD_MAP_FD(BPF_REG_1, 0),
4378 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4379 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4381 BPF_FUNC_map_lookup_elem),
4382 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4383 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4384 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4386 BPF_FUNC_map_lookup_elem),
4387 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4388 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4389 BPF_EXIT_INSN(),
4390 },
4391 .fixup_map1 = { 4 },
4392 .result = REJECT,
4393 .errstr = "R4 !read_ok",
4394 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4395 },
4396 {
4397 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4398 .insns = {
4399 BPF_MOV64_IMM(BPF_REG_1, 10),
4400 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4403 BPF_LD_MAP_FD(BPF_REG_1, 0),
4404 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4405 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4407 BPF_FUNC_map_lookup_elem),
4408 BPF_MOV64_IMM(BPF_REG_2, 10),
4409 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4410 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4411 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4413 BPF_FUNC_map_lookup_elem),
4414 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4415 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4416 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4417 BPF_EXIT_INSN(),
4418 },
4419 .fixup_map1 = { 4 },
4420 .result = ACCEPT,
4421 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4422 },
Josef Bacike9548902016-11-29 12:35:19 -05004423 {
4424 "invalid map access from else condition",
4425 .insns = {
4426 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4427 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4429 BPF_LD_MAP_FD(BPF_REG_1, 0),
4430 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4431 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4432 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4433 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4435 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4436 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4437 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4438 BPF_EXIT_INSN(),
4439 },
4440 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004441 .errstr = "R0 unbounded memory access",
Josef Bacike9548902016-11-29 12:35:19 -05004442 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004443 .errstr_unpriv = "R0 leaks addr",
Josef Bacike9548902016-11-29 12:35:19 -05004444 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004445 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05004446 },
Gianluca Borello3c8397442016-12-03 12:31:33 -08004447 {
4448 "constant register |= constant should keep constant type",
4449 .insns = {
4450 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4451 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4452 BPF_MOV64_IMM(BPF_REG_2, 34),
4453 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4454 BPF_MOV64_IMM(BPF_REG_3, 0),
4455 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4456 BPF_EXIT_INSN(),
4457 },
4458 .result = ACCEPT,
4459 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4460 },
4461 {
4462 "constant register |= constant should not bypass stack boundary checks",
4463 .insns = {
4464 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4465 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4466 BPF_MOV64_IMM(BPF_REG_2, 34),
4467 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4468 BPF_MOV64_IMM(BPF_REG_3, 0),
4469 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4470 BPF_EXIT_INSN(),
4471 },
4472 .errstr = "invalid stack type R1 off=-48 access_size=58",
4473 .result = REJECT,
4474 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4475 },
4476 {
4477 "constant register |= constant register should keep constant type",
4478 .insns = {
4479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4481 BPF_MOV64_IMM(BPF_REG_2, 34),
4482 BPF_MOV64_IMM(BPF_REG_4, 13),
4483 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4484 BPF_MOV64_IMM(BPF_REG_3, 0),
4485 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4486 BPF_EXIT_INSN(),
4487 },
4488 .result = ACCEPT,
4489 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4490 },
4491 {
4492 "constant register |= constant register should not bypass stack boundary checks",
4493 .insns = {
4494 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4496 BPF_MOV64_IMM(BPF_REG_2, 34),
4497 BPF_MOV64_IMM(BPF_REG_4, 24),
4498 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4499 BPF_MOV64_IMM(BPF_REG_3, 0),
4500 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4501 BPF_EXIT_INSN(),
4502 },
4503 .errstr = "invalid stack type R1 off=-48 access_size=58",
4504 .result = REJECT,
4505 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4506 },
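	/*
	 * Direct packet access for LWT programs: all three LWT program types
	 * may read packet data after a data/data_end bounds check, but only
	 * BPF_PROG_TYPE_LWT_XMIT is expected to be allowed to write to it.
	 */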
4507 {
4508 "invalid direct packet write for LWT_IN",
4509 .insns = {
4510 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4511 offsetof(struct __sk_buff, data)),
4512 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4513 offsetof(struct __sk_buff, data_end)),
4514 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4516 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4517 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4518 BPF_MOV64_IMM(BPF_REG_0, 0),
4519 BPF_EXIT_INSN(),
4520 },
4521 .errstr = "cannot write into packet",
4522 .result = REJECT,
4523 .prog_type = BPF_PROG_TYPE_LWT_IN,
4524 },
4525 {
4526 "invalid direct packet write for LWT_OUT",
4527 .insns = {
4528 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4529 offsetof(struct __sk_buff, data)),
4530 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4531 offsetof(struct __sk_buff, data_end)),
4532 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4534 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4535 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4536 BPF_MOV64_IMM(BPF_REG_0, 0),
4537 BPF_EXIT_INSN(),
4538 },
4539 .errstr = "cannot write into packet",
4540 .result = REJECT,
4541 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4542 },
4543 {
4544 "direct packet write for LWT_XMIT",
4545 .insns = {
4546 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4547 offsetof(struct __sk_buff, data)),
4548 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4549 offsetof(struct __sk_buff, data_end)),
4550 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4552 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4553 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4554 BPF_MOV64_IMM(BPF_REG_0, 0),
4555 BPF_EXIT_INSN(),
4556 },
4557 .result = ACCEPT,
4558 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4559 },
4560 {
4561 "direct packet read for LWT_IN",
4562 .insns = {
4563 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4564 offsetof(struct __sk_buff, data)),
4565 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4566 offsetof(struct __sk_buff, data_end)),
4567 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4569 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4570 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4571 BPF_MOV64_IMM(BPF_REG_0, 0),
4572 BPF_EXIT_INSN(),
4573 },
4574 .result = ACCEPT,
4575 .prog_type = BPF_PROG_TYPE_LWT_IN,
4576 },
4577 {
4578 "direct packet read for LWT_OUT",
4579 .insns = {
4580 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4581 offsetof(struct __sk_buff, data)),
4582 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4583 offsetof(struct __sk_buff, data_end)),
4584 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4586 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4587 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4588 BPF_MOV64_IMM(BPF_REG_0, 0),
4589 BPF_EXIT_INSN(),
4590 },
4591 .result = ACCEPT,
4592 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4593 },
4594 {
4595 "direct packet read for LWT_XMIT",
4596 .insns = {
4597 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4598 offsetof(struct __sk_buff, data)),
4599 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4600 offsetof(struct __sk_buff, data_end)),
4601 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4602 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4603 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4604 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4605 BPF_MOV64_IMM(BPF_REG_0, 0),
4606 BPF_EXIT_INSN(),
4607 },
4608 .result = ACCEPT,
4609 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4610 },
4611 {
4612 "overlapping checks for direct packet access",
4613 .insns = {
4614 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4615 offsetof(struct __sk_buff, data)),
4616 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4617 offsetof(struct __sk_buff, data_end)),
4618 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4620 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4621 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4623 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4624 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4625 BPF_MOV64_IMM(BPF_REG_0, 0),
4626 BPF_EXIT_INSN(),
4627 },
4628 .result = ACCEPT,
4629 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4630 },
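	/*
	 * skb->tc_classid is meaningful only for tc programs, so reading it
	 * from any LWT program type should be reported as an invalid
	 * bpf_context access.
	 */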
4631 {
4632 "invalid access of tc_classid for LWT_IN",
4633 .insns = {
4634 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4635 offsetof(struct __sk_buff, tc_classid)),
4636 BPF_EXIT_INSN(),
4637 },
4638 .result = REJECT,
4639 .errstr = "invalid bpf_context access",
4640 },
4641 {
4642 "invalid access of tc_classid for LWT_OUT",
4643 .insns = {
4644 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4645 offsetof(struct __sk_buff, tc_classid)),
4646 BPF_EXIT_INSN(),
4647 },
4648 .result = REJECT,
4649 .errstr = "invalid bpf_context access",
4650 },
4651 {
4652 "invalid access of tc_classid for LWT_XMIT",
4653 .insns = {
4654 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4655 offsetof(struct __sk_buff, tc_classid)),
4656 BPF_EXIT_INSN(),
4657 },
4658 .result = REJECT,
4659 .errstr = "invalid bpf_context access",
4660 },
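	/*
	 * "leak pointer" tests: plain stores of a pointer into skb->cb[] or a
	 * map value only fail for unprivileged users ("leaks addr"), whereas
	 * BPF_XADD on ctx memory is rejected for privileged users as well.
	 */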
4661 {
4662 "leak pointer into ctx 1",
4663 .insns = {
4664 BPF_MOV64_IMM(BPF_REG_0, 0),
4665 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4666 offsetof(struct __sk_buff, cb[0])),
4667 BPF_LD_MAP_FD(BPF_REG_2, 0),
4668 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4669 offsetof(struct __sk_buff, cb[0])),
4670 BPF_EXIT_INSN(),
4671 },
4672 .fixup_map1 = { 2 },
4673 .errstr_unpriv = "R2 leaks addr into mem",
4674 .result_unpriv = REJECT,
4675 .result = REJECT,
4676 .errstr = "BPF_XADD stores into R1 context is not allowed",
4677 },
4678 {
4679 "leak pointer into ctx 2",
4680 .insns = {
4681 BPF_MOV64_IMM(BPF_REG_0, 0),
4682 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4683 offsetof(struct __sk_buff, cb[0])),
4684 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4685 offsetof(struct __sk_buff, cb[0])),
4686 BPF_EXIT_INSN(),
4687 },
4688 .errstr_unpriv = "R10 leaks addr into mem",
4689 .result_unpriv = REJECT,
4690 .result = REJECT,
4691 .errstr = "BPF_XADD stores into R1 context is not allowed",
4692 },
4693 {
4694 "leak pointer into ctx 3",
4695 .insns = {
4696 BPF_MOV64_IMM(BPF_REG_0, 0),
4697 BPF_LD_MAP_FD(BPF_REG_2, 0),
4698 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4699 offsetof(struct __sk_buff, cb[0])),
4700 BPF_EXIT_INSN(),
4701 },
4702 .fixup_map1 = { 1 },
4703 .errstr_unpriv = "R2 leaks addr into ctx",
4704 .result_unpriv = REJECT,
4705 .result = ACCEPT,
4706 },
4707 {
4708 "leak pointer into map val",
4709 .insns = {
4710 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4711 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4712 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4714 BPF_LD_MAP_FD(BPF_REG_1, 0),
4715 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4716 BPF_FUNC_map_lookup_elem),
4717 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4718 BPF_MOV64_IMM(BPF_REG_3, 0),
4719 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4720 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4721 BPF_MOV64_IMM(BPF_REG_0, 0),
4722 BPF_EXIT_INSN(),
4723 },
4724 .fixup_map1 = { 4 },
4725 .errstr_unpriv = "R6 leaks addr into mem",
4726 .result_unpriv = REJECT,
4727 .result = ACCEPT,
4728 },
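	/*
	 * "helper access to map" exercises bpf_probe_read() size validation
	 * against a 48-byte map value (struct test_val): the full 48 bytes and
	 * smaller sizes are accepted, while zero, oversized and negative sizes
	 * are rejected.
	 */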
4729 {
4730 "helper access to map: full range",
4731 .insns = {
4732 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4734 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4735 BPF_LD_MAP_FD(BPF_REG_1, 0),
4736 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4737 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4739 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4740 BPF_MOV64_IMM(BPF_REG_3, 0),
4741 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4742 BPF_EXIT_INSN(),
4743 },
4744 .fixup_map2 = { 3 },
4745 .result = ACCEPT,
4746 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4747 },
4748 {
4749 "helper access to map: partial range",
4750 .insns = {
4751 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4753 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4754 BPF_LD_MAP_FD(BPF_REG_1, 0),
4755 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4756 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4758 BPF_MOV64_IMM(BPF_REG_2, 8),
4759 BPF_MOV64_IMM(BPF_REG_3, 0),
4760 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4761 BPF_EXIT_INSN(),
4762 },
4763 .fixup_map2 = { 3 },
4764 .result = ACCEPT,
4765 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4766 },
4767 {
4768 "helper access to map: empty range",
4769 .insns = {
4770 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4772 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4773 BPF_LD_MAP_FD(BPF_REG_1, 0),
4774 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4776 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4777 BPF_MOV64_IMM(BPF_REG_2, 0),
4778 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4779 BPF_EXIT_INSN(),
4780 },
4781 .fixup_map2 = { 3 },
4782 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4783 .result = REJECT,
4784 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4785 },
4786 {
4787 "helper access to map: out-of-bound range",
4788 .insns = {
4789 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4790 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4791 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4792 BPF_LD_MAP_FD(BPF_REG_1, 0),
4793 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4795 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4796 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4797 BPF_MOV64_IMM(BPF_REG_3, 0),
4798 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4799 BPF_EXIT_INSN(),
4800 },
4801 .fixup_map2 = { 3 },
4802 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4803 .result = REJECT,
4804 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4805 },
4806 {
4807 "helper access to map: negative range",
4808 .insns = {
4809 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4810 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4811 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4812 BPF_LD_MAP_FD(BPF_REG_1, 0),
4813 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4814 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4815 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4816 BPF_MOV64_IMM(BPF_REG_2, -8),
4817 BPF_MOV64_IMM(BPF_REG_3, 0),
4818 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4819 BPF_EXIT_INSN(),
4820 },
4821 .fixup_map2 = { 3 },
4822 .errstr = "R2 min value is negative",
4823 .result = REJECT,
4824 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4825 },
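	/*
	 * Same checks, but with the map value pointer first advanced by the
	 * constant immediate offsetof(struct test_val, foo) == 4, which
	 * shrinks the accessible window to 44 bytes.
	 */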
4826 {
4827 "helper access to adjusted map (via const imm): full range",
4828 .insns = {
4829 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4831 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4832 BPF_LD_MAP_FD(BPF_REG_1, 0),
4833 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4834 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4835 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4836 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4837 offsetof(struct test_val, foo)),
4838 BPF_MOV64_IMM(BPF_REG_2,
4839 sizeof(struct test_val) -
4840 offsetof(struct test_val, foo)),
4841 BPF_MOV64_IMM(BPF_REG_3, 0),
4842 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4843 BPF_EXIT_INSN(),
4844 },
4845 .fixup_map2 = { 3 },
4846 .result = ACCEPT,
4847 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4848 },
4849 {
4850 "helper access to adjusted map (via const imm): partial range",
4851 .insns = {
4852 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4854 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4855 BPF_LD_MAP_FD(BPF_REG_1, 0),
4856 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4857 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4858 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4860 offsetof(struct test_val, foo)),
4861 BPF_MOV64_IMM(BPF_REG_2, 8),
4862 BPF_MOV64_IMM(BPF_REG_3, 0),
4863 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4864 BPF_EXIT_INSN(),
4865 },
4866 .fixup_map2 = { 3 },
4867 .result = ACCEPT,
4868 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4869 },
4870 {
4871 "helper access to adjusted map (via const imm): empty range",
4872 .insns = {
4873 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4875 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4876 BPF_LD_MAP_FD(BPF_REG_1, 0),
4877 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4878 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4879 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4880 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4881 offsetof(struct test_val, foo)),
4882 BPF_MOV64_IMM(BPF_REG_2, 0),
4883 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4884 BPF_EXIT_INSN(),
4885 },
4886 .fixup_map2 = { 3 },
4887 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
4888 .result = REJECT,
4889 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4890 },
4891 {
4892 "helper access to adjusted map (via const imm): out-of-bound range",
4893 .insns = {
4894 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4896 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4897 BPF_LD_MAP_FD(BPF_REG_1, 0),
4898 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4899 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4900 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4902 offsetof(struct test_val, foo)),
4903 BPF_MOV64_IMM(BPF_REG_2,
4904 sizeof(struct test_val) -
4905 offsetof(struct test_val, foo) + 8),
4906 BPF_MOV64_IMM(BPF_REG_3, 0),
4907 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4908 BPF_EXIT_INSN(),
4909 },
4910 .fixup_map2 = { 3 },
4911 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4912 .result = REJECT,
4913 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4914 },
4915 {
4916 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4917 .insns = {
4918 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4920 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4921 BPF_LD_MAP_FD(BPF_REG_1, 0),
4922 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4923 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4924 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4925 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4926 offsetof(struct test_val, foo)),
4927 BPF_MOV64_IMM(BPF_REG_2, -8),
4928 BPF_MOV64_IMM(BPF_REG_3, 0),
4929 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4930 BPF_EXIT_INSN(),
4931 },
4932 .fixup_map2 = { 3 },
4933 .errstr = "R2 min value is negative",
4934 .result = REJECT,
4935 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4936 },
4937 {
4938 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4939 .insns = {
4940 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4941 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4942 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4943 BPF_LD_MAP_FD(BPF_REG_1, 0),
4944 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4945 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4946 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4947 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4948 offsetof(struct test_val, foo)),
4949 BPF_MOV64_IMM(BPF_REG_2, -1),
4950 BPF_MOV64_IMM(BPF_REG_3, 0),
4951 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4952 BPF_EXIT_INSN(),
4953 },
4954 .fixup_map2 = { 3 },
4955 .errstr = "R2 min value is negative",
4956 .result = REJECT,
4957 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4958 },
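	/*
	 * As above, but the constant offset is moved into a register before
	 * being added to the map value pointer; the verifier has to track it
	 * as a known scalar and enforce the same 44-byte bound.
	 */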
4959 {
4960 "helper access to adjusted map (via const reg): full range",
4961 .insns = {
4962 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4964 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4965 BPF_LD_MAP_FD(BPF_REG_1, 0),
4966 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4967 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4968 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4969 BPF_MOV64_IMM(BPF_REG_3,
4970 offsetof(struct test_val, foo)),
4971 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4972 BPF_MOV64_IMM(BPF_REG_2,
4973 sizeof(struct test_val) -
4974 offsetof(struct test_val, foo)),
4975 BPF_MOV64_IMM(BPF_REG_3, 0),
4976 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4977 BPF_EXIT_INSN(),
4978 },
4979 .fixup_map2 = { 3 },
4980 .result = ACCEPT,
4981 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4982 },
4983 {
4984 "helper access to adjusted map (via const reg): partial range",
4985 .insns = {
4986 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4987 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4988 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4989 BPF_LD_MAP_FD(BPF_REG_1, 0),
4990 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4991 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4993 BPF_MOV64_IMM(BPF_REG_3,
4994 offsetof(struct test_val, foo)),
4995 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4996 BPF_MOV64_IMM(BPF_REG_2, 8),
4997 BPF_MOV64_IMM(BPF_REG_3, 0),
4998 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4999 BPF_EXIT_INSN(),
5000 },
5001 .fixup_map2 = { 3 },
5002 .result = ACCEPT,
5003 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5004 },
5005 {
5006 "helper access to adjusted map (via const reg): empty range",
5007 .insns = {
5008 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5010 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5011 BPF_LD_MAP_FD(BPF_REG_1, 0),
5012 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5013 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5014 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5015 BPF_MOV64_IMM(BPF_REG_3, 0),
5016 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5017 BPF_MOV64_IMM(BPF_REG_2, 0),
5018 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5019 BPF_EXIT_INSN(),
5020 },
5021 .fixup_map2 = { 3 },
5022 .errstr = "R1 min value is outside of the array range",
5023 .result = REJECT,
5024 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5025 },
5026 {
5027 "helper access to adjusted map (via const reg): out-of-bound range",
5028 .insns = {
5029 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5031 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5032 BPF_LD_MAP_FD(BPF_REG_1, 0),
5033 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5035 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5036 BPF_MOV64_IMM(BPF_REG_3,
5037 offsetof(struct test_val, foo)),
5038 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5039 BPF_MOV64_IMM(BPF_REG_2,
5040 sizeof(struct test_val) -
5041 offsetof(struct test_val, foo) + 8),
5042 BPF_MOV64_IMM(BPF_REG_3, 0),
5043 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5044 BPF_EXIT_INSN(),
5045 },
5046 .fixup_map2 = { 3 },
5047 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5048 .result = REJECT,
5049 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5050 },
5051 {
5052 "helper access to adjusted map (via const reg): negative range (> adjustment)",
5053 .insns = {
5054 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5056 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5057 BPF_LD_MAP_FD(BPF_REG_1, 0),
5058 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5059 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5060 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5061 BPF_MOV64_IMM(BPF_REG_3,
5062 offsetof(struct test_val, foo)),
5063 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5064 BPF_MOV64_IMM(BPF_REG_2, -8),
5065 BPF_MOV64_IMM(BPF_REG_3, 0),
5066 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5067 BPF_EXIT_INSN(),
5068 },
5069 .fixup_map2 = { 3 },
5070 .errstr = "R2 min value is negative",
5071 .result = REJECT,
5072 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5073 },
5074 {
5075 "helper access to adjusted map (via const reg): negative range (< adjustment)",
5076 .insns = {
5077 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5078 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5079 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5080 BPF_LD_MAP_FD(BPF_REG_1, 0),
5081 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5082 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5083 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5084 BPF_MOV64_IMM(BPF_REG_3,
5085 offsetof(struct test_val, foo)),
5086 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5087 BPF_MOV64_IMM(BPF_REG_2, -1),
5088 BPF_MOV64_IMM(BPF_REG_3, 0),
5089 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5090 BPF_EXIT_INSN(),
5091 },
5092 .fixup_map2 = { 3 },
5093 .errstr = "R2 min value is negative",
5094 .result = REJECT,
5095 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5096 },
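	/*
	 * Here the offset is a value loaded from the map itself and is only
	 * bounded by a conditional jump against offsetof(struct test_val, foo);
	 * a missing or too large upper bound must be rejected.
	 */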
5097 {
5098 "helper access to adjusted map (via variable): full range",
5099 .insns = {
5100 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5101 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5102 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5103 BPF_LD_MAP_FD(BPF_REG_1, 0),
5104 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5105 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5107 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5108 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5109 offsetof(struct test_val, foo), 4),
5110 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5111 BPF_MOV64_IMM(BPF_REG_2,
5112 sizeof(struct test_val) -
5113 offsetof(struct test_val, foo)),
5114 BPF_MOV64_IMM(BPF_REG_3, 0),
5115 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5116 BPF_EXIT_INSN(),
5117 },
5118 .fixup_map2 = { 3 },
5119 .result = ACCEPT,
5120 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5121 },
5122 {
5123 "helper access to adjusted map (via variable): partial range",
5124 .insns = {
5125 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5126 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5127 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5128 BPF_LD_MAP_FD(BPF_REG_1, 0),
5129 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5130 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5131 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5132 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5133 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5134 offsetof(struct test_val, foo), 4),
5135 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5136 BPF_MOV64_IMM(BPF_REG_2, 8),
5137 BPF_MOV64_IMM(BPF_REG_3, 0),
5138 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5139 BPF_EXIT_INSN(),
5140 },
5141 .fixup_map2 = { 3 },
5142 .result = ACCEPT,
5143 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5144 },
5145 {
5146 "helper access to adjusted map (via variable): empty range",
5147 .insns = {
5148 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5149 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5150 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5151 BPF_LD_MAP_FD(BPF_REG_1, 0),
5152 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5154 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5155 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5156 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5157 offsetof(struct test_val, foo), 3),
5158 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5159 BPF_MOV64_IMM(BPF_REG_2, 0),
5160 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5161 BPF_EXIT_INSN(),
5162 },
5163 .fixup_map2 = { 3 },
5164 .errstr = "R1 min value is outside of the array range",
5165 .result = REJECT,
5166 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5167 },
5168 {
5169 "helper access to adjusted map (via variable): no max check",
5170 .insns = {
5171 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5173 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5174 BPF_LD_MAP_FD(BPF_REG_1, 0),
5175 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5177 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5178 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5179 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5180 BPF_MOV64_IMM(BPF_REG_2, 1),
5181 BPF_MOV64_IMM(BPF_REG_3, 0),
5182 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5183 BPF_EXIT_INSN(),
5184 },
5185 .fixup_map2 = { 3 },
5186 .errstr = "R1 unbounded memory access",
5187 .result = REJECT,
5188 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5189 },
5190 {
5191 "helper access to adjusted map (via variable): wrong max check",
5192 .insns = {
5193 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5195 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5196 BPF_LD_MAP_FD(BPF_REG_1, 0),
5197 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5198 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5199 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5200 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5201 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5202 offsetof(struct test_val, foo), 4),
5203 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5204 BPF_MOV64_IMM(BPF_REG_2,
5205 sizeof(struct test_val) -
5206 offsetof(struct test_val, foo) + 1),
5207 BPF_MOV64_IMM(BPF_REG_3, 0),
5208 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5209 BPF_EXIT_INSN(),
5210 },
5211 .fixup_map2 = { 3 },
5212 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
5213 .result = REJECT,
5214 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5215 },
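	/*
	 * Bounds checks written with <, <=, s< and s<= on a value read from
	 * the map value: each "good access" variant pins the value into a
	 * small non-negative range before using it as an offset, while the
	 * "bad access" variants still allow unbounded or negative offsets.
	 */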
5216 {
5217 "helper access to map: bounds check using <, good access",
5218 .insns = {
5219 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5221 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5222 BPF_LD_MAP_FD(BPF_REG_1, 0),
5223 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5224 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5225 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5226 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5227 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
5228 BPF_MOV64_IMM(BPF_REG_0, 0),
5229 BPF_EXIT_INSN(),
5230 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5231 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5232 BPF_MOV64_IMM(BPF_REG_0, 0),
5233 BPF_EXIT_INSN(),
5234 },
5235 .fixup_map2 = { 3 },
5236 .result = ACCEPT,
5237 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5238 },
5239 {
5240 "helper access to map: bounds check using <, bad access",
5241 .insns = {
5242 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5243 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5244 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5245 BPF_LD_MAP_FD(BPF_REG_1, 0),
5246 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5247 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5248 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5249 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5250 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
5251 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5252 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5253 BPF_MOV64_IMM(BPF_REG_0, 0),
5254 BPF_EXIT_INSN(),
5255 BPF_MOV64_IMM(BPF_REG_0, 0),
5256 BPF_EXIT_INSN(),
5257 },
5258 .fixup_map2 = { 3 },
5259 .result = REJECT,
5260 .errstr = "R1 unbounded memory access",
5261 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5262 },
5263 {
5264 "helper access to map: bounds check using <=, good access",
5265 .insns = {
5266 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5268 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5269 BPF_LD_MAP_FD(BPF_REG_1, 0),
5270 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5271 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5272 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5273 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5274 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
5275 BPF_MOV64_IMM(BPF_REG_0, 0),
5276 BPF_EXIT_INSN(),
5277 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5278 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5279 BPF_MOV64_IMM(BPF_REG_0, 0),
5280 BPF_EXIT_INSN(),
5281 },
5282 .fixup_map2 = { 3 },
5283 .result = ACCEPT,
5284 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5285 },
5286 {
5287 "helper access to map: bounds check using <=, bad access",
5288 .insns = {
5289 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5291 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5292 BPF_LD_MAP_FD(BPF_REG_1, 0),
5293 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5294 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5296 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5297 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
5298 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5299 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5300 BPF_MOV64_IMM(BPF_REG_0, 0),
5301 BPF_EXIT_INSN(),
5302 BPF_MOV64_IMM(BPF_REG_0, 0),
5303 BPF_EXIT_INSN(),
5304 },
5305 .fixup_map2 = { 3 },
5306 .result = REJECT,
5307 .errstr = "R1 unbounded memory access",
5308 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5309 },
5310 {
5311 "helper access to map: bounds check using s<, good access",
5312 .insns = {
5313 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5315 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5316 BPF_LD_MAP_FD(BPF_REG_1, 0),
5317 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5318 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5319 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5320 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5321 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5322 BPF_MOV64_IMM(BPF_REG_0, 0),
5323 BPF_EXIT_INSN(),
5324 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
5325 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5326 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5327 BPF_MOV64_IMM(BPF_REG_0, 0),
5328 BPF_EXIT_INSN(),
5329 },
5330 .fixup_map2 = { 3 },
5331 .result = ACCEPT,
5332 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5333 },
5334 {
5335 "helper access to map: bounds check using s<, good access 2",
5336 .insns = {
5337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5339 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5340 BPF_LD_MAP_FD(BPF_REG_1, 0),
5341 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5342 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5343 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5344 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5345 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5346 BPF_MOV64_IMM(BPF_REG_0, 0),
5347 BPF_EXIT_INSN(),
5348 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5349 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5350 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5351 BPF_MOV64_IMM(BPF_REG_0, 0),
5352 BPF_EXIT_INSN(),
5353 },
5354 .fixup_map2 = { 3 },
5355 .result = ACCEPT,
5356 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5357 },
5358 {
5359 "helper access to map: bounds check using s<, bad access",
5360 .insns = {
5361 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5363 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5364 BPF_LD_MAP_FD(BPF_REG_1, 0),
5365 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5366 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5367 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5368 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5369 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5370 BPF_MOV64_IMM(BPF_REG_0, 0),
5371 BPF_EXIT_INSN(),
5372 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5373 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5374 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5375 BPF_MOV64_IMM(BPF_REG_0, 0),
5376 BPF_EXIT_INSN(),
5377 },
5378 .fixup_map2 = { 3 },
5379 .result = REJECT,
5380 .errstr = "R1 min value is negative",
5381 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5382 },
5383 {
5384 "helper access to map: bounds check using s<=, good access",
5385 .insns = {
5386 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5388 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5389 BPF_LD_MAP_FD(BPF_REG_1, 0),
5390 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5391 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5392 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5393 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5394 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5395 BPF_MOV64_IMM(BPF_REG_0, 0),
5396 BPF_EXIT_INSN(),
5397 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5398 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5399 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5400 BPF_MOV64_IMM(BPF_REG_0, 0),
5401 BPF_EXIT_INSN(),
5402 },
5403 .fixup_map2 = { 3 },
5404 .result = ACCEPT,
5405 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5406 },
5407 {
5408 "helper access to map: bounds check using s<=, good access 2",
5409 .insns = {
5410 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5411 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5412 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5413 BPF_LD_MAP_FD(BPF_REG_1, 0),
5414 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5415 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5416 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5417 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5418 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5419 BPF_MOV64_IMM(BPF_REG_0, 0),
5420 BPF_EXIT_INSN(),
5421 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5422 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5423 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5424 BPF_MOV64_IMM(BPF_REG_0, 0),
5425 BPF_EXIT_INSN(),
5426 },
5427 .fixup_map2 = { 3 },
5428 .result = ACCEPT,
5429 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5430 },
5431 {
5432 "helper access to map: bounds check using s<=, bad access",
5433 .insns = {
5434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5436 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5437 BPF_LD_MAP_FD(BPF_REG_1, 0),
5438 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5439 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5440 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5441 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5442 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5443 BPF_MOV64_IMM(BPF_REG_0, 0),
5444 BPF_EXIT_INSN(),
5445 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5446 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5447 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5448 BPF_MOV64_IMM(BPF_REG_0, 0),
5449 BPF_EXIT_INSN(),
5450 },
5451 .fixup_map2 = { 3 },
5452 .result = REJECT,
5453 .errstr = "R1 min value is negative",
5454 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5455 },
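	/*
	 * Map value pointer handling: the pointer (or pointer-or-null) type
	 * must survive a spill to the stack and a reload, unaligned accesses
	 * depend on F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, and ALU ops such as
	 * &=, 32-bit add or /= on the pointer are rejected outright, while a
	 * byte swap or an XADD on the spilled pointer leaves an unusable
	 * scalar behind.
	 */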
5456 {
5457 "map element value is preserved across register spilling",
5458 .insns = {
5459 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5461 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5462 BPF_LD_MAP_FD(BPF_REG_1, 0),
5463 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5464 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5465 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5466 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5467 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5468 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5469 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5470 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5471 BPF_EXIT_INSN(),
5472 },
5473 .fixup_map2 = { 3 },
5474 .errstr_unpriv = "R0 leaks addr",
5475 .result = ACCEPT,
5476 .result_unpriv = REJECT,
5477 },
5478 {
5479 "map element value or null is marked on register spilling",
5480 .insns = {
5481 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5482 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5483 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5484 BPF_LD_MAP_FD(BPF_REG_1, 0),
5485 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5486 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5487 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5488 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5489 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5490 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5491 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5492 BPF_EXIT_INSN(),
5493 },
5494 .fixup_map2 = { 3 },
5495 .errstr_unpriv = "R0 leaks addr",
5496 .result = ACCEPT,
5497 .result_unpriv = REJECT,
5498 },
5499 {
5500 "map element value store of cleared call register",
5501 .insns = {
5502 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5504 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5505 BPF_LD_MAP_FD(BPF_REG_1, 0),
5506 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5507 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5508 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5509 BPF_EXIT_INSN(),
5510 },
5511 .fixup_map2 = { 3 },
5512 .errstr_unpriv = "R1 !read_ok",
5513 .errstr = "R1 !read_ok",
5514 .result = REJECT,
5515 .result_unpriv = REJECT,
5516 },
5517 {
5518 "map element value with unaligned store",
5519 .insns = {
5520 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5522 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5523 BPF_LD_MAP_FD(BPF_REG_1, 0),
5524 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5525 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5526 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5527 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5528 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5529 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5530 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5531 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5532 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5533 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5535 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5536 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5537 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5538 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5540 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5541 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5542 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5543 BPF_EXIT_INSN(),
5544 },
5545 .fixup_map2 = { 3 },
5546 .errstr_unpriv = "R0 leaks addr",
5547 .result = ACCEPT,
5548 .result_unpriv = REJECT,
5549 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5550 },
5551 {
5552 "map element value with unaligned load",
5553 .insns = {
5554 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5555 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5556 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5557 BPF_LD_MAP_FD(BPF_REG_1, 0),
5558 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5559 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5560 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5561 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5563 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5564 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5565 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5566 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5567 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5569 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5570 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5571 BPF_EXIT_INSN(),
5572 },
5573 .fixup_map2 = { 3 },
5574 .errstr_unpriv = "R0 leaks addr",
5575 .result = ACCEPT,
5576 .result_unpriv = REJECT,
5577 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5578 },
5579 {
5580 "map element value illegal alu op, 1",
5581 .insns = {
5582 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5583 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5584 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5585 BPF_LD_MAP_FD(BPF_REG_1, 0),
5586 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5587 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5588 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5589 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5590 BPF_EXIT_INSN(),
5591 },
5592 .fixup_map2 = { 3 },
5593 .errstr = "R0 bitwise operator &= on pointer",
5594 .result = REJECT,
5595 },
5596 {
5597 "map element value illegal alu op, 2",
5598 .insns = {
5599 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5601 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5602 BPF_LD_MAP_FD(BPF_REG_1, 0),
5603 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5604 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5605 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5606 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5607 BPF_EXIT_INSN(),
5608 },
5609 .fixup_map2 = { 3 },
5610 .errstr = "R0 32-bit pointer arithmetic prohibited",
5611 .result = REJECT,
5612 },
5613 {
5614 "map element value illegal alu op, 3",
5615 .insns = {
5616 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5617 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5618 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5619 BPF_LD_MAP_FD(BPF_REG_1, 0),
5620 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5622 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5623 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5624 BPF_EXIT_INSN(),
5625 },
5626 .fixup_map2 = { 3 },
5627 .errstr = "R0 pointer arithmetic with /= operator",
5628 .result = REJECT,
5629 },
5630 {
5631 "map element value illegal alu op, 4",
5632 .insns = {
5633 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5634 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5635 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5636 BPF_LD_MAP_FD(BPF_REG_1, 0),
5637 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5638 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5639 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5640 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5641 BPF_EXIT_INSN(),
5642 },
5643 .fixup_map2 = { 3 },
5644 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5645 .errstr = "invalid mem access 'inv'",
5646 .result = REJECT,
5647 .result_unpriv = REJECT,
5648 },
5649 {
5650 "map element value illegal alu op, 5",
5651 .insns = {
5652 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5654 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5655 BPF_LD_MAP_FD(BPF_REG_1, 0),
5656 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5657 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5658 BPF_MOV64_IMM(BPF_REG_3, 4096),
5659 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5661 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5662 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5663 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5664 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5665 BPF_EXIT_INSN(),
5666 },
5667 .fixup_map2 = { 3 },
5668 .errstr = "R0 invalid mem access 'inv'",
5669 .result = REJECT,
5670 },
5671 {
5672 "map element value is preserved across register spilling",
5673 .insns = {
5674 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5675 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5676 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5677 BPF_LD_MAP_FD(BPF_REG_1, 0),
5678 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5681 offsetof(struct test_val, foo)),
5682 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5683 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5685 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5686 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5687 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5688 BPF_EXIT_INSN(),
5689 },
5690 .fixup_map2 = { 3 },
5691 .errstr_unpriv = "R0 leaks addr",
5692 .result = ACCEPT,
5693 .result_unpriv = REJECT,
5694 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5695 },
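	/*
	 * "helper access to variable memory": a stack buffer plus a size that
	 * is only known to lie within some range is passed to bpf_probe_read().
	 * The size must be provably bounded (via AND masking or conditional
	 * jumps) and the referenced stack bytes must be initialized.
	 */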
5696 {
5697 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5698 .insns = {
5699 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5701 BPF_MOV64_IMM(BPF_REG_0, 0),
5702 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5703 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5704 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5705 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5706 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5707 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5708 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5709 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5710 BPF_MOV64_IMM(BPF_REG_2, 16),
5711 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5712 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5713 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5714 BPF_MOV64_IMM(BPF_REG_4, 0),
5715 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5716 BPF_MOV64_IMM(BPF_REG_3, 0),
5717 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5718 BPF_MOV64_IMM(BPF_REG_0, 0),
5719 BPF_EXIT_INSN(),
5720 },
5721 .result = ACCEPT,
5722 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5723 },
5724 {
5725 "helper access to variable memory: stack, bitwise AND, zero included",
5726 .insns = {
5727 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5729 BPF_MOV64_IMM(BPF_REG_2, 16),
5730 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5731 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5732 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5733 BPF_MOV64_IMM(BPF_REG_3, 0),
5734 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5735 BPF_EXIT_INSN(),
5736 },
5737 .errstr = "invalid indirect read from stack off -64+0 size 64",
5738 .result = REJECT,
5739 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5740 },
5741 {
5742 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5743 .insns = {
5744 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5746 BPF_MOV64_IMM(BPF_REG_2, 16),
5747 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5748 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5749 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5750 BPF_MOV64_IMM(BPF_REG_4, 0),
5751 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5752 BPF_MOV64_IMM(BPF_REG_3, 0),
5753 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5754 BPF_MOV64_IMM(BPF_REG_0, 0),
5755 BPF_EXIT_INSN(),
5756 },
5757 .errstr = "invalid stack type R1 off=-64 access_size=65",
5758 .result = REJECT,
5759 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5760 },
5761 {
5762 "helper access to variable memory: stack, JMP, correct bounds",
5763 .insns = {
5764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5766 BPF_MOV64_IMM(BPF_REG_0, 0),
5767 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5768 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5769 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5770 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5771 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5772 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5773 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5774 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5775 BPF_MOV64_IMM(BPF_REG_2, 16),
5776 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5777 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5778 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5779 BPF_MOV64_IMM(BPF_REG_4, 0),
5780 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5781 BPF_MOV64_IMM(BPF_REG_3, 0),
5782 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5783 BPF_MOV64_IMM(BPF_REG_0, 0),
5784 BPF_EXIT_INSN(),
5785 },
5786 .result = ACCEPT,
5787 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5788 },
5789 {
5790 "helper access to variable memory: stack, JMP (signed), correct bounds",
5791 .insns = {
5792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5794 BPF_MOV64_IMM(BPF_REG_0, 0),
5795 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5796 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5797 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5798 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5799 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5800 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5801 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5802 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5803 BPF_MOV64_IMM(BPF_REG_2, 16),
5804 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5805 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5806 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5807 BPF_MOV64_IMM(BPF_REG_4, 0),
5808 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5809 BPF_MOV64_IMM(BPF_REG_3, 0),
5810 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5811 BPF_MOV64_IMM(BPF_REG_0, 0),
5812 BPF_EXIT_INSN(),
5813 },
5814 .result = ACCEPT,
5815 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5816 },
5817 {
5818 "helper access to variable memory: stack, JMP, bounds + offset",
5819 .insns = {
5820 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5822 BPF_MOV64_IMM(BPF_REG_2, 16),
5823 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5824 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5825 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5826 BPF_MOV64_IMM(BPF_REG_4, 0),
5827 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5829 BPF_MOV64_IMM(BPF_REG_3, 0),
5830 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5831 BPF_MOV64_IMM(BPF_REG_0, 0),
5832 BPF_EXIT_INSN(),
5833 },
5834 .errstr = "invalid stack type R1 off=-64 access_size=65",
5835 .result = REJECT,
5836 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5837 },
5838 {
5839 "helper access to variable memory: stack, JMP, wrong max",
5840 .insns = {
5841 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5843 BPF_MOV64_IMM(BPF_REG_2, 16),
5844 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5845 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5846 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5847 BPF_MOV64_IMM(BPF_REG_4, 0),
5848 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5849 BPF_MOV64_IMM(BPF_REG_3, 0),
5850 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5851 BPF_MOV64_IMM(BPF_REG_0, 0),
5852 BPF_EXIT_INSN(),
5853 },
5854 .errstr = "invalid stack type R1 off=-64 access_size=65",
5855 .result = REJECT,
5856 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5857 },
5858 {
5859 "helper access to variable memory: stack, JMP, no max check",
5860 .insns = {
5861 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5863 BPF_MOV64_IMM(BPF_REG_2, 16),
5864 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5865 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5866 BPF_MOV64_IMM(BPF_REG_4, 0),
5867 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5868 BPF_MOV64_IMM(BPF_REG_3, 0),
5869 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5870 BPF_MOV64_IMM(BPF_REG_0, 0),
5871 BPF_EXIT_INSN(),
5872 },
5873 /* because max wasn't checked, signed min is negative */
5874 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
5875 .result = REJECT,
5876 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5877 },
5878 {
5879 "helper access to variable memory: stack, JMP, no min check",
5880 .insns = {
5881 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5882 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5883 BPF_MOV64_IMM(BPF_REG_2, 16),
5884 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5885 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5886 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5887 BPF_MOV64_IMM(BPF_REG_3, 0),
5888 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5889 BPF_MOV64_IMM(BPF_REG_0, 0),
5890 BPF_EXIT_INSN(),
5891 },
5892 .errstr = "invalid indirect read from stack off -64+0 size 64",
5893 .result = REJECT,
5894 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5895 },
5896 {
5897 "helper access to variable memory: stack, JMP (signed), no min check",
5898 .insns = {
5899 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5901 BPF_MOV64_IMM(BPF_REG_2, 16),
5902 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5903 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5904 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5905 BPF_MOV64_IMM(BPF_REG_3, 0),
5906 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5907 BPF_MOV64_IMM(BPF_REG_0, 0),
5908 BPF_EXIT_INSN(),
5909 },
5910 .errstr = "R2 min value is negative",
5911 .result = REJECT,
5912 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5913 },
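	/*
	 * The same variable-size helper accesses, but against a map value: the
	 * size register is spilled, reloaded and bounded with signed jumps
	 * against sizeof(struct test_val), optionally after the map value
	 * pointer itself has been advanced.
	 */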
5914 {
5915 "helper access to variable memory: map, JMP, correct bounds",
5916 .insns = {
5917 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5918 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5919 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5920 BPF_LD_MAP_FD(BPF_REG_1, 0),
5921 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5922 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5923 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5924 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5925 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5926 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5927 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5928 sizeof(struct test_val), 4),
5929 BPF_MOV64_IMM(BPF_REG_4, 0),
5930 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5931 BPF_MOV64_IMM(BPF_REG_3, 0),
5932 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5933 BPF_MOV64_IMM(BPF_REG_0, 0),
5934 BPF_EXIT_INSN(),
5935 },
5936 .fixup_map2 = { 3 },
5937 .result = ACCEPT,
5938 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5939 },
5940 {
5941 "helper access to variable memory: map, JMP, wrong max",
5942 .insns = {
5943 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5945 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5946 BPF_LD_MAP_FD(BPF_REG_1, 0),
5947 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5948 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5949 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5950 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5951 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5952 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5953 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5954 sizeof(struct test_val) + 1, 4),
5955 BPF_MOV64_IMM(BPF_REG_4, 0),
5956 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5957 BPF_MOV64_IMM(BPF_REG_3, 0),
5958 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5959 BPF_MOV64_IMM(BPF_REG_0, 0),
5960 BPF_EXIT_INSN(),
5961 },
5962 .fixup_map2 = { 3 },
5963 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5964 .result = REJECT,
5965 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5966 },
5967 {
5968 "helper access to variable memory: map adjusted, JMP, correct bounds",
5969 .insns = {
5970 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5972 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5973 BPF_LD_MAP_FD(BPF_REG_1, 0),
5974 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5975 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5976 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5978 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5979 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5980 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5981 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5982 sizeof(struct test_val) - 20, 4),
5983 BPF_MOV64_IMM(BPF_REG_4, 0),
5984 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5985 BPF_MOV64_IMM(BPF_REG_3, 0),
5986 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5987 BPF_MOV64_IMM(BPF_REG_0, 0),
5988 BPF_EXIT_INSN(),
5989 },
5990 .fixup_map2 = { 3 },
5991 .result = ACCEPT,
5992 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5993 },
5994 {
5995 "helper access to variable memory: map adjusted, JMP, wrong max",
5996 .insns = {
5997 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5999 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6000 BPF_LD_MAP_FD(BPF_REG_1, 0),
6001 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6002 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6003 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6004 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6005 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6006 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6007 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6008 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6009 sizeof(struct test_val) - 19, 4),
6010 BPF_MOV64_IMM(BPF_REG_4, 0),
6011			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6012			BPF_MOV64_IMM(BPF_REG_3, 0),
6013 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6014 BPF_MOV64_IMM(BPF_REG_0, 0),
6015 BPF_EXIT_INSN(),
6016 },
6017 .fixup_map2 = { 3 },
6018 .errstr = "R1 min value is outside of the array range",
6019 .result = REJECT,
6020 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6021 },
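	/*
	 * The ARG_PTR_TO_MEM_OR_NULL tests below use bpf_csum_diff(), whose
	 * buffer arguments may be NULL only when the paired size argument is
	 * provably zero; the later !ARG_PTR_TO_MEM_OR_NULL tests use
	 * bpf_probe_read(), which must reject a NULL pointer even for size 0.
	 */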
6022 {
6023		"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6024		.insns = {
6025 BPF_MOV64_IMM(BPF_REG_1, 0),
6026 BPF_MOV64_IMM(BPF_REG_2, 0),
6027 BPF_MOV64_IMM(BPF_REG_3, 0),
6028 BPF_MOV64_IMM(BPF_REG_4, 0),
6029 BPF_MOV64_IMM(BPF_REG_5, 0),
6030 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6031 BPF_EXIT_INSN(),
6032 },
6033 .result = ACCEPT,
6034 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6035 },
6036 {
6037		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6038		.insns = {
6039			BPF_MOV64_IMM(BPF_REG_1, 0),
6040			BPF_MOV64_IMM(BPF_REG_2, 1),
6041			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6042			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6043			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6044 BPF_MOV64_IMM(BPF_REG_3, 0),
6045 BPF_MOV64_IMM(BPF_REG_4, 0),
6046 BPF_MOV64_IMM(BPF_REG_5, 0),
6047 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6048 BPF_EXIT_INSN(),
6049 },
6050		.errstr = "R1 type=inv expected=fp",
6051		.result = REJECT,
6052 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6053 },
6054 {
6055		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6056		.insns = {
6057 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6059 BPF_MOV64_IMM(BPF_REG_2, 0),
6060 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6061 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
6062 BPF_MOV64_IMM(BPF_REG_3, 0),
6063 BPF_MOV64_IMM(BPF_REG_4, 0),
6064 BPF_MOV64_IMM(BPF_REG_5, 0),
6065 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6066 BPF_EXIT_INSN(),
6067 },
6068		.result = ACCEPT,
6069 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6070 },
6071 {
6072		"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6073		.insns = {
6074 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6075 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6076 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6077 BPF_LD_MAP_FD(BPF_REG_1, 0),
6078 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6079 BPF_FUNC_map_lookup_elem),
6080 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6081 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6082 BPF_MOV64_IMM(BPF_REG_2, 0),
6083 BPF_MOV64_IMM(BPF_REG_3, 0),
6084 BPF_MOV64_IMM(BPF_REG_4, 0),
6085 BPF_MOV64_IMM(BPF_REG_5, 0),
6086 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6087 BPF_EXIT_INSN(),
6088 },
6089 .fixup_map1 = { 3 },
6090 .result = ACCEPT,
6091 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6092 },
6093 {
6094		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6095		.insns = {
6096 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6097 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6099 BPF_LD_MAP_FD(BPF_REG_1, 0),
6100 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6101 BPF_FUNC_map_lookup_elem),
6102 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6103 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6104 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
6105 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6107 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6108 BPF_MOV64_IMM(BPF_REG_3, 0),
6109 BPF_MOV64_IMM(BPF_REG_4, 0),
6110 BPF_MOV64_IMM(BPF_REG_5, 0),
6111 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6112 BPF_EXIT_INSN(),
6113 },
6114 .fixup_map1 = { 3 },
6115 .result = ACCEPT,
6116 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6117 },
6118 {
6119		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6120		.insns = {
6121 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6122 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6124 BPF_LD_MAP_FD(BPF_REG_1, 0),
6125 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6126 BPF_FUNC_map_lookup_elem),
6127 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6129 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6130 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6131 BPF_MOV64_IMM(BPF_REG_3, 0),
6132 BPF_MOV64_IMM(BPF_REG_4, 0),
6133 BPF_MOV64_IMM(BPF_REG_5, 0),
6134 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6135 BPF_EXIT_INSN(),
6136 },
6137 .fixup_map1 = { 3 },
6138 .result = ACCEPT,
6139 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6140 },
6141 {
6142		"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
6143		.insns = {
6144 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6145 offsetof(struct __sk_buff, data)),
6146 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6147 offsetof(struct __sk_buff, data_end)),
6148 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
6149 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6150 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
6151 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
6152 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
6153 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6154 BPF_MOV64_IMM(BPF_REG_3, 0),
6155 BPF_MOV64_IMM(BPF_REG_4, 0),
6156 BPF_MOV64_IMM(BPF_REG_5, 0),
6157 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6158 BPF_EXIT_INSN(),
6159 },
6160 .result = ACCEPT,
6161		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
6162		.retval = 0 /* csum_diff of 64-byte packet */,
6163	},
6164 {
6165		"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6166 .insns = {
6167 BPF_MOV64_IMM(BPF_REG_1, 0),
6168 BPF_MOV64_IMM(BPF_REG_2, 0),
6169 BPF_MOV64_IMM(BPF_REG_3, 0),
6170 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6171 BPF_EXIT_INSN(),
6172 },
6173 .errstr = "R1 type=inv expected=fp",
6174 .result = REJECT,
6175 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6176 },
6177 {
6178 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6179 .insns = {
6180 BPF_MOV64_IMM(BPF_REG_1, 0),
6181 BPF_MOV64_IMM(BPF_REG_2, 1),
6182 BPF_MOV64_IMM(BPF_REG_3, 0),
6183 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6184 BPF_EXIT_INSN(),
6185 },
6186 .errstr = "R1 type=inv expected=fp",
6187 .result = REJECT,
6188 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6189 },
6190 {
6191 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6192 .insns = {
6193 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6195 BPF_MOV64_IMM(BPF_REG_2, 0),
6196 BPF_MOV64_IMM(BPF_REG_3, 0),
6197 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6198 BPF_EXIT_INSN(),
6199 },
6200 .result = ACCEPT,
6201 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6202 },
6203 {
6204 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6205 .insns = {
6206 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6209 BPF_LD_MAP_FD(BPF_REG_1, 0),
6210 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6211 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6212 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6213 BPF_MOV64_IMM(BPF_REG_2, 0),
6214 BPF_MOV64_IMM(BPF_REG_3, 0),
6215 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6216 BPF_EXIT_INSN(),
6217 },
6218 .fixup_map1 = { 3 },
6219 .result = ACCEPT,
6220 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6221 },
6222 {
6223 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6224 .insns = {
6225 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6226 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6228 BPF_LD_MAP_FD(BPF_REG_1, 0),
6229 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6230 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6231 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6232 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6233 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6235 BPF_MOV64_IMM(BPF_REG_3, 0),
6236 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6237 BPF_EXIT_INSN(),
6238 },
6239 .fixup_map1 = { 3 },
6240 .result = ACCEPT,
6241 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6242 },
6243 {
6244 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6245 .insns = {
6246 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6247 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6249 BPF_LD_MAP_FD(BPF_REG_1, 0),
6250 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6251 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6252 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6253 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6254 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
6255 BPF_MOV64_IMM(BPF_REG_3, 0),
6256 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6257 BPF_EXIT_INSN(),
6258 },
6259 .fixup_map1 = { 3 },
6260 .result = ACCEPT,
6261 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6262 },
6263 {
6264		"helper access to variable memory: 8 bytes leak",
6265 .insns = {
6266 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6268 BPF_MOV64_IMM(BPF_REG_0, 0),
6269 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6270 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6271 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6272 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6273 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6274 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6275 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6276			BPF_MOV64_IMM(BPF_REG_2, 1),
6277			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6278			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6279			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
6280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6281 BPF_MOV64_IMM(BPF_REG_3, 0),
6282 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6283 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6284 BPF_EXIT_INSN(),
6285 },
6286 .errstr = "invalid indirect read from stack off -64+32 size 64",
6287 .result = REJECT,
6288 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6289 },
6290 {
6291 "helper access to variable memory: 8 bytes no leak (init memory)",
6292 .insns = {
6293 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6294 BPF_MOV64_IMM(BPF_REG_0, 0),
6295 BPF_MOV64_IMM(BPF_REG_0, 0),
6296 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6297 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6298 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6299 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6300 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6301 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6302 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6303 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6304 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6305 BPF_MOV64_IMM(BPF_REG_2, 0),
6306 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
6307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
6308 BPF_MOV64_IMM(BPF_REG_3, 0),
6309 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6310 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6311 BPF_EXIT_INSN(),
6312 },
6313 .result = ACCEPT,
6314 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6315 },
6316	{
6317 "invalid and of negative number",
6318 .insns = {
6319 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6320 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6321 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6322 BPF_LD_MAP_FD(BPF_REG_1, 0),
6323 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6324 BPF_FUNC_map_lookup_elem),
6325 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6326			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6327			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
6328 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
6329 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6330 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
6331 offsetof(struct test_val, foo)),
6332 BPF_EXIT_INSN(),
6333 },
6334 .fixup_map2 = { 3 },
6335		.errstr = "R0 max value is outside of the array range",
6336		.result = REJECT,
6337		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6338	},
6339 {
6340 "invalid range check",
6341 .insns = {
6342 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6343 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6344 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6345 BPF_LD_MAP_FD(BPF_REG_1, 0),
6346 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6347 BPF_FUNC_map_lookup_elem),
6348 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
6349 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6350 BPF_MOV64_IMM(BPF_REG_9, 1),
6351 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
6352 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
6353 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
6354 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
6355 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
6356 BPF_MOV32_IMM(BPF_REG_3, 1),
6357 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
6358 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
6359 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
6360 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
6361 BPF_MOV64_REG(BPF_REG_0, 0),
6362 BPF_EXIT_INSN(),
6363 },
6364 .fixup_map2 = { 3 },
6365		.errstr = "R0 max value is outside of the array range",
6366		.result = REJECT,
6367		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6368	},
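	/*
	 * Map-in-map tests: the outer lookup returns a pointer to an inner
	 * map (see fixup_map_in_map).  That pointer must be NULL-checked
	 * before it is used for the inner lookup, and pointer arithmetic on
	 * it is not allowed.
	 */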
6369 {
6370 "map in map access",
6371 .insns = {
6372 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6373 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6375 BPF_LD_MAP_FD(BPF_REG_1, 0),
6376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6377 BPF_FUNC_map_lookup_elem),
6378 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6379 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6380 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6381 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6382 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6384 BPF_FUNC_map_lookup_elem),
6385 BPF_MOV64_REG(BPF_REG_0, 0),
6386 BPF_EXIT_INSN(),
6387 },
6388 .fixup_map_in_map = { 3 },
6389 .result = ACCEPT,
6390 },
6391 {
6392 "invalid inner map pointer",
6393 .insns = {
6394 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6395 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6397 BPF_LD_MAP_FD(BPF_REG_1, 0),
6398 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6399 BPF_FUNC_map_lookup_elem),
6400 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6401 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6402 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6404 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6407 BPF_FUNC_map_lookup_elem),
6408 BPF_MOV64_REG(BPF_REG_0, 0),
6409 BPF_EXIT_INSN(),
6410 },
6411 .fixup_map_in_map = { 3 },
6412		.errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
6413		.result = REJECT,
6414 },
6415 {
6416 "forgot null checking on the inner map pointer",
6417 .insns = {
6418 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6419 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6421 BPF_LD_MAP_FD(BPF_REG_1, 0),
6422 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6423 BPF_FUNC_map_lookup_elem),
6424 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6425 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6426 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6427 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6428 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6429 BPF_FUNC_map_lookup_elem),
6430 BPF_MOV64_REG(BPF_REG_0, 0),
6431 BPF_EXIT_INSN(),
6432 },
6433 .fixup_map_in_map = { 3 },
6434 .errstr = "R1 type=map_value_or_null expected=map_ptr",
6435 .result = REJECT,
6436	},
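	/*
	 * BPF_LD_ABS/BPF_LD_IND expect the skb pointer in R6 and, like a
	 * helper call, clobber the caller-saved registers R1-R5, so reading
	 * any of them afterwards must be rejected; R7 and above survive.
	 */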
6437 {
6438 "ld_abs: check calling conv, r1",
6439 .insns = {
6440 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6441 BPF_MOV64_IMM(BPF_REG_1, 0),
6442 BPF_LD_ABS(BPF_W, -0x200000),
6443 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6444 BPF_EXIT_INSN(),
6445 },
6446 .errstr = "R1 !read_ok",
6447 .result = REJECT,
6448 },
6449 {
6450 "ld_abs: check calling conv, r2",
6451 .insns = {
6452 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6453 BPF_MOV64_IMM(BPF_REG_2, 0),
6454 BPF_LD_ABS(BPF_W, -0x200000),
6455 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6456 BPF_EXIT_INSN(),
6457 },
6458 .errstr = "R2 !read_ok",
6459 .result = REJECT,
6460 },
6461 {
6462 "ld_abs: check calling conv, r3",
6463 .insns = {
6464 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6465 BPF_MOV64_IMM(BPF_REG_3, 0),
6466 BPF_LD_ABS(BPF_W, -0x200000),
6467 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6468 BPF_EXIT_INSN(),
6469 },
6470 .errstr = "R3 !read_ok",
6471 .result = REJECT,
6472 },
6473 {
6474 "ld_abs: check calling conv, r4",
6475 .insns = {
6476 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6477 BPF_MOV64_IMM(BPF_REG_4, 0),
6478 BPF_LD_ABS(BPF_W, -0x200000),
6479 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6480 BPF_EXIT_INSN(),
6481 },
6482 .errstr = "R4 !read_ok",
6483 .result = REJECT,
6484 },
6485 {
6486 "ld_abs: check calling conv, r5",
6487 .insns = {
6488 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6489 BPF_MOV64_IMM(BPF_REG_5, 0),
6490 BPF_LD_ABS(BPF_W, -0x200000),
6491 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6492 BPF_EXIT_INSN(),
6493 },
6494 .errstr = "R5 !read_ok",
6495 .result = REJECT,
6496 },
6497 {
6498 "ld_abs: check calling conv, r7",
6499 .insns = {
6500 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6501 BPF_MOV64_IMM(BPF_REG_7, 0),
6502 BPF_LD_ABS(BPF_W, -0x200000),
6503 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6504 BPF_EXIT_INSN(),
6505 },
6506 .result = ACCEPT,
6507 },
6508 {
6509		"ld_abs: tests on r6 and skb data reload helper",
6510 .insns = {
6511 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6512 BPF_LD_ABS(BPF_B, 0),
6513 BPF_LD_ABS(BPF_H, 0),
6514 BPF_LD_ABS(BPF_W, 0),
6515 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
6516 BPF_MOV64_IMM(BPF_REG_6, 0),
6517 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
6518 BPF_MOV64_IMM(BPF_REG_2, 1),
6519 BPF_MOV64_IMM(BPF_REG_3, 2),
6520 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6521 BPF_FUNC_skb_vlan_push),
6522 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
6523 BPF_LD_ABS(BPF_B, 0),
6524 BPF_LD_ABS(BPF_H, 0),
6525 BPF_LD_ABS(BPF_W, 0),
6526 BPF_MOV64_IMM(BPF_REG_0, 42),
6527 BPF_EXIT_INSN(),
6528 },
6529 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6530 .result = ACCEPT,
6531		.retval = 42 /* ultimate return value */,
6532	},
6533 {
6534		"ld_ind: check calling conv, r1",
6535 .insns = {
6536 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6537 BPF_MOV64_IMM(BPF_REG_1, 1),
6538 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
6539 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6540 BPF_EXIT_INSN(),
6541 },
6542 .errstr = "R1 !read_ok",
6543 .result = REJECT,
6544 },
6545 {
6546 "ld_ind: check calling conv, r2",
6547 .insns = {
6548 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6549 BPF_MOV64_IMM(BPF_REG_2, 1),
6550 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
6551 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6552 BPF_EXIT_INSN(),
6553 },
6554 .errstr = "R2 !read_ok",
6555 .result = REJECT,
6556 },
6557 {
6558 "ld_ind: check calling conv, r3",
6559 .insns = {
6560 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6561 BPF_MOV64_IMM(BPF_REG_3, 1),
6562 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
6563 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6564 BPF_EXIT_INSN(),
6565 },
6566 .errstr = "R3 !read_ok",
6567 .result = REJECT,
6568 },
6569 {
6570 "ld_ind: check calling conv, r4",
6571 .insns = {
6572 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6573 BPF_MOV64_IMM(BPF_REG_4, 1),
6574 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
6575 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6576 BPF_EXIT_INSN(),
6577 },
6578 .errstr = "R4 !read_ok",
6579 .result = REJECT,
6580 },
6581 {
6582 "ld_ind: check calling conv, r5",
6583 .insns = {
6584 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6585 BPF_MOV64_IMM(BPF_REG_5, 1),
6586 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
6587 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6588 BPF_EXIT_INSN(),
6589 },
6590 .errstr = "R5 !read_ok",
6591 .result = REJECT,
6592 },
6593 {
6594 "ld_ind: check calling conv, r7",
6595 .insns = {
6596 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6597 BPF_MOV64_IMM(BPF_REG_7, 1),
6598 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
6599 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6600 BPF_EXIT_INSN(),
6601 },
6602 .result = ACCEPT,
6603		.retval = 1,
6604	},
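	/*
	 * sample_period is a 64-bit field; on big endian the narrower loads
	 * below are offset so that they still hit its least significant
	 * bytes, matching what the little-endian variant reads.
	 */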
6605	{
6606 "check bpf_perf_event_data->sample_period byte load permitted",
6607 .insns = {
6608 BPF_MOV64_IMM(BPF_REG_0, 0),
6609#if __BYTE_ORDER == __LITTLE_ENDIAN
6610		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6611 offsetof(struct bpf_perf_event_data, sample_period)),
6612#else
6613 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6614 offsetof(struct bpf_perf_event_data, sample_period) + 7),
6615#endif
6616 BPF_EXIT_INSN(),
6617 },
6618 .result = ACCEPT,
6619 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6620 },
6621 {
6622 "check bpf_perf_event_data->sample_period half load permitted",
6623 .insns = {
6624 BPF_MOV64_IMM(BPF_REG_0, 0),
6625#if __BYTE_ORDER == __LITTLE_ENDIAN
6626		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6627 offsetof(struct bpf_perf_event_data, sample_period)),
6628#else
6629 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6630 offsetof(struct bpf_perf_event_data, sample_period) + 6),
6631#endif
6632 BPF_EXIT_INSN(),
6633 },
6634 .result = ACCEPT,
6635 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6636 },
6637 {
6638 "check bpf_perf_event_data->sample_period word load permitted",
6639 .insns = {
6640 BPF_MOV64_IMM(BPF_REG_0, 0),
6641#if __BYTE_ORDER == __LITTLE_ENDIAN
6642		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6643 offsetof(struct bpf_perf_event_data, sample_period)),
6644#else
6645 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6646 offsetof(struct bpf_perf_event_data, sample_period) + 4),
6647#endif
6648 BPF_EXIT_INSN(),
6649 },
6650 .result = ACCEPT,
6651 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6652 },
6653 {
6654 "check bpf_perf_event_data->sample_period dword load permitted",
6655 .insns = {
6656 BPF_MOV64_IMM(BPF_REG_0, 0),
6657 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6658 offsetof(struct bpf_perf_event_data, sample_period)),
6659 BPF_EXIT_INSN(),
6660 },
6661 .result = ACCEPT,
6662 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6663 },
6664 {
6665 "check skb->data half load not permitted",
6666 .insns = {
6667 BPF_MOV64_IMM(BPF_REG_0, 0),
6668#if __BYTE_ORDER == __LITTLE_ENDIAN
6669		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6670 offsetof(struct __sk_buff, data)),
6671#else
6672 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6673 offsetof(struct __sk_buff, data) + 2),
6674#endif
6675 BPF_EXIT_INSN(),
6676 },
6677 .result = REJECT,
6678 .errstr = "invalid bpf_context access",
6679 },
6680 {
6681 "check skb->tc_classid half load not permitted for lwt prog",
6682 .insns = {
6683 BPF_MOV64_IMM(BPF_REG_0, 0),
6684#if __BYTE_ORDER == __LITTLE_ENDIAN
6685		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6686 offsetof(struct __sk_buff, tc_classid)),
6687#else
6688 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6689 offsetof(struct __sk_buff, tc_classid) + 2),
6690#endif
6691 BPF_EXIT_INSN(),
6692 },
6693 .result = REJECT,
6694 .errstr = "invalid bpf_context access",
6695 .prog_type = BPF_PROG_TYPE_LWT_IN,
6696 },
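	/*
	 * "Mixing signed and unsigned" tests: an unsigned JGT/JGE check
	 * against -1 (i.e. U64_MAX) is also satisfied by negative s64
	 * values, so pairing it only with a signed JSGT leaves the minimum
	 * unbounded and the map access must be rejected ("unbounded min
	 * value"); the ACCEPT variants bound the value by other means.
	 */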
6697	{
6698 "bounds checks mixing signed and unsigned, positive bounds",
6699 .insns = {
6700 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6701 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6703 BPF_LD_MAP_FD(BPF_REG_1, 0),
6704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6705 BPF_FUNC_map_lookup_elem),
6706 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6707 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6708 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6709 BPF_MOV64_IMM(BPF_REG_2, 2),
6710 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6711 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6712 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6713 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6714 BPF_MOV64_IMM(BPF_REG_0, 0),
6715 BPF_EXIT_INSN(),
6716 },
6717 .fixup_map1 = { 3 },
6718		.errstr = "unbounded min value",
6719		.result = REJECT,
6720	},
6721 {
6722 "bounds checks mixing signed and unsigned",
6723 .insns = {
6724 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6725 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6727 BPF_LD_MAP_FD(BPF_REG_1, 0),
6728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6729 BPF_FUNC_map_lookup_elem),
6730 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6731 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6732 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6733 BPF_MOV64_IMM(BPF_REG_2, -1),
6734 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6735 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6736 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6737 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6738 BPF_MOV64_IMM(BPF_REG_0, 0),
6739 BPF_EXIT_INSN(),
6740 },
6741 .fixup_map1 = { 3 },
6742		.errstr = "unbounded min value",
6743		.result = REJECT,
6744	},
6745	{
6746 "bounds checks mixing signed and unsigned, variant 2",
6747 .insns = {
6748 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6749 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6750 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6751 BPF_LD_MAP_FD(BPF_REG_1, 0),
6752 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6753 BPF_FUNC_map_lookup_elem),
6754 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6755 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6756 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6757 BPF_MOV64_IMM(BPF_REG_2, -1),
6758 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6759 BPF_MOV64_IMM(BPF_REG_8, 0),
6760 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6761 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6762 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6763 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6764 BPF_MOV64_IMM(BPF_REG_0, 0),
6765 BPF_EXIT_INSN(),
6766 },
6767 .fixup_map1 = { 3 },
6768		.errstr = "unbounded min value",
6769		.result = REJECT,
6770	},
6771 {
6772 "bounds checks mixing signed and unsigned, variant 3",
6773 .insns = {
6774 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6775 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6777 BPF_LD_MAP_FD(BPF_REG_1, 0),
6778 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6779 BPF_FUNC_map_lookup_elem),
6780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6781 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6782 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6783 BPF_MOV64_IMM(BPF_REG_2, -1),
6784 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6785 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6786 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6787 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6788 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6789 BPF_MOV64_IMM(BPF_REG_0, 0),
6790 BPF_EXIT_INSN(),
6791 },
6792 .fixup_map1 = { 3 },
6793		.errstr = "unbounded min value",
6794		.result = REJECT,
6795	},
6796 {
6797 "bounds checks mixing signed and unsigned, variant 4",
6798 .insns = {
6799 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6802 BPF_LD_MAP_FD(BPF_REG_1, 0),
6803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6804 BPF_FUNC_map_lookup_elem),
6805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6806 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6807 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6808 BPF_MOV64_IMM(BPF_REG_2, 1),
6809 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6810 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6811 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6812 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6813 BPF_MOV64_IMM(BPF_REG_0, 0),
6814 BPF_EXIT_INSN(),
6815 },
6816 .fixup_map1 = { 3 },
6817		.result = ACCEPT,
6818	},
6819 {
6820 "bounds checks mixing signed and unsigned, variant 5",
6821 .insns = {
6822 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6823 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6824 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6825 BPF_LD_MAP_FD(BPF_REG_1, 0),
6826 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6827 BPF_FUNC_map_lookup_elem),
6828 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6829 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6830 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6831 BPF_MOV64_IMM(BPF_REG_2, -1),
6832 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6833 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6834 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6835 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6836 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6837 BPF_MOV64_IMM(BPF_REG_0, 0),
6838 BPF_EXIT_INSN(),
6839 },
6840 .fixup_map1 = { 3 },
6841		.errstr = "unbounded min value",
6842		.result = REJECT,
6843	},
6844 {
6845 "bounds checks mixing signed and unsigned, variant 6",
6846 .insns = {
6847 BPF_MOV64_IMM(BPF_REG_2, 0),
6848 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6850 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6851 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6852 BPF_MOV64_IMM(BPF_REG_6, -1),
6853 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6854 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6855 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6856 BPF_MOV64_IMM(BPF_REG_5, 0),
6857 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6858 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6859 BPF_FUNC_skb_load_bytes),
6860 BPF_MOV64_IMM(BPF_REG_0, 0),
6861 BPF_EXIT_INSN(),
6862 },
6863		.errstr = "R4 min value is negative, either use unsigned",
6864		.result = REJECT,
6865	},
6866 {
6867 "bounds checks mixing signed and unsigned, variant 7",
6868 .insns = {
6869 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6870 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6872 BPF_LD_MAP_FD(BPF_REG_1, 0),
6873 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6874 BPF_FUNC_map_lookup_elem),
6875 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6876 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6877 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6878 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6879 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6880 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6881 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6882 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6883 BPF_MOV64_IMM(BPF_REG_0, 0),
6884 BPF_EXIT_INSN(),
6885 },
6886 .fixup_map1 = { 3 },
6887		.result = ACCEPT,
6888	},
6889 {
6890 "bounds checks mixing signed and unsigned, variant 8",
6891 .insns = {
6892 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6893 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6895 BPF_LD_MAP_FD(BPF_REG_1, 0),
6896 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6897 BPF_FUNC_map_lookup_elem),
6898			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6899 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6900 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6901 BPF_MOV64_IMM(BPF_REG_2, -1),
6902 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6903 BPF_MOV64_IMM(BPF_REG_0, 0),
6904 BPF_EXIT_INSN(),
6905 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6906 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6907 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6908 BPF_MOV64_IMM(BPF_REG_0, 0),
6909 BPF_EXIT_INSN(),
6910 },
6911 .fixup_map1 = { 3 },
6912		.errstr = "unbounded min value",
6913		.result = REJECT,
6914	},
6915 {
6916		"bounds checks mixing signed and unsigned, variant 9",
6917		.insns = {
6918 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6919 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6920 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6921 BPF_LD_MAP_FD(BPF_REG_1, 0),
6922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6923 BPF_FUNC_map_lookup_elem),
6924 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6925 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6926 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6927 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6928 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6929 BPF_MOV64_IMM(BPF_REG_0, 0),
6930 BPF_EXIT_INSN(),
6931 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6932 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6933 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6934 BPF_MOV64_IMM(BPF_REG_0, 0),
6935 BPF_EXIT_INSN(),
6936 },
6937 .fixup_map1 = { 3 },
6938		.result = ACCEPT,
6939	},
6940 {
6941		"bounds checks mixing signed and unsigned, variant 10",
6942		.insns = {
6943 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6944 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6946 BPF_LD_MAP_FD(BPF_REG_1, 0),
6947 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6948 BPF_FUNC_map_lookup_elem),
6949 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6950 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6951 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6952 BPF_MOV64_IMM(BPF_REG_2, 0),
6953 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6954 BPF_MOV64_IMM(BPF_REG_0, 0),
6955 BPF_EXIT_INSN(),
6956 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6957 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6958 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6959 BPF_MOV64_IMM(BPF_REG_0, 0),
6960 BPF_EXIT_INSN(),
6961 },
6962 .fixup_map1 = { 3 },
6963		.errstr = "unbounded min value",
6964		.result = REJECT,
6965	},
6966 {
6967		"bounds checks mixing signed and unsigned, variant 11",
6968		.insns = {
6969 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6970 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6972 BPF_LD_MAP_FD(BPF_REG_1, 0),
6973 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6974 BPF_FUNC_map_lookup_elem),
6975 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6976 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6977 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6978 BPF_MOV64_IMM(BPF_REG_2, -1),
6979 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6980 /* Dead branch. */
6981 BPF_MOV64_IMM(BPF_REG_0, 0),
6982 BPF_EXIT_INSN(),
6983 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6984 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6985 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6986 BPF_MOV64_IMM(BPF_REG_0, 0),
6987 BPF_EXIT_INSN(),
6988 },
6989 .fixup_map1 = { 3 },
6990		.errstr = "unbounded min value",
6991		.result = REJECT,
6992	},
6993 {
6994		"bounds checks mixing signed and unsigned, variant 12",
6995		.insns = {
6996 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6997 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6999 BPF_LD_MAP_FD(BPF_REG_1, 0),
7000 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7001 BPF_FUNC_map_lookup_elem),
7002 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7003 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7004 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7005 BPF_MOV64_IMM(BPF_REG_2, -6),
7006 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7007 BPF_MOV64_IMM(BPF_REG_0, 0),
7008 BPF_EXIT_INSN(),
7009 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7010 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7011 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7012 BPF_MOV64_IMM(BPF_REG_0, 0),
7013 BPF_EXIT_INSN(),
7014 },
7015 .fixup_map1 = { 3 },
7016		.errstr = "unbounded min value",
7017		.result = REJECT,
7018	},
7019 {
7020		"bounds checks mixing signed and unsigned, variant 13",
7021		.insns = {
7022 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7023 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7025 BPF_LD_MAP_FD(BPF_REG_1, 0),
7026 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7027 BPF_FUNC_map_lookup_elem),
7028 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7029 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7030 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7031 BPF_MOV64_IMM(BPF_REG_2, 2),
7032 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7033 BPF_MOV64_IMM(BPF_REG_7, 1),
7034 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
7035 BPF_MOV64_IMM(BPF_REG_0, 0),
7036 BPF_EXIT_INSN(),
7037 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
7038 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
7039 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
7040 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7041 BPF_MOV64_IMM(BPF_REG_0, 0),
7042 BPF_EXIT_INSN(),
7043 },
7044 .fixup_map1 = { 3 },
7045		.errstr = "unbounded min value",
7046		.result = REJECT,
7047	},
7048 {
7049		"bounds checks mixing signed and unsigned, variant 14",
7050		.insns = {
7051 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
7052 offsetof(struct __sk_buff, mark)),
7053 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7054 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7056 BPF_LD_MAP_FD(BPF_REG_1, 0),
7057 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7058 BPF_FUNC_map_lookup_elem),
7059 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7060 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7061 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7062 BPF_MOV64_IMM(BPF_REG_2, -1),
7063 BPF_MOV64_IMM(BPF_REG_8, 2),
7064 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
7065 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
7066 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7067 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7068 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7069 BPF_MOV64_IMM(BPF_REG_0, 0),
7070 BPF_EXIT_INSN(),
7071 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
7072 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
7073 },
7074 .fixup_map1 = { 4 },
7075		.errstr = "R0 invalid mem access 'inv'",
7076		.result = REJECT,
7077	},
7078 {
7079		"bounds checks mixing signed and unsigned, variant 15",
7080		.insns = {
7081 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7082 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7084 BPF_LD_MAP_FD(BPF_REG_1, 0),
7085 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7086 BPF_FUNC_map_lookup_elem),
7087 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7088 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7089 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7090 BPF_MOV64_IMM(BPF_REG_2, -6),
7091 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7092 BPF_MOV64_IMM(BPF_REG_0, 0),
7093 BPF_EXIT_INSN(),
7094 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7095 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
7096 BPF_MOV64_IMM(BPF_REG_0, 0),
7097 BPF_EXIT_INSN(),
7098 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7099 BPF_MOV64_IMM(BPF_REG_0, 0),
7100 BPF_EXIT_INSN(),
7101 },
7102 .fixup_map1 = { 3 },
7103		.errstr = "unbounded min value",
7104		.result = REJECT,
7105 .result_unpriv = REJECT,
7106 },
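	/*
	 * "Subtraction bounds" tests: the difference of two values that are
	 * each in [0, 0xff] can be negative, so using it (even after a
	 * shift) as an offset into the map value must be rejected.
	 */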
7107	{
7108		"subtraction bounds (map value) variant 1",
7109		.insns = {
7110 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7111 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7112 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7113 BPF_LD_MAP_FD(BPF_REG_1, 0),
7114 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7115 BPF_FUNC_map_lookup_elem),
7116 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7117 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7118 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
7119 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7120 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
7121 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7122 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
7123 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7124 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7125 BPF_EXIT_INSN(),
7126 BPF_MOV64_IMM(BPF_REG_0, 0),
7127 BPF_EXIT_INSN(),
7128 },
7129 .fixup_map1 = { 3 },
7130		.errstr = "R0 max value is outside of the array range",
7131 .result = REJECT,
7132 },
7133 {
7134 "subtraction bounds (map value) variant 2",
7135 .insns = {
7136 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7137 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7139 BPF_LD_MAP_FD(BPF_REG_1, 0),
7140 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7141 BPF_FUNC_map_lookup_elem),
7142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7143 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7144 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
7145 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7146 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
7147 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7148 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7149 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7150 BPF_EXIT_INSN(),
7151 BPF_MOV64_IMM(BPF_REG_0, 0),
7152 BPF_EXIT_INSN(),
7153 },
7154 .fixup_map1 = { 3 },
7155		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
7156 .result = REJECT,
7157	},
7158	{
7159		"bounds check based on zero-extended MOV",
7160 .insns = {
7161 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7162 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7163 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7164 BPF_LD_MAP_FD(BPF_REG_1, 0),
7165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7166 BPF_FUNC_map_lookup_elem),
7167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7168 /* r2 = 0x0000'0000'ffff'ffff */
7169 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
7170 /* r2 = 0 */
7171 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7172 /* no-op */
7173 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7174 /* access at offset 0 */
7175 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7176 /* exit */
7177 BPF_MOV64_IMM(BPF_REG_0, 0),
7178 BPF_EXIT_INSN(),
7179 },
7180 .fixup_map1 = { 3 },
7181 .result = ACCEPT
7182 },
7183 {
7184 "bounds check based on sign-extended MOV. test1",
7185 .insns = {
7186 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7189 BPF_LD_MAP_FD(BPF_REG_1, 0),
7190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7191 BPF_FUNC_map_lookup_elem),
7192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7193 /* r2 = 0xffff'ffff'ffff'ffff */
7194 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7195 /* r2 = 0xffff'ffff */
7196 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7197 /* r0 = <oob pointer> */
7198 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7199 /* access to OOB pointer */
7200 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7201 /* exit */
7202 BPF_MOV64_IMM(BPF_REG_0, 0),
7203 BPF_EXIT_INSN(),
7204 },
7205 .fixup_map1 = { 3 },
7206 .errstr = "map_value pointer and 4294967295",
7207 .result = REJECT
7208 },
7209 {
7210 "bounds check based on sign-extended MOV. test2",
7211 .insns = {
7212 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7213 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7215 BPF_LD_MAP_FD(BPF_REG_1, 0),
7216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7217 BPF_FUNC_map_lookup_elem),
7218 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7219 /* r2 = 0xffff'ffff'ffff'ffff */
7220 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7221 /* r2 = 0xfff'ffff */
7222 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
7223 /* r0 = <oob pointer> */
7224 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7225 /* access to OOB pointer */
7226 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7227 /* exit */
7228 BPF_MOV64_IMM(BPF_REG_0, 0),
7229 BPF_EXIT_INSN(),
7230 },
7231 .fixup_map1 = { 3 },
7232 .errstr = "R0 min value is outside of the array range",
7233 .result = REJECT
7234 },
7235 {
7236 "bounds check based on reg_off + var_off + insn_off. test1",
7237 .insns = {
7238 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7239 offsetof(struct __sk_buff, mark)),
7240 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7241 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7243 BPF_LD_MAP_FD(BPF_REG_1, 0),
7244 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7245 BPF_FUNC_map_lookup_elem),
7246 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7247 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
7249 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7250 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7251 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7252 BPF_MOV64_IMM(BPF_REG_0, 0),
7253 BPF_EXIT_INSN(),
7254 },
7255 .fixup_map1 = { 4 },
7256 .errstr = "value_size=8 off=1073741825",
7257 .result = REJECT,
7258 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7259 },
7260 {
7261 "bounds check based on reg_off + var_off + insn_off. test2",
7262 .insns = {
7263 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7264 offsetof(struct __sk_buff, mark)),
7265 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7266 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7268 BPF_LD_MAP_FD(BPF_REG_1, 0),
7269 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7270 BPF_FUNC_map_lookup_elem),
7271 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7272 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7273 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
7274 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7275 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7276 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7277 BPF_MOV64_IMM(BPF_REG_0, 0),
7278 BPF_EXIT_INSN(),
7279 },
7280 .fixup_map1 = { 4 },
7281 .errstr = "value 1073741823",
7282 .result = REJECT,
7283 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7284 },
7285 {
7286 "bounds check after truncation of non-boundary-crossing range",
7287 .insns = {
7288 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7289 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7291 BPF_LD_MAP_FD(BPF_REG_1, 0),
7292 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7293 BPF_FUNC_map_lookup_elem),
7294 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7295 /* r1 = [0x00, 0xff] */
7296 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7297 BPF_MOV64_IMM(BPF_REG_2, 1),
7298 /* r2 = 0x10'0000'0000 */
7299 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
7300 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
7301 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7302 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
7303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7304 /* r1 = [0x00, 0xff] */
7305 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
7306 /* r1 = 0 */
7307 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7308 /* no-op */
7309 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7310 /* access at offset 0 */
7311 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7312 /* exit */
7313 BPF_MOV64_IMM(BPF_REG_0, 0),
7314 BPF_EXIT_INSN(),
7315 },
7316 .fixup_map1 = { 3 },
7317 .result = ACCEPT
7318 },
7319 {
7320 "bounds check after truncation of boundary-crossing range (1)",
7321 .insns = {
7322 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7323 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7325 BPF_LD_MAP_FD(BPF_REG_1, 0),
7326 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7327 BPF_FUNC_map_lookup_elem),
7328 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7329 /* r1 = [0x00, 0xff] */
7330 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7332 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7333 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7334 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7335 * [0x0000'0000, 0x0000'007f]
7336 */
7337 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
7338 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7339 /* r1 = [0x00, 0xff] or
7340 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7341 */
7342 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7343 /* r1 = 0 or
7344 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7345 */
7346 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7347 /* no-op or OOB pointer computation */
7348 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7349 /* potentially OOB access */
7350 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7351 /* exit */
7352 BPF_MOV64_IMM(BPF_REG_0, 0),
7353 BPF_EXIT_INSN(),
7354 },
7355 .fixup_map1 = { 3 },
7356 /* not actually fully unbounded, but the bound is very high */
7357 .errstr = "R0 unbounded memory access",
7358 .result = REJECT
7359 },
7360 {
7361 "bounds check after truncation of boundary-crossing range (2)",
7362 .insns = {
7363 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7364 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7365 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7366 BPF_LD_MAP_FD(BPF_REG_1, 0),
7367 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7368 BPF_FUNC_map_lookup_elem),
7369 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7370 /* r1 = [0x00, 0xff] */
7371 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7372 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7373 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7375 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7376 * [0x0000'0000, 0x0000'007f]
7377 * difference to previous test: truncation via MOV32
7378 * instead of ALU32.
7379 */
7380 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
7381 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7382 /* r1 = [0x00, 0xff] or
7383 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7384 */
7385 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7386 /* r1 = 0 or
7387 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7388 */
7389 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7390 /* no-op or OOB pointer computation */
7391 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7392 /* potentially OOB access */
7393 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7394 /* exit */
7395 BPF_MOV64_IMM(BPF_REG_0, 0),
7396 BPF_EXIT_INSN(),
7397 },
7398 .fixup_map1 = { 3 },
7399 /* not actually fully unbounded, but the bound is very high */
7400 .errstr = "R0 unbounded memory access",
7401 .result = REJECT
7402 },
7403 {
7404 "bounds check after wrapping 32-bit addition",
7405 .insns = {
7406 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7407 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7409 BPF_LD_MAP_FD(BPF_REG_1, 0),
7410 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7411 BPF_FUNC_map_lookup_elem),
7412 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7413 /* r1 = 0x7fff'ffff */
7414 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
7415 /* r1 = 0xffff'fffe */
7416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7417 /* r1 = 0 */
7418 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
7419 /* no-op */
7420 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7421 /* access at offset 0 */
7422 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7423 /* exit */
7424 BPF_MOV64_IMM(BPF_REG_0, 0),
7425 BPF_EXIT_INSN(),
7426 },
7427 .fixup_map1 = { 3 },
7428 .result = ACCEPT
7429 },
7430 {
7431 "bounds check after shift with oversized count operand",
7432 .insns = {
7433 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7436 BPF_LD_MAP_FD(BPF_REG_1, 0),
7437 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7438 BPF_FUNC_map_lookup_elem),
7439 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7440 BPF_MOV64_IMM(BPF_REG_2, 32),
7441 BPF_MOV64_IMM(BPF_REG_1, 1),
7442 /* r1 = (u32)1 << (u32)32 = ? */
7443 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
7444 /* r1 = [0x0000, 0xffff] */
7445 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
7446 /* computes unknown pointer, potentially OOB */
7447 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7448 /* potentially OOB access */
7449 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7450 /* exit */
7451 BPF_MOV64_IMM(BPF_REG_0, 0),
7452 BPF_EXIT_INSN(),
7453 },
7454 .fixup_map1 = { 3 },
7455 .errstr = "R0 max value is outside of the array range",
7456 .result = REJECT
7457 },
7458 {
7459 "bounds check after right shift of maybe-negative number",
7460 .insns = {
7461 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7462 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7464 BPF_LD_MAP_FD(BPF_REG_1, 0),
7465 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7466 BPF_FUNC_map_lookup_elem),
7467 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7468 /* r1 = [0x00, 0xff] */
7469 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7470 /* r1 = [-0x01, 0xfe] */
7471 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
7472 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7473 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7474 /* r1 = 0 or 0xffff'ffff'ffff */
7475 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7476 /* computes unknown pointer, potentially OOB */
7477 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7478 /* potentially OOB access */
7479 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7480 /* exit */
7481 BPF_MOV64_IMM(BPF_REG_0, 0),
7482 BPF_EXIT_INSN(),
7483 },
7484 .fixup_map1 = { 3 },
7485 .errstr = "R0 unbounded memory access",
7486 .result = REJECT
7487 },
7488 {
7489 "bounds check map access with off+size signed 32bit overflow. test1",
7490 .insns = {
7491 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7492 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7494 BPF_LD_MAP_FD(BPF_REG_1, 0),
7495 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7496 BPF_FUNC_map_lookup_elem),
7497 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7498 BPF_EXIT_INSN(),
7499 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7500 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7501 BPF_JMP_A(0),
7502 BPF_EXIT_INSN(),
7503 },
7504 .fixup_map1 = { 3 },
7505 .errstr = "map_value pointer and 2147483646",
7506 .result = REJECT
7507 },
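	/* Note on the test above: the single constant add of 0x7ffffffe
	 * (2147483646) already exceeds what the verifier accepts as a pointer
	 * offset, so it is rejected with "... map_value pointer and 2147483646
	 * ..." before the 32-bit off+size overflow could even bite.
	 */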
7508 {
7509 "bounds check map access with off+size signed 32bit overflow. test2",
7510 .insns = {
7511 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7512 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7514 BPF_LD_MAP_FD(BPF_REG_1, 0),
7515 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7516 BPF_FUNC_map_lookup_elem),
7517 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7518 BPF_EXIT_INSN(),
7519 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7522 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7523 BPF_JMP_A(0),
7524 BPF_EXIT_INSN(),
7525 },
7526 .fixup_map1 = { 3 },
7527 .errstr = "pointer offset 1073741822",
7528 .result = REJECT
7529 },
7530 {
7531 "bounds check map access with off+size signed 32bit overflow. test3",
7532 .insns = {
7533 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7534 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7536 BPF_LD_MAP_FD(BPF_REG_1, 0),
7537 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7538 BPF_FUNC_map_lookup_elem),
7539 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7540 BPF_EXIT_INSN(),
7541 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7542 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7543 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7544 BPF_JMP_A(0),
7545 BPF_EXIT_INSN(),
7546 },
7547 .fixup_map1 = { 3 },
7548 .errstr = "pointer offset -1073741822",
7549 .result = REJECT
7550 },
7551 {
7552 "bounds check map access with off+size signed 32bit overflow. test4",
7553 .insns = {
7554 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7555 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7557 BPF_LD_MAP_FD(BPF_REG_1, 0),
7558 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7559 BPF_FUNC_map_lookup_elem),
7560 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7561 BPF_EXIT_INSN(),
7562 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7563 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7564 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7565 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7566 BPF_JMP_A(0),
7567 BPF_EXIT_INSN(),
7568 },
7569 .fixup_map1 = { 3 },
7570 .errstr = "map_value pointer and 1000000000000",
7571 .result = REJECT
7572 },
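	/* Note on tests 2-4 above: no single instruction overflows 32 bits, but
	 * the constant offset the verifier ends up tracking (+/-1073741822 after
	 * two adds or subs of 0x1fffffff, or the 1000000 * 1000000 = 10^12
	 * scalar) is far beyond the allowed pointer-offset range, so all three
	 * are rejected before the out-of-bounds load is reached.
	 */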
7573 {
7574 "pointer/scalar confusion in state equality check (way 1)",
7575 .insns = {
7576 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7577 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7579 BPF_LD_MAP_FD(BPF_REG_1, 0),
7580 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7581 BPF_FUNC_map_lookup_elem),
7582 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7583 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7584 BPF_JMP_A(1),
7585 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7586 BPF_JMP_A(0),
7587 BPF_EXIT_INSN(),
7588 },
7589 .fixup_map1 = { 3 },
7590 .result = ACCEPT,
7591	.retval = POINTER_VALUE,
7592	.result_unpriv = REJECT,
7593 .errstr_unpriv = "R0 leaks addr as return value"
7594 },
7595 {
7596 "pointer/scalar confusion in state equality check (way 2)",
7597 .insns = {
7598 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7599 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7601 BPF_LD_MAP_FD(BPF_REG_1, 0),
7602 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7603 BPF_FUNC_map_lookup_elem),
7604 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7605 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7606 BPF_JMP_A(1),
7607 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7608 BPF_EXIT_INSN(),
7609 },
7610 .fixup_map1 = { 3 },
7611 .result = ACCEPT,
7612	.retval = POINTER_VALUE,
7613	.result_unpriv = REJECT,
7614 .errstr_unpriv = "R0 leaks addr as return value"
7615 },
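	/* Note on the two tests above: depending on the branch taken, r0 is
	 * either a scalar loaded from the map value or the frame pointer.  The
	 * verifier must not treat those two states as equivalent when pruning,
	 * and an unprivileged loader must not return the pointer, hence
	 * "R0 leaks addr as return value".
	 */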
7616 {
7617	"variable-offset ctx access",
7618 .insns = {
7619 /* Get an unknown value */
7620 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7621 /* Make it small and 4-byte aligned */
7622 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7623 /* add it to skb. We now have either &skb->len or
7624 * &skb->pkt_type, but we don't know which
7625 */
7626 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7627 /* dereference it */
7628 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7629 BPF_EXIT_INSN(),
7630 },
7631 .errstr = "variable ctx access var_off=(0x0; 0x4)",
7632 .result = REJECT,
7633 .prog_type = BPF_PROG_TYPE_LWT_IN,
7634 },
7635 {
7636 "variable-offset stack access",
7637 .insns = {
7638 /* Fill the top 8 bytes of the stack */
7639 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7640 /* Get an unknown value */
7641 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7642 /* Make it small and 4-byte aligned */
7643 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7644 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7645 /* add it to fp. We now have either fp-4 or fp-8, but
7646 * we don't know which
7647 */
7648 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7649 /* dereference it */
7650 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
7651 BPF_EXIT_INSN(),
7652 },
7653 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7654 .result = REJECT,
7655 .prog_type = BPF_PROG_TYPE_LWT_IN,
7656 },
7657	{
7658	"indirect variable-offset stack access",
7659 .insns = {
7660 /* Fill the top 8 bytes of the stack */
7661 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7662 /* Get an unknown value */
7663 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7664 /* Make it small and 4-byte aligned */
7665 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7666 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7667 /* add it to fp. We now have either fp-4 or fp-8, but
7668 * we don't know which
7669 */
7670 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7671 /* dereference it indirectly */
7672 BPF_LD_MAP_FD(BPF_REG_1, 0),
7673 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7674 BPF_FUNC_map_lookup_elem),
7675 BPF_MOV64_IMM(BPF_REG_0, 0),
7676 BPF_EXIT_INSN(),
7677 },
7678 .fixup_map1 = { 5 },
7679 .errstr = "variable stack read R2",
7680 .result = REJECT,
7681 .prog_type = BPF_PROG_TYPE_LWT_IN,
7682 },
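	/* Note on the test above: r2 points to fp-8 or fp-4 and is passed to
	 * bpf_map_lookup_elem() as the key.  The helper would read the key
	 * bytes through r2, but with a variable offset the verifier cannot tell
	 * which stack slots that read touches, so it rejects the program with
	 * "variable stack read R2".
	 */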
7683 {
7684 "direct stack access with 32-bit wraparound. test1",
7685 .insns = {
7686 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7689 BPF_MOV32_IMM(BPF_REG_0, 0),
7690 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7691 BPF_EXIT_INSN()
7692 },
7693 .errstr = "fp pointer and 2147483647",
7694 .result = REJECT
7695 },
7696 {
7697 "direct stack access with 32-bit wraparound. test2",
7698 .insns = {
7699 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7702 BPF_MOV32_IMM(BPF_REG_0, 0),
7703 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7704 BPF_EXIT_INSN()
7705 },
7706 .errstr = "fp pointer and 1073741823",
7707 .result = REJECT
7708 },
7709 {
7710 "direct stack access with 32-bit wraparound. test3",
7711 .insns = {
7712 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7715 BPF_MOV32_IMM(BPF_REG_0, 0),
7716 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7717 BPF_EXIT_INSN()
7718 },
7719 .errstr = "fp pointer offset 1073741822",
7720 .result = REJECT
7721 },
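	/* Note on the three tests above: test1 and test2 are rejected because a
	 * single immediate (0x7fffffff or 0x3fffffff) is already too large to
	 * add to the frame pointer, while test3's immediates are individually
	 * fine but their accumulated offset (1073741822) is not.
	 */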
7722 {
7723	"liveness pruning and write screening",
7724 .insns = {
7725 /* Get an unknown value */
7726 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7727 /* branch conditions teach us nothing about R2 */
7728 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7729 BPF_MOV64_IMM(BPF_REG_0, 0),
7730 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7731 BPF_MOV64_IMM(BPF_REG_0, 0),
7732 BPF_EXIT_INSN(),
7733 },
7734 .errstr = "R0 !read_ok",
7735 .result = REJECT,
7736 .prog_type = BPF_PROG_TYPE_LWT_IN,
7737 },
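	/* Note on the test above: both JGE checks compare an unknown r2 against
	 * 0, so on the path where both branches are taken neither MOV writes
	 * r0, and BPF_EXIT then reads an uninitialized r0.  Writes on the
	 * fall-through paths must not screen that off, hence "R0 !read_ok".
	 */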
7738	{
7739 "varlen_map_value_access pruning",
7740 .insns = {
7741 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7742 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7744 BPF_LD_MAP_FD(BPF_REG_1, 0),
7745 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7746 BPF_FUNC_map_lookup_elem),
7747 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7748 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7749 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
7750 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
7751 BPF_MOV32_IMM(BPF_REG_1, 0),
7752 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
7753 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7754 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
7755 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7756 offsetof(struct test_val, foo)),
7757 BPF_EXIT_INSN(),
7758 },
7759 .fixup_map2 = { 3 },
7760 .errstr_unpriv = "R0 leaks addr",
7761 .errstr = "R0 unbounded memory access",
7762 .result_unpriv = REJECT,
7763 .result = REJECT,
7764 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7765 },
7766	{
7767 "invalid 64-bit BPF_END",
7768 .insns = {
7769 BPF_MOV32_IMM(BPF_REG_0, 0),
7770 {
7771 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
7772 .dst_reg = BPF_REG_0,
7773 .src_reg = 0,
7774 .off = 0,
7775 .imm = 32,
7776 },
7777 BPF_EXIT_INSN(),
7778 },
7779	.errstr = "unknown opcode d7",
7780	.result = REJECT,
7781 },
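	/* Note on the test above: the BPF_END byte-swap operations exist only in
	 * the 32-bit BPF_ALU class.  BPF_ALU64 (0x07) | BPF_END (0xd0) |
	 * BPF_TO_LE (0x00) encodes opcode 0xd7, which the verifier reports as
	 * "unknown opcode d7".
	 */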
7782	{
7783	"XDP, using ifindex from netdev",
7784 .insns = {
7785 BPF_MOV64_IMM(BPF_REG_0, 0),
7786 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7787 offsetof(struct xdp_md, ingress_ifindex)),
7788 BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
7789 BPF_MOV64_IMM(BPF_REG_0, 1),
7790 BPF_EXIT_INSN(),
7791 },
7792 .result = ACCEPT,
7793 .prog_type = BPF_PROG_TYPE_XDP,
7794 .retval = 1,
7795 },
7796 {
7797	"meta access, test1",
7798 .insns = {
7799 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7800 offsetof(struct xdp_md, data_meta)),
7801 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7802 offsetof(struct xdp_md, data)),
7803 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7805 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7806 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7807 BPF_MOV64_IMM(BPF_REG_0, 0),
7808 BPF_EXIT_INSN(),
7809 },
7810 .result = ACCEPT,
7811 .prog_type = BPF_PROG_TYPE_XDP,
7812 },
7813 {
7814 "meta access, test2",
7815 .insns = {
7816 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7817 offsetof(struct xdp_md, data_meta)),
7818 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7819 offsetof(struct xdp_md, data)),
7820 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7821 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
7822 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7823 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7824 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7825 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7826 BPF_MOV64_IMM(BPF_REG_0, 0),
7827 BPF_EXIT_INSN(),
7828 },
7829 .result = REJECT,
7830 .errstr = "invalid access to packet, off=-8",
7831 .prog_type = BPF_PROG_TYPE_XDP,
7832 },
7833 {
7834 "meta access, test3",
7835 .insns = {
7836 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7837 offsetof(struct xdp_md, data_meta)),
7838 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7839 offsetof(struct xdp_md, data_end)),
7840 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7842 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7843 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7844 BPF_MOV64_IMM(BPF_REG_0, 0),
7845 BPF_EXIT_INSN(),
7846 },
7847 .result = REJECT,
7848 .errstr = "invalid access to packet",
7849 .prog_type = BPF_PROG_TYPE_XDP,
7850 },
7851 {
7852 "meta access, test4",
7853 .insns = {
7854 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7855 offsetof(struct xdp_md, data_meta)),
7856 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7857 offsetof(struct xdp_md, data_end)),
7858 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7859 offsetof(struct xdp_md, data)),
7860 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7862 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7863 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7864 BPF_MOV64_IMM(BPF_REG_0, 0),
7865 BPF_EXIT_INSN(),
7866 },
7867 .result = REJECT,
7868 .errstr = "invalid access to packet",
7869 .prog_type = BPF_PROG_TYPE_XDP,
7870 },
7871 {
7872 "meta access, test5",
7873 .insns = {
7874 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7875 offsetof(struct xdp_md, data_meta)),
7876 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7877 offsetof(struct xdp_md, data)),
7878 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7879 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7880 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
7881 BPF_MOV64_IMM(BPF_REG_2, -8),
7882 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7883 BPF_FUNC_xdp_adjust_meta),
7884 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
7885 BPF_MOV64_IMM(BPF_REG_0, 0),
7886 BPF_EXIT_INSN(),
7887 },
7888 .result = REJECT,
7889 .errstr = "R3 !read_ok",
7890 .prog_type = BPF_PROG_TYPE_XDP,
7891 },
7892 {
7893 "meta access, test6",
7894 .insns = {
7895 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7896 offsetof(struct xdp_md, data_meta)),
7897 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7898 offsetof(struct xdp_md, data)),
7899 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7901 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7902 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7903 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
7904 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7905 BPF_MOV64_IMM(BPF_REG_0, 0),
7906 BPF_EXIT_INSN(),
7907 },
7908 .result = REJECT,
7909 .errstr = "invalid access to packet",
7910 .prog_type = BPF_PROG_TYPE_XDP,
7911 },
7912 {
7913 "meta access, test7",
7914 .insns = {
7915 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7916 offsetof(struct xdp_md, data_meta)),
7917 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7918 offsetof(struct xdp_md, data)),
7919 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7920 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7921 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7923 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7924 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7925 BPF_MOV64_IMM(BPF_REG_0, 0),
7926 BPF_EXIT_INSN(),
7927 },
7928 .result = ACCEPT,
7929 .prog_type = BPF_PROG_TYPE_XDP,
7930 },
7931 {
7932 "meta access, test8",
7933 .insns = {
7934 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7935 offsetof(struct xdp_md, data_meta)),
7936 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7937 offsetof(struct xdp_md, data)),
7938 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7940 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7941 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7942 BPF_MOV64_IMM(BPF_REG_0, 0),
7943 BPF_EXIT_INSN(),
7944 },
7945 .result = ACCEPT,
7946 .prog_type = BPF_PROG_TYPE_XDP,
7947 },
7948 {
7949 "meta access, test9",
7950 .insns = {
7951 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7952 offsetof(struct xdp_md, data_meta)),
7953 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7954 offsetof(struct xdp_md, data)),
7955 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7956 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7957 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7958 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7959 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7960 BPF_MOV64_IMM(BPF_REG_0, 0),
7961 BPF_EXIT_INSN(),
7962 },
7963 .result = REJECT,
7964 .errstr = "invalid access to packet",
7965 .prog_type = BPF_PROG_TYPE_XDP,
7966 },
7967 {
7968 "meta access, test10",
7969 .insns = {
7970 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7971 offsetof(struct xdp_md, data_meta)),
7972 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7973 offsetof(struct xdp_md, data)),
7974 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7975 offsetof(struct xdp_md, data_end)),
7976 BPF_MOV64_IMM(BPF_REG_5, 42),
7977 BPF_MOV64_IMM(BPF_REG_6, 24),
7978 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
7979 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
7980 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
7981 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
7982 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
7983 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
7984 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
7985 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
7986 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
7987 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
7988 BPF_MOV64_IMM(BPF_REG_0, 0),
7989 BPF_EXIT_INSN(),
7990 },
7991 .result = REJECT,
7992 .errstr = "invalid access to packet",
7993 .prog_type = BPF_PROG_TYPE_XDP,
7994 },
7995 {
7996 "meta access, test11",
7997 .insns = {
7998 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7999 offsetof(struct xdp_md, data_meta)),
8000 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8001 offsetof(struct xdp_md, data)),
8002 BPF_MOV64_IMM(BPF_REG_5, 42),
8003 BPF_MOV64_IMM(BPF_REG_6, 24),
8004 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8005 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8006 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8007 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8008 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
8009 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8010 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8012 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
8013 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
8014 BPF_MOV64_IMM(BPF_REG_0, 0),
8015 BPF_EXIT_INSN(),
8016 },
8017 .result = ACCEPT,
8018 .prog_type = BPF_PROG_TYPE_XDP,
8019 },
8020 {
8021 "meta access, test12",
8022 .insns = {
8023 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8024 offsetof(struct xdp_md, data_meta)),
8025 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8026 offsetof(struct xdp_md, data)),
8027 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8028 offsetof(struct xdp_md, data_end)),
8029 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8031 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
8032 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8033 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8035 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
8036 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8037 BPF_MOV64_IMM(BPF_REG_0, 0),
8038 BPF_EXIT_INSN(),
8039 },
8040 .result = ACCEPT,
8041 .prog_type = BPF_PROG_TYPE_XDP,
8042 },
8043	{
8044	"arithmetic ops make PTR_TO_CTX unusable",
8045 .insns = {
8046 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
8047 offsetof(struct __sk_buff, data) -
8048 offsetof(struct __sk_buff, mark)),
8049 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8050 offsetof(struct __sk_buff, mark)),
8051 BPF_EXIT_INSN(),
8052 },
8053 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
8054 .result = REJECT,
8055 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8056 },
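	/* Note on the test above: the add moves r1 from ctx to ctx+68
	 * (offsetof(data) - offsetof(mark)), and the load then adds another 8,
	 * which the verifier reports as "off=68+8".  Loads from ctx may only
	 * use a single constant displacement on the unmodified ctx register,
	 * so arithmetic on PTR_TO_CTX itself makes it unusable.
	 */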
8057	{
8058	"pkt_end - pkt_start is allowed",
8059 .insns = {
8060 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8061 offsetof(struct __sk_buff, data_end)),
8062 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8063 offsetof(struct __sk_buff, data)),
8064 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
8065 BPF_EXIT_INSN(),
8066 },
8067 .result = ACCEPT,
8068	.retval = TEST_DATA_LEN,
8069	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8070 },
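	/* Note on the test above: subtracting two pointers into the same packet
	 * yields a plain scalar equal to the packet length, so the program is
	 * accepted and returns TEST_DATA_LEN (64) for the 64-byte test payload.
	 */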
8071 {
8072	"XDP pkt read, pkt_end mangling, bad access 1",
8073 .insns = {
8074 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8075 offsetof(struct xdp_md, data)),
8076 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8077 offsetof(struct xdp_md, data_end)),
8078 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8079 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8080 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
8081 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8082 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8083 BPF_MOV64_IMM(BPF_REG_0, 0),
8084 BPF_EXIT_INSN(),
8085 },
8086	.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8087	.result = REJECT,
8088 .prog_type = BPF_PROG_TYPE_XDP,
8089 },
8090 {
8091 "XDP pkt read, pkt_end mangling, bad access 2",
8092 .insns = {
8093 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8094 offsetof(struct xdp_md, data)),
8095 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8096 offsetof(struct xdp_md, data_end)),
8097 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8099 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
8100 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8101 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8102 BPF_MOV64_IMM(BPF_REG_0, 0),
8103 BPF_EXIT_INSN(),
8104 },
8105	.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8106	.result = REJECT,
8107 .prog_type = BPF_PROG_TYPE_XDP,
8108 },
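	/* Note on the two tests above: all packet range checks compare against
	 * pkt_end, so PTR_TO_PACKET_END itself must never be modified; any
	 * arithmetic on r3 is rejected outright.  The tests that follow walk
	 * the comparison matrix: bump pkt_data (or pkt_meta) by 8, test it
	 * against pkt_end (or pkt_data) with JGT/JGE/JLT/JLE in both operand
	 * orders, then load at a negative offset from the bumped pointer.  The
	 * "good access" variants stay inside the 8 verified bytes on the branch
	 * where the bound holds; the "bad access" variants either read past
	 * them or sit on the branch where no bound was established.
	 */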
8109 {
8110 "XDP pkt read, pkt_data' > pkt_end, good access",
8111 .insns = {
8112 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8113 offsetof(struct xdp_md, data)),
8114 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8115 offsetof(struct xdp_md, data_end)),
8116 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8118 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8119 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8120 BPF_MOV64_IMM(BPF_REG_0, 0),
8121 BPF_EXIT_INSN(),
8122 },
8123 .result = ACCEPT,
8124 .prog_type = BPF_PROG_TYPE_XDP,
8125 },
8126 {
8127 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
8128 .insns = {
8129 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8130 offsetof(struct xdp_md, data)),
8131 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8132 offsetof(struct xdp_md, data_end)),
8133 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8135 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8136 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8137 BPF_MOV64_IMM(BPF_REG_0, 0),
8138 BPF_EXIT_INSN(),
8139 },
8140 .errstr = "R1 offset is outside of the packet",
8141 .result = REJECT,
8142 .prog_type = BPF_PROG_TYPE_XDP,
8143 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8144 },
8145 {
8146 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
8147 .insns = {
8148 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8149 offsetof(struct xdp_md, data)),
8150 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8151 offsetof(struct xdp_md, data_end)),
8152 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8154 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8155 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8156 BPF_MOV64_IMM(BPF_REG_0, 0),
8157 BPF_EXIT_INSN(),
8158 },
8159 .errstr = "R1 offset is outside of the packet",
8160 .result = REJECT,
8161 .prog_type = BPF_PROG_TYPE_XDP,
8162 },
8163 {
8164 "XDP pkt read, pkt_end > pkt_data', good access",
8165 .insns = {
8166 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8167 offsetof(struct xdp_md, data)),
8168 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8169 offsetof(struct xdp_md, data_end)),
8170 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8171 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8172 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8173 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8174 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8175 BPF_MOV64_IMM(BPF_REG_0, 0),
8176 BPF_EXIT_INSN(),
8177 },
8178 .result = ACCEPT,
8179 .prog_type = BPF_PROG_TYPE_XDP,
8180 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8181 },
8182 {
8183 "XDP pkt read, pkt_end > pkt_data', bad access 1",
8184 .insns = {
8185 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8186 offsetof(struct xdp_md, data)),
8187 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8188 offsetof(struct xdp_md, data_end)),
8189 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8191 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8192 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8193 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8194 BPF_MOV64_IMM(BPF_REG_0, 0),
8195 BPF_EXIT_INSN(),
8196 },
8197 .errstr = "R1 offset is outside of the packet",
8198 .result = REJECT,
8199 .prog_type = BPF_PROG_TYPE_XDP,
8200 },
8201 {
8202 "XDP pkt read, pkt_end > pkt_data', bad access 2",
8203 .insns = {
8204 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8205 offsetof(struct xdp_md, data)),
8206 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8207 offsetof(struct xdp_md, data_end)),
8208 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8209 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8210 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8211 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8212 BPF_MOV64_IMM(BPF_REG_0, 0),
8213 BPF_EXIT_INSN(),
8214 },
8215 .errstr = "R1 offset is outside of the packet",
8216 .result = REJECT,
8217 .prog_type = BPF_PROG_TYPE_XDP,
8218 },
8219 {
8220 "XDP pkt read, pkt_data' < pkt_end, good access",
8221 .insns = {
8222 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8223 offsetof(struct xdp_md, data)),
8224 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8225 offsetof(struct xdp_md, data_end)),
8226 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8228 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8229 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8230 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8231 BPF_MOV64_IMM(BPF_REG_0, 0),
8232 BPF_EXIT_INSN(),
8233 },
8234 .result = ACCEPT,
8235 .prog_type = BPF_PROG_TYPE_XDP,
8236 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8237 },
8238 {
8239 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
8240 .insns = {
8241 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8242 offsetof(struct xdp_md, data)),
8243 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8244 offsetof(struct xdp_md, data_end)),
8245 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8247 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8248 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8249 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8250 BPF_MOV64_IMM(BPF_REG_0, 0),
8251 BPF_EXIT_INSN(),
8252 },
8253 .errstr = "R1 offset is outside of the packet",
8254 .result = REJECT,
8255 .prog_type = BPF_PROG_TYPE_XDP,
8256 },
8257 {
8258 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
8259 .insns = {
8260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8261 offsetof(struct xdp_md, data)),
8262 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8263 offsetof(struct xdp_md, data_end)),
8264 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8266 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8267 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8268 BPF_MOV64_IMM(BPF_REG_0, 0),
8269 BPF_EXIT_INSN(),
8270 },
8271 .errstr = "R1 offset is outside of the packet",
8272 .result = REJECT,
8273 .prog_type = BPF_PROG_TYPE_XDP,
8274 },
8275 {
8276 "XDP pkt read, pkt_end < pkt_data', good access",
8277 .insns = {
8278 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8279 offsetof(struct xdp_md, data)),
8280 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8281 offsetof(struct xdp_md, data_end)),
8282 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8283 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8284 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8285 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8286 BPF_MOV64_IMM(BPF_REG_0, 0),
8287 BPF_EXIT_INSN(),
8288 },
8289 .result = ACCEPT,
8290 .prog_type = BPF_PROG_TYPE_XDP,
8291 },
8292 {
8293 "XDP pkt read, pkt_end < pkt_data', bad access 1",
8294 .insns = {
8295 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8296 offsetof(struct xdp_md, data)),
8297 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8298 offsetof(struct xdp_md, data_end)),
8299 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8300 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8301 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8302 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8303 BPF_MOV64_IMM(BPF_REG_0, 0),
8304 BPF_EXIT_INSN(),
8305 },
8306 .errstr = "R1 offset is outside of the packet",
8307 .result = REJECT,
8308 .prog_type = BPF_PROG_TYPE_XDP,
8309 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8310 },
8311 {
8312 "XDP pkt read, pkt_end < pkt_data', bad access 2",
8313 .insns = {
8314 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8315 offsetof(struct xdp_md, data)),
8316 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8317 offsetof(struct xdp_md, data_end)),
8318 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8319 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8320 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8321 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8322 BPF_MOV64_IMM(BPF_REG_0, 0),
8323 BPF_EXIT_INSN(),
8324 },
8325 .errstr = "R1 offset is outside of the packet",
8326 .result = REJECT,
8327 .prog_type = BPF_PROG_TYPE_XDP,
8328 },
8329 {
8330 "XDP pkt read, pkt_data' >= pkt_end, good access",
8331 .insns = {
8332 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8333 offsetof(struct xdp_md, data)),
8334 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8335 offsetof(struct xdp_md, data_end)),
8336 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8337 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8338 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8339 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8340 BPF_MOV64_IMM(BPF_REG_0, 0),
8341 BPF_EXIT_INSN(),
8342 },
8343 .result = ACCEPT,
8344 .prog_type = BPF_PROG_TYPE_XDP,
8345 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8346 },
8347 {
8348 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
8349 .insns = {
8350 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8351 offsetof(struct xdp_md, data)),
8352 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8353 offsetof(struct xdp_md, data_end)),
8354 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8356 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8357 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8358 BPF_MOV64_IMM(BPF_REG_0, 0),
8359 BPF_EXIT_INSN(),
8360 },
8361 .errstr = "R1 offset is outside of the packet",
8362 .result = REJECT,
8363 .prog_type = BPF_PROG_TYPE_XDP,
8364 },
8365 {
8366 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
8367 .insns = {
8368 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8369 offsetof(struct xdp_md, data)),
8370 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8371 offsetof(struct xdp_md, data_end)),
8372 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8374 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8375 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8376 BPF_MOV64_IMM(BPF_REG_0, 0),
8377 BPF_EXIT_INSN(),
8378 },
8379 .errstr = "R1 offset is outside of the packet",
8380 .result = REJECT,
8381 .prog_type = BPF_PROG_TYPE_XDP,
8382 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8383 },
8384 {
8385 "XDP pkt read, pkt_end >= pkt_data', good access",
8386 .insns = {
8387 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8388 offsetof(struct xdp_md, data)),
8389 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8390 offsetof(struct xdp_md, data_end)),
8391 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8392 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8393 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8394 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8395 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8396 BPF_MOV64_IMM(BPF_REG_0, 0),
8397 BPF_EXIT_INSN(),
8398 },
8399 .result = ACCEPT,
8400 .prog_type = BPF_PROG_TYPE_XDP,
8401 },
8402 {
8403 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
8404 .insns = {
8405 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8406 offsetof(struct xdp_md, data)),
8407 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8408 offsetof(struct xdp_md, data_end)),
8409 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8411 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8412 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8413 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8414 BPF_MOV64_IMM(BPF_REG_0, 0),
8415 BPF_EXIT_INSN(),
8416 },
8417 .errstr = "R1 offset is outside of the packet",
8418 .result = REJECT,
8419 .prog_type = BPF_PROG_TYPE_XDP,
8420 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8421 },
8422 {
8423 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
8424 .insns = {
8425 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8426 offsetof(struct xdp_md, data)),
8427 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8428 offsetof(struct xdp_md, data_end)),
8429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8431 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8432 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8433 BPF_MOV64_IMM(BPF_REG_0, 0),
8434 BPF_EXIT_INSN(),
8435 },
8436 .errstr = "R1 offset is outside of the packet",
8437 .result = REJECT,
8438 .prog_type = BPF_PROG_TYPE_XDP,
8439 },
8440 {
8441 "XDP pkt read, pkt_data' <= pkt_end, good access",
8442 .insns = {
8443 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8444 offsetof(struct xdp_md, data)),
8445 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8446 offsetof(struct xdp_md, data_end)),
8447 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8449 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8450 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8451 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8452 BPF_MOV64_IMM(BPF_REG_0, 0),
8453 BPF_EXIT_INSN(),
8454 },
8455 .result = ACCEPT,
8456 .prog_type = BPF_PROG_TYPE_XDP,
8457 },
8458 {
8459 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
8460 .insns = {
8461 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8462 offsetof(struct xdp_md, data)),
8463 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8464 offsetof(struct xdp_md, data_end)),
8465 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8466 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8467 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8468 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8469 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8470 BPF_MOV64_IMM(BPF_REG_0, 0),
8471 BPF_EXIT_INSN(),
8472 },
8473 .errstr = "R1 offset is outside of the packet",
8474 .result = REJECT,
8475 .prog_type = BPF_PROG_TYPE_XDP,
8476 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8477 },
8478 {
8479 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
8480 .insns = {
8481 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8482 offsetof(struct xdp_md, data)),
8483 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8484 offsetof(struct xdp_md, data_end)),
8485 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8486 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8487 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8488 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8489 BPF_MOV64_IMM(BPF_REG_0, 0),
8490 BPF_EXIT_INSN(),
8491 },
8492 .errstr = "R1 offset is outside of the packet",
8493 .result = REJECT,
8494 .prog_type = BPF_PROG_TYPE_XDP,
8495 },
8496 {
8497 "XDP pkt read, pkt_end <= pkt_data', good access",
8498 .insns = {
8499 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8500 offsetof(struct xdp_md, data)),
8501 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8502 offsetof(struct xdp_md, data_end)),
8503 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8505 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8506 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8507 BPF_MOV64_IMM(BPF_REG_0, 0),
8508 BPF_EXIT_INSN(),
8509 },
8510 .result = ACCEPT,
8511 .prog_type = BPF_PROG_TYPE_XDP,
8512 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8513 },
8514 {
8515 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
8516 .insns = {
8517 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8518 offsetof(struct xdp_md, data)),
8519 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8520 offsetof(struct xdp_md, data_end)),
8521 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8522 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8523 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8524 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8525 BPF_MOV64_IMM(BPF_REG_0, 0),
8526 BPF_EXIT_INSN(),
8527 },
8528 .errstr = "R1 offset is outside of the packet",
8529 .result = REJECT,
8530 .prog_type = BPF_PROG_TYPE_XDP,
8531 },
8532 {
8533 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
8534 .insns = {
8535 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8536 offsetof(struct xdp_md, data)),
8537 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8538 offsetof(struct xdp_md, data_end)),
8539 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8540 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8541 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8542 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8543 BPF_MOV64_IMM(BPF_REG_0, 0),
8544 BPF_EXIT_INSN(),
8545 },
8546 .errstr = "R1 offset is outside of the packet",
8547 .result = REJECT,
8548 .prog_type = BPF_PROG_TYPE_XDP,
8549 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8550 },
8551	{
8552	"XDP pkt read, pkt_meta' > pkt_data, good access",
8553 .insns = {
8554 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8555 offsetof(struct xdp_md, data_meta)),
8556 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8557 offsetof(struct xdp_md, data)),
8558 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8559 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8560 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8561 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8562 BPF_MOV64_IMM(BPF_REG_0, 0),
8563 BPF_EXIT_INSN(),
8564 },
8565 .result = ACCEPT,
8566 .prog_type = BPF_PROG_TYPE_XDP,
8567 },
8568 {
8569 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
8570 .insns = {
8571 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8572 offsetof(struct xdp_md, data_meta)),
8573 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8574 offsetof(struct xdp_md, data)),
8575 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8576 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8577 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8578 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8579 BPF_MOV64_IMM(BPF_REG_0, 0),
8580 BPF_EXIT_INSN(),
8581 },
8582 .errstr = "R1 offset is outside of the packet",
8583 .result = REJECT,
8584 .prog_type = BPF_PROG_TYPE_XDP,
8585 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8586 },
8587 {
8588 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
8589 .insns = {
8590 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8591 offsetof(struct xdp_md, data_meta)),
8592 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8593 offsetof(struct xdp_md, data)),
8594 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8596 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8597 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8598 BPF_MOV64_IMM(BPF_REG_0, 0),
8599 BPF_EXIT_INSN(),
8600 },
8601 .errstr = "R1 offset is outside of the packet",
8602 .result = REJECT,
8603 .prog_type = BPF_PROG_TYPE_XDP,
8604 },
8605 {
8606 "XDP pkt read, pkt_data > pkt_meta', good access",
8607 .insns = {
8608 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8609 offsetof(struct xdp_md, data_meta)),
8610 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8611 offsetof(struct xdp_md, data)),
8612 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8614 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8615 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8616 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8617 BPF_MOV64_IMM(BPF_REG_0, 0),
8618 BPF_EXIT_INSN(),
8619 },
8620 .result = ACCEPT,
8621 .prog_type = BPF_PROG_TYPE_XDP,
8622 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8623 },
8624 {
8625 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
8626 .insns = {
8627 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8628 offsetof(struct xdp_md, data_meta)),
8629 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8630 offsetof(struct xdp_md, data)),
8631 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8633 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8634 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8635 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8636 BPF_MOV64_IMM(BPF_REG_0, 0),
8637 BPF_EXIT_INSN(),
8638 },
8639 .errstr = "R1 offset is outside of the packet",
8640 .result = REJECT,
8641 .prog_type = BPF_PROG_TYPE_XDP,
8642 },
8643 {
8644 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
8645 .insns = {
8646 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8647 offsetof(struct xdp_md, data_meta)),
8648 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8649 offsetof(struct xdp_md, data)),
8650 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8652 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8653 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8654 BPF_MOV64_IMM(BPF_REG_0, 0),
8655 BPF_EXIT_INSN(),
8656 },
8657 .errstr = "R1 offset is outside of the packet",
8658 .result = REJECT,
8659 .prog_type = BPF_PROG_TYPE_XDP,
8660 },
8661 {
8662 "XDP pkt read, pkt_meta' < pkt_data, good access",
8663 .insns = {
8664 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8665 offsetof(struct xdp_md, data_meta)),
8666 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8667 offsetof(struct xdp_md, data)),
8668 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8670 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8671 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8672 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8673 BPF_MOV64_IMM(BPF_REG_0, 0),
8674 BPF_EXIT_INSN(),
8675 },
8676 .result = ACCEPT,
8677 .prog_type = BPF_PROG_TYPE_XDP,
8678 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8679 },
8680 {
8681 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
8682 .insns = {
8683 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8684 offsetof(struct xdp_md, data_meta)),
8685 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8686 offsetof(struct xdp_md, data)),
8687 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8689 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8690 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8691 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8692 BPF_MOV64_IMM(BPF_REG_0, 0),
8693 BPF_EXIT_INSN(),
8694 },
8695 .errstr = "R1 offset is outside of the packet",
8696 .result = REJECT,
8697 .prog_type = BPF_PROG_TYPE_XDP,
8698 },
8699 {
8700 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
8701 .insns = {
8702 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8703 offsetof(struct xdp_md, data_meta)),
8704 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8705 offsetof(struct xdp_md, data)),
8706 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8707 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8708 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8709 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8710 BPF_MOV64_IMM(BPF_REG_0, 0),
8711 BPF_EXIT_INSN(),
8712 },
8713 .errstr = "R1 offset is outside of the packet",
8714 .result = REJECT,
8715 .prog_type = BPF_PROG_TYPE_XDP,
8716 },
8717 {
8718 "XDP pkt read, pkt_data < pkt_meta', good access",
8719 .insns = {
8720 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8721 offsetof(struct xdp_md, data_meta)),
8722 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8723 offsetof(struct xdp_md, data)),
8724 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8725 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8726 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8727 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8728 BPF_MOV64_IMM(BPF_REG_0, 0),
8729 BPF_EXIT_INSN(),
8730 },
8731 .result = ACCEPT,
8732 .prog_type = BPF_PROG_TYPE_XDP,
8733 },
8734 {
8735 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
8736 .insns = {
8737 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8738 offsetof(struct xdp_md, data_meta)),
8739 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8740 offsetof(struct xdp_md, data)),
8741 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8743 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8744 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8745 BPF_MOV64_IMM(BPF_REG_0, 0),
8746 BPF_EXIT_INSN(),
8747 },
8748 .errstr = "R1 offset is outside of the packet",
8749 .result = REJECT,
8750 .prog_type = BPF_PROG_TYPE_XDP,
8751 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8752 },
8753 {
8754 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
8755 .insns = {
8756 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8757 offsetof(struct xdp_md, data_meta)),
8758 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8759 offsetof(struct xdp_md, data)),
8760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8762 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8763 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8764 BPF_MOV64_IMM(BPF_REG_0, 0),
8765 BPF_EXIT_INSN(),
8766 },
8767 .errstr = "R1 offset is outside of the packet",
8768 .result = REJECT,
8769 .prog_type = BPF_PROG_TYPE_XDP,
8770 },
8771 {
8772 "XDP pkt read, pkt_meta' >= pkt_data, good access",
8773 .insns = {
8774 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8775 offsetof(struct xdp_md, data_meta)),
8776 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8777 offsetof(struct xdp_md, data)),
8778 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8779 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8780 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8781 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8782 BPF_MOV64_IMM(BPF_REG_0, 0),
8783 BPF_EXIT_INSN(),
8784 },
8785 .result = ACCEPT,
8786 .prog_type = BPF_PROG_TYPE_XDP,
8787 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8788 },
8789 {
8790 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
8791 .insns = {
8792 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8793 offsetof(struct xdp_md, data_meta)),
8794 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8795 offsetof(struct xdp_md, data)),
8796 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8798 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8799 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8800 BPF_MOV64_IMM(BPF_REG_0, 0),
8801 BPF_EXIT_INSN(),
8802 },
8803 .errstr = "R1 offset is outside of the packet",
8804 .result = REJECT,
8805 .prog_type = BPF_PROG_TYPE_XDP,
8806 },
8807 {
8808 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
8809 .insns = {
8810 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8811 offsetof(struct xdp_md, data_meta)),
8812 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8813 offsetof(struct xdp_md, data)),
8814 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8816 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8817 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8818 BPF_MOV64_IMM(BPF_REG_0, 0),
8819 BPF_EXIT_INSN(),
8820 },
8821 .errstr = "R1 offset is outside of the packet",
8822 .result = REJECT,
8823 .prog_type = BPF_PROG_TYPE_XDP,
8824 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8825 },
8826 {
8827 "XDP pkt read, pkt_data >= pkt_meta', good access",
8828 .insns = {
8829 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8830 offsetof(struct xdp_md, data_meta)),
8831 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8832 offsetof(struct xdp_md, data)),
8833 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8834 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8835 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8836 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8837 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8838 BPF_MOV64_IMM(BPF_REG_0, 0),
8839 BPF_EXIT_INSN(),
8840 },
8841 .result = ACCEPT,
8842 .prog_type = BPF_PROG_TYPE_XDP,
8843 },
8844 {
8845 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
8846 .insns = {
8847 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8848 offsetof(struct xdp_md, data_meta)),
8849 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8850 offsetof(struct xdp_md, data)),
8851 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8853 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8854 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8855 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8856 BPF_MOV64_IMM(BPF_REG_0, 0),
8857 BPF_EXIT_INSN(),
8858 },
8859 .errstr = "R1 offset is outside of the packet",
8860 .result = REJECT,
8861 .prog_type = BPF_PROG_TYPE_XDP,
8862 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8863 },
8864 {
8865 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
8866 .insns = {
8867 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8868 offsetof(struct xdp_md, data_meta)),
8869 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8870 offsetof(struct xdp_md, data)),
8871 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8873 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8874 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8875 BPF_MOV64_IMM(BPF_REG_0, 0),
8876 BPF_EXIT_INSN(),
8877 },
8878 .errstr = "R1 offset is outside of the packet",
8879 .result = REJECT,
8880 .prog_type = BPF_PROG_TYPE_XDP,
8881 },
8882 {
8883 "XDP pkt read, pkt_meta' <= pkt_data, good access",
8884 .insns = {
8885 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8886 offsetof(struct xdp_md, data_meta)),
8887 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8888 offsetof(struct xdp_md, data)),
8889 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8891 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8892 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8893 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8894 BPF_MOV64_IMM(BPF_REG_0, 0),
8895 BPF_EXIT_INSN(),
8896 },
8897 .result = ACCEPT,
8898 .prog_type = BPF_PROG_TYPE_XDP,
8899 },
8900 {
8901 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
8902 .insns = {
8903 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8904 offsetof(struct xdp_md, data_meta)),
8905 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8906 offsetof(struct xdp_md, data)),
8907 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8909 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8910 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8911 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8912 BPF_MOV64_IMM(BPF_REG_0, 0),
8913 BPF_EXIT_INSN(),
8914 },
8915 .errstr = "R1 offset is outside of the packet",
8916 .result = REJECT,
8917 .prog_type = BPF_PROG_TYPE_XDP,
8918 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8919 },
8920 {
8921 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
8922 .insns = {
8923 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8924 offsetof(struct xdp_md, data_meta)),
8925 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8926 offsetof(struct xdp_md, data)),
8927 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8929 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8930 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8931 BPF_MOV64_IMM(BPF_REG_0, 0),
8932 BPF_EXIT_INSN(),
8933 },
8934 .errstr = "R1 offset is outside of the packet",
8935 .result = REJECT,
8936 .prog_type = BPF_PROG_TYPE_XDP,
8937 },
8938 {
8939 "XDP pkt read, pkt_data <= pkt_meta', good access",
8940 .insns = {
8941 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8942 offsetof(struct xdp_md, data_meta)),
8943 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8944 offsetof(struct xdp_md, data)),
8945 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8947 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8948 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8949 BPF_MOV64_IMM(BPF_REG_0, 0),
8950 BPF_EXIT_INSN(),
8951 },
8952 .result = ACCEPT,
8953 .prog_type = BPF_PROG_TYPE_XDP,
8954 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8955 },
8956 {
8957 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
8958 .insns = {
8959 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8960 offsetof(struct xdp_md, data_meta)),
8961 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8962 offsetof(struct xdp_md, data)),
8963 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8964 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8965 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8966 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8967 BPF_MOV64_IMM(BPF_REG_0, 0),
8968 BPF_EXIT_INSN(),
8969 },
8970 .errstr = "R1 offset is outside of the packet",
8971 .result = REJECT,
8972 .prog_type = BPF_PROG_TYPE_XDP,
8973 },
8974 {
8975 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
8976 .insns = {
8977 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8978 offsetof(struct xdp_md, data_meta)),
8979 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8980 offsetof(struct xdp_md, data)),
8981 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8983 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8984 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8985 BPF_MOV64_IMM(BPF_REG_0, 0),
8986 BPF_EXIT_INSN(),
8987 },
8988 .errstr = "R1 offset is outside of the packet",
8989 .result = REJECT,
8990 .prog_type = BPF_PROG_TYPE_XDP,
8991 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8992 },
8993 {
8994	"check deducing bounds from const, 1",
8995 .insns = {
8996 BPF_MOV64_IMM(BPF_REG_0, 1),
8997 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
8998 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8999 BPF_EXIT_INSN(),
9000 },
9001 .result = REJECT,
9002 .errstr = "R0 tried to subtract pointer from scalar",
9003 },
9004 {
9005 "check deducing bounds from const, 2",
9006 .insns = {
9007 BPF_MOV64_IMM(BPF_REG_0, 1),
9008 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
9009 BPF_EXIT_INSN(),
9010 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
9011 BPF_EXIT_INSN(),
9012 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9013 BPF_EXIT_INSN(),
9014 },
9015 .result = ACCEPT,
9016	.retval = 1,
9017	},
9018 {
9019 "check deducing bounds from const, 3",
9020 .insns = {
9021 BPF_MOV64_IMM(BPF_REG_0, 0),
9022 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9023 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9024 BPF_EXIT_INSN(),
9025 },
9026 .result = REJECT,
9027 .errstr = "R0 tried to subtract pointer from scalar",
9028 },
9029 {
9030 "check deducing bounds from const, 4",
9031 .insns = {
9032 BPF_MOV64_IMM(BPF_REG_0, 0),
9033 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
9034 BPF_EXIT_INSN(),
9035 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9036 BPF_EXIT_INSN(),
9037 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9038 BPF_EXIT_INSN(),
9039 },
9040 .result = ACCEPT,
9041 },
9042 {
9043 "check deducing bounds from const, 5",
9044 .insns = {
9045 BPF_MOV64_IMM(BPF_REG_0, 0),
9046 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9047 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9048 BPF_EXIT_INSN(),
9049 },
9050 .result = REJECT,
9051 .errstr = "R0 tried to subtract pointer from scalar",
9052 },
9053 {
9054 "check deducing bounds from const, 6",
9055 .insns = {
9056 BPF_MOV64_IMM(BPF_REG_0, 0),
9057 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9058 BPF_EXIT_INSN(),
9059 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9060 BPF_EXIT_INSN(),
9061 },
9062 .result = REJECT,
9063 .errstr = "R0 tried to subtract pointer from scalar",
9064 },
9065 {
9066 "check deducing bounds from const, 7",
9067 .insns = {
9068 BPF_MOV64_IMM(BPF_REG_0, ~0),
9069 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9070 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9071 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9072 offsetof(struct __sk_buff, mark)),
9073 BPF_EXIT_INSN(),
9074 },
9075 .result = REJECT,
9076 .errstr = "dereference of modified ctx ptr",
9077 },
9078 {
9079 "check deducing bounds from const, 8",
9080 .insns = {
9081 BPF_MOV64_IMM(BPF_REG_0, ~0),
9082 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9083 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
9084 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9085 offsetof(struct __sk_buff, mark)),
9086 BPF_EXIT_INSN(),
9087 },
9088 .result = REJECT,
9089 .errstr = "dereference of modified ctx ptr",
9090 },
9091 {
9092 "check deducing bounds from const, 9",
9093 .insns = {
9094 BPF_MOV64_IMM(BPF_REG_0, 0),
9095 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9096 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9097 BPF_EXIT_INSN(),
9098 },
9099 .result = REJECT,
9100 .errstr = "R0 tried to subtract pointer from scalar",
9101 },
9102 {
9103 "check deducing bounds from const, 10",
9104 .insns = {
9105 BPF_MOV64_IMM(BPF_REG_0, 0),
9106 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9107 /* Marks reg as unknown. */
9108 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
9109 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9110 BPF_EXIT_INSN(),
9111 },
9112 .result = REJECT,
9113 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
9114 },
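	/* Note on the ten tests above: only tests 2 and 4 pin r0 to an exact
	 * constant (1 and 0) with paired JSGE/JSLE checks, so the following
	 * pointer arithmetic uses a known value and they are accepted.  The
	 * rest either subtract a pointer from a scalar, which is never allowed,
	 * or adjust the ctx pointer by a value whose deduced bounds cannot
	 * justify the later dereference.
	 */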
9115 {
9116	"bpf_exit with invalid return code. test1",
9117 .insns = {
9118 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9119 BPF_EXIT_INSN(),
9120 },
9121 .errstr = "R0 has value (0x0; 0xffffffff)",
9122 .result = REJECT,
9123 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9124 },
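	/* The "(0x0; 0xffffffff)" part of these errstr strings is the
	 * verifier's tnum dump for R0: the first number is the known bit
	 * pattern, the second the mask of still-unknown bits.  CGROUP_SOCK
	 * programs must return 0 or 1, so R0 is rejected at BPF_EXIT when
	 * its unknown bits reach past bit 0 (tests 1 and 3) or when it is a
	 * known constant other than 0/1 (test 5).
	 */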
9125 {
9126 "bpf_exit with invalid return code. test2",
9127 .insns = {
9128 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9129 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
9130 BPF_EXIT_INSN(),
9131 },
9132 .result = ACCEPT,
9133 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9134 },
9135 {
9136 "bpf_exit with invalid return code. test3",
9137 .insns = {
9138 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9139 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
9140 BPF_EXIT_INSN(),
9141 },
9142 .errstr = "R0 has value (0x0; 0x3)",
9143 .result = REJECT,
9144 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9145 },
9146 {
9147 "bpf_exit with invalid return code. test4",
9148 .insns = {
9149 BPF_MOV64_IMM(BPF_REG_0, 1),
9150 BPF_EXIT_INSN(),
9151 },
9152 .result = ACCEPT,
9153 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9154 },
9155 {
9156 "bpf_exit with invalid return code. test5",
9157 .insns = {
9158 BPF_MOV64_IMM(BPF_REG_0, 2),
9159 BPF_EXIT_INSN(),
9160 },
9161 .errstr = "R0 has value (0x2; 0x0)",
9162 .result = REJECT,
9163 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9164 },
9165 {
9166 "bpf_exit with invalid return code. test6",
9167 .insns = {
9168 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9169 BPF_EXIT_INSN(),
9170 },
9171 .errstr = "R0 is not a known value (ctx)",
9172 .result = REJECT,
9173 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9174 },
9175 {
9176 "bpf_exit with invalid return code. test7",
9177 .insns = {
9178 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9179 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
9180 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
9181 BPF_EXIT_INSN(),
9182 },
9183 .errstr = "R0 has unknown scalar value",
9184 .result = REJECT,
9185 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9186 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009187 {
9188 "calls: basic sanity",
9189 .insns = {
9190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9191 BPF_MOV64_IMM(BPF_REG_0, 1),
9192 BPF_EXIT_INSN(),
9193 BPF_MOV64_IMM(BPF_REG_0, 2),
9194 BPF_EXIT_INSN(),
9195 },
9196 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9197 .result = ACCEPT,
9198 },
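	/* Throughout the "calls:" tests, BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0,
	 * 1, 0, N) encodes a bpf-to-bpf call: src_reg == 1 (BPF_PSEUDO_CALL)
	 * distinguishes it from a helper call, and imm is a relative offset,
	 * so the callee starts at call_insn_index + N + 1.  In the test
	 * above, the call at insn 0 with imm 2 lands on insn 3 ("r0 = 2").
	 */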
9199 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009200 "calls: not on unprivileged",
9201 .insns = {
9202 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9203 BPF_MOV64_IMM(BPF_REG_0, 1),
9204 BPF_EXIT_INSN(),
9205 BPF_MOV64_IMM(BPF_REG_0, 2),
9206 BPF_EXIT_INSN(),
9207 },
9208 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
9209 .result_unpriv = REJECT,
9210 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009211 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009212 },
9213 {
Daniel Borkmann21ccaf22018-01-26 23:33:48 +01009214 "calls: div by 0 in subprog",
9215 .insns = {
9216 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9217 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9218 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9219 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9220 offsetof(struct __sk_buff, data_end)),
9221 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9223 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9224 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9225 BPF_MOV64_IMM(BPF_REG_0, 1),
9226 BPF_EXIT_INSN(),
9227 BPF_MOV32_IMM(BPF_REG_2, 0),
9228 BPF_MOV32_IMM(BPF_REG_3, 1),
9229 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
9230 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9231 offsetof(struct __sk_buff, data)),
9232 BPF_EXIT_INSN(),
9233 },
9234 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9235 .result = ACCEPT,
9236 .retval = 1,
9237 },
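	/* Assumed background for the test above: with the runtime semantics
	 * where BPF_DIV by zero simply yields 0 in the destination register
	 * instead of aborting the program, the subprog keeps running after
	 * the division, which is why the test can expect retval 1.
	 */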
9238 {
9239 "calls: multiple ret types in subprog 1",
9240 .insns = {
9241 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9242 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9243 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9244 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9245 offsetof(struct __sk_buff, data_end)),
9246 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9247 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9248 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9249 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9250 BPF_MOV64_IMM(BPF_REG_0, 1),
9251 BPF_EXIT_INSN(),
9252 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9253 offsetof(struct __sk_buff, data)),
9254 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9255 BPF_MOV32_IMM(BPF_REG_0, 42),
9256 BPF_EXIT_INSN(),
9257 },
9258 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9259 .result = REJECT,
9260 .errstr = "R0 invalid mem access 'inv'",
9261 },
9262 {
9263 "calls: multiple ret types in subprog 2",
9264 .insns = {
9265 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9266 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9267 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9268 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9269 offsetof(struct __sk_buff, data_end)),
9270 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9272 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9273 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9274 BPF_MOV64_IMM(BPF_REG_0, 1),
9275 BPF_EXIT_INSN(),
9276 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9277 offsetof(struct __sk_buff, data)),
9278 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9279 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
9280 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9281 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9283 BPF_LD_MAP_FD(BPF_REG_1, 0),
9284 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9285 BPF_FUNC_map_lookup_elem),
9286 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9287 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
9288 offsetof(struct __sk_buff, data)),
9289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
9290 BPF_EXIT_INSN(),
9291 },
9292 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9293 .fixup_map1 = { 16 },
9294 .result = REJECT,
9295 .errstr = "R0 min value is outside of the array range",
9296 },
9297 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009298 "calls: overlapping caller/callee",
9299 .insns = {
9300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
9301 BPF_MOV64_IMM(BPF_REG_0, 1),
9302 BPF_EXIT_INSN(),
9303 },
9304 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9305 .errstr = "last insn is not an exit or jmp",
9306 .result = REJECT,
9307 },
9308 {
9309 "calls: wrong recursive calls",
9310 .insns = {
9311 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9312 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9313 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9315 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9316 BPF_MOV64_IMM(BPF_REG_0, 1),
9317 BPF_EXIT_INSN(),
9318 },
9319 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9320 .errstr = "jump out of range",
9321 .result = REJECT,
9322 },
9323 {
9324 "calls: wrong src reg",
9325 .insns = {
9326 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
9327 BPF_MOV64_IMM(BPF_REG_0, 1),
9328 BPF_EXIT_INSN(),
9329 },
9330 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9331 .errstr = "BPF_CALL uses reserved fields",
9332 .result = REJECT,
9333 },
9334 {
9335 "calls: wrong off value",
9336 .insns = {
9337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
9338 BPF_MOV64_IMM(BPF_REG_0, 1),
9339 BPF_EXIT_INSN(),
9340 BPF_MOV64_IMM(BPF_REG_0, 2),
9341 BPF_EXIT_INSN(),
9342 },
9343 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9344 .errstr = "BPF_CALL uses reserved fields",
9345 .result = REJECT,
9346 },
9347 {
9348 "calls: jump back loop",
9349 .insns = {
9350 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9351 BPF_MOV64_IMM(BPF_REG_0, 1),
9352 BPF_EXIT_INSN(),
9353 },
9354 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9355 .errstr = "back-edge from insn 0 to 0",
9356 .result = REJECT,
9357 },
9358 {
9359 "calls: conditional call",
9360 .insns = {
9361 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9362 offsetof(struct __sk_buff, mark)),
9363 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9364 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9365 BPF_MOV64_IMM(BPF_REG_0, 1),
9366 BPF_EXIT_INSN(),
9367 BPF_MOV64_IMM(BPF_REG_0, 2),
9368 BPF_EXIT_INSN(),
9369 },
9370 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9371 .errstr = "jump out of range",
9372 .result = REJECT,
9373 },
9374 {
9375 "calls: conditional call 2",
9376 .insns = {
9377 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9378 offsetof(struct __sk_buff, mark)),
9379 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9381 BPF_MOV64_IMM(BPF_REG_0, 1),
9382 BPF_EXIT_INSN(),
9383 BPF_MOV64_IMM(BPF_REG_0, 2),
9384 BPF_EXIT_INSN(),
9385 BPF_MOV64_IMM(BPF_REG_0, 3),
9386 BPF_EXIT_INSN(),
9387 },
9388 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9389 .result = ACCEPT,
9390 },
9391 {
9392 "calls: conditional call 3",
9393 .insns = {
9394 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9395 offsetof(struct __sk_buff, mark)),
9396 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9397 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9398 BPF_MOV64_IMM(BPF_REG_0, 1),
9399 BPF_EXIT_INSN(),
9400 BPF_MOV64_IMM(BPF_REG_0, 1),
9401 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9402 BPF_MOV64_IMM(BPF_REG_0, 3),
9403 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9404 },
9405 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9406 .errstr = "back-edge from insn",
9407 .result = REJECT,
9408 },
9409 {
9410 "calls: conditional call 4",
9411 .insns = {
9412 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9413 offsetof(struct __sk_buff, mark)),
9414 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9415 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9416 BPF_MOV64_IMM(BPF_REG_0, 1),
9417 BPF_EXIT_INSN(),
9418 BPF_MOV64_IMM(BPF_REG_0, 1),
9419 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
9420 BPF_MOV64_IMM(BPF_REG_0, 3),
9421 BPF_EXIT_INSN(),
9422 },
9423 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9424 .result = ACCEPT,
9425 },
9426 {
9427 "calls: conditional call 5",
9428 .insns = {
9429 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9430 offsetof(struct __sk_buff, mark)),
9431 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9432 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9433 BPF_MOV64_IMM(BPF_REG_0, 1),
9434 BPF_EXIT_INSN(),
9435 BPF_MOV64_IMM(BPF_REG_0, 1),
9436 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9437 BPF_MOV64_IMM(BPF_REG_0, 3),
9438 BPF_EXIT_INSN(),
9439 },
9440 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9441 .errstr = "back-edge from insn",
9442 .result = REJECT,
9443 },
9444 {
9445 "calls: conditional call 6",
9446 .insns = {
9447 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9448 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
9449 BPF_EXIT_INSN(),
9450 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9451 offsetof(struct __sk_buff, mark)),
9452 BPF_EXIT_INSN(),
9453 },
9454 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9455 .errstr = "back-edge from insn",
9456 .result = REJECT,
9457 },
9458 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009459 "calls: using r0 returned by callee",
9460 .insns = {
9461 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9462 BPF_EXIT_INSN(),
9463 BPF_MOV64_IMM(BPF_REG_0, 2),
9464 BPF_EXIT_INSN(),
9465 },
9466 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9467 .result = ACCEPT,
9468 },
9469 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009470 "calls: using uninit r0 from callee",
9471 .insns = {
9472 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9473 BPF_EXIT_INSN(),
9474 BPF_EXIT_INSN(),
9475 },
9476 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9477 .errstr = "!read_ok",
9478 .result = REJECT,
9479 },
9480 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009481 "calls: callee is using r1",
9482 .insns = {
9483 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9484 BPF_EXIT_INSN(),
9485 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9486 offsetof(struct __sk_buff, len)),
9487 BPF_EXIT_INSN(),
9488 },
9489 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
9490 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009491 .retval = TEST_DATA_LEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009492 },
9493 {
9494 "calls: callee using args1",
9495 .insns = {
9496 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9497 BPF_EXIT_INSN(),
9498 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9499 BPF_EXIT_INSN(),
9500 },
9501 .errstr_unpriv = "allowed for root only",
9502 .result_unpriv = REJECT,
9503 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009504 .retval = POINTER_VALUE,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009505 },
9506 {
9507 "calls: callee using wrong args2",
9508 .insns = {
9509 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9510 BPF_EXIT_INSN(),
9511 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9512 BPF_EXIT_INSN(),
9513 },
9514 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9515 .errstr = "R2 !read_ok",
9516 .result = REJECT,
9517 },
9518 {
9519 "calls: callee using two args",
9520 .insns = {
9521 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9522 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
9523 offsetof(struct __sk_buff, len)),
9524 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
9525 offsetof(struct __sk_buff, len)),
9526 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9527 BPF_EXIT_INSN(),
9528 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9529 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9530 BPF_EXIT_INSN(),
9531 },
9532 .errstr_unpriv = "allowed for root only",
9533 .result_unpriv = REJECT,
9534 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009535 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009536 },
9537 {
9538 "calls: callee changing pkt pointers",
9539 .insns = {
9540 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
9541 offsetof(struct xdp_md, data)),
9542 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
9543 offsetof(struct xdp_md, data_end)),
9544 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
9545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
9546 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
9547 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9548 /* clear_all_pkt_pointers() has to walk all frames
9549 * to make sure that pkt pointers in the caller
9550 * are cleared when the callee calls a helper that
9551 * adjusts the packet size
9552 */
9553 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
9554 BPF_MOV32_IMM(BPF_REG_0, 0),
9555 BPF_EXIT_INSN(),
9556 BPF_MOV64_IMM(BPF_REG_2, 0),
9557 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9558 BPF_FUNC_xdp_adjust_head),
9559 BPF_EXIT_INSN(),
9560 },
9561 .result = REJECT,
9562 .errstr = "R6 invalid mem access 'inv'",
9563 .prog_type = BPF_PROG_TYPE_XDP,
9564 },
9565 {
9566 "calls: two calls with args",
9567 .insns = {
9568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9569 BPF_EXIT_INSN(),
9570 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9571 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9572 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9573 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9574 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9575 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9576 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9577 BPF_EXIT_INSN(),
9578 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9579 offsetof(struct __sk_buff, len)),
9580 BPF_EXIT_INSN(),
9581 },
9582 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9583 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009584 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009585 },
9586 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009587 "calls: calls with stack arith",
9588 .insns = {
9589 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9591 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9592 BPF_EXIT_INSN(),
9593 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9595 BPF_EXIT_INSN(),
9596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9597 BPF_MOV64_IMM(BPF_REG_0, 42),
9598 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9599 BPF_EXIT_INSN(),
9600 },
9601 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9602 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009603 .retval = 42,
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009604 },
9605 {
9606 "calls: calls with misaligned stack access",
9607 .insns = {
9608 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9610 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9611 BPF_EXIT_INSN(),
9612 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
9613 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9614 BPF_EXIT_INSN(),
9615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9616 BPF_MOV64_IMM(BPF_REG_0, 42),
9617 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9618 BPF_EXIT_INSN(),
9619 },
9620 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9621 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
9622 .errstr = "misaligned stack access",
9623 .result = REJECT,
9624 },
9625 {
9626 "calls: calls control flow, jump test",
9627 .insns = {
9628 BPF_MOV64_IMM(BPF_REG_0, 42),
9629 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9630 BPF_MOV64_IMM(BPF_REG_0, 43),
9631 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9632 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
9633 BPF_EXIT_INSN(),
9634 },
9635 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9636 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009637 .retval = 43,
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009638 },
9639 {
9640 "calls: calls control flow, jump test 2",
9641 .insns = {
9642 BPF_MOV64_IMM(BPF_REG_0, 42),
9643 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9644 BPF_MOV64_IMM(BPF_REG_0, 43),
9645 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9647 BPF_EXIT_INSN(),
9648 },
9649 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9650 .errstr = "jump out of range from insn 1 to 4",
9651 .result = REJECT,
9652 },
9653 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009654 "calls: two calls with bad jump",
9655 .insns = {
9656 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9657 BPF_EXIT_INSN(),
9658 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9659 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9660 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9661 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9662 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9663 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9664 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9665 BPF_EXIT_INSN(),
9666 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9667 offsetof(struct __sk_buff, len)),
9668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
9669 BPF_EXIT_INSN(),
9670 },
9671 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9672 .errstr = "jump out of range from insn 11 to 9",
9673 .result = REJECT,
9674 },
9675 {
9676 "calls: recursive call. test1",
9677 .insns = {
9678 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9679 BPF_EXIT_INSN(),
9680 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9681 BPF_EXIT_INSN(),
9682 },
9683 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9684 .errstr = "back-edge",
9685 .result = REJECT,
9686 },
9687 {
9688 "calls: recursive call. test2",
9689 .insns = {
9690 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9691 BPF_EXIT_INSN(),
9692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9693 BPF_EXIT_INSN(),
9694 },
9695 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9696 .errstr = "back-edge",
9697 .result = REJECT,
9698 },
9699 {
9700 "calls: unreachable code",
9701 .insns = {
9702 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9703 BPF_EXIT_INSN(),
9704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9705 BPF_EXIT_INSN(),
9706 BPF_MOV64_IMM(BPF_REG_0, 0),
9707 BPF_EXIT_INSN(),
9708 BPF_MOV64_IMM(BPF_REG_0, 0),
9709 BPF_EXIT_INSN(),
9710 },
9711 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9712 .errstr = "unreachable insn 6",
9713 .result = REJECT,
9714 },
9715 {
9716 "calls: invalid call",
9717 .insns = {
9718 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9719 BPF_EXIT_INSN(),
9720 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
9721 BPF_EXIT_INSN(),
9722 },
9723 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9724 .errstr = "invalid destination",
9725 .result = REJECT,
9726 },
9727 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009728 "calls: invalid call 2",
9729 .insns = {
9730 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9731 BPF_EXIT_INSN(),
9732 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
9733 BPF_EXIT_INSN(),
9734 },
9735 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9736 .errstr = "invalid destination",
9737 .result = REJECT,
9738 },
9739 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009740 "calls: jumping across function bodies. test1",
9741 .insns = {
9742 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9743 BPF_MOV64_IMM(BPF_REG_0, 0),
9744 BPF_EXIT_INSN(),
9745 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
9746 BPF_EXIT_INSN(),
9747 },
9748 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9749 .errstr = "jump out of range",
9750 .result = REJECT,
9751 },
9752 {
9753 "calls: jumping across function bodies. test2",
9754 .insns = {
9755 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
9756 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9757 BPF_MOV64_IMM(BPF_REG_0, 0),
9758 BPF_EXIT_INSN(),
9759 BPF_EXIT_INSN(),
9760 },
9761 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9762 .errstr = "jump out of range",
9763 .result = REJECT,
9764 },
9765 {
9766 "calls: call without exit",
9767 .insns = {
9768 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9769 BPF_EXIT_INSN(),
9770 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9771 BPF_EXIT_INSN(),
9772 BPF_MOV64_IMM(BPF_REG_0, 0),
9773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
9774 },
9775 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9776 .errstr = "not an exit",
9777 .result = REJECT,
9778 },
9779 {
9780 "calls: call into middle of ld_imm64",
9781 .insns = {
9782 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9783 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9784 BPF_MOV64_IMM(BPF_REG_0, 0),
9785 BPF_EXIT_INSN(),
9786 BPF_LD_IMM64(BPF_REG_0, 0),
9787 BPF_EXIT_INSN(),
9788 },
9789 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9790 .errstr = "last insn",
9791 .result = REJECT,
9792 },
9793 {
9794 "calls: call into middle of other call",
9795 .insns = {
9796 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9797 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9798 BPF_MOV64_IMM(BPF_REG_0, 0),
9799 BPF_EXIT_INSN(),
9800 BPF_MOV64_IMM(BPF_REG_0, 0),
9801 BPF_MOV64_IMM(BPF_REG_0, 0),
9802 BPF_EXIT_INSN(),
9803 },
9804 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9805 .errstr = "last insn",
9806 .result = REJECT,
9807 },
9808 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009809 "calls: ld_abs with changing ctx data in callee",
9810 .insns = {
9811 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9812 BPF_LD_ABS(BPF_B, 0),
9813 BPF_LD_ABS(BPF_H, 0),
9814 BPF_LD_ABS(BPF_W, 0),
9815 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
9816 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
9817 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
9818 BPF_LD_ABS(BPF_B, 0),
9819 BPF_LD_ABS(BPF_H, 0),
9820 BPF_LD_ABS(BPF_W, 0),
9821 BPF_EXIT_INSN(),
9822 BPF_MOV64_IMM(BPF_REG_2, 1),
9823 BPF_MOV64_IMM(BPF_REG_3, 2),
9824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9825 BPF_FUNC_skb_vlan_push),
9826 BPF_EXIT_INSN(),
9827 },
9828 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9829 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
9830 .result = REJECT,
9831 },
9832 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009833 "calls: two calls with bad fallthrough",
9834 .insns = {
9835 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9836 BPF_EXIT_INSN(),
9837 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9838 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9839 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9840 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9841 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9842 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9843 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9844 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
9845 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9846 offsetof(struct __sk_buff, len)),
9847 BPF_EXIT_INSN(),
9848 },
9849 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9850 .errstr = "not an exit",
9851 .result = REJECT,
9852 },
9853 {
9854 "calls: two calls with stack read",
9855 .insns = {
9856 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9857 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9858 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9859 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9860 BPF_EXIT_INSN(),
9861 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9862 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9863 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9864 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9865 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9866 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9867 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9868 BPF_EXIT_INSN(),
9869 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9870 BPF_EXIT_INSN(),
9871 },
9872 .prog_type = BPF_PROG_TYPE_XDP,
9873 .result = ACCEPT,
9874 },
9875 {
9876 "calls: two calls with stack write",
9877 .insns = {
9878 /* main prog */
9879 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9880 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9881 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9882 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9884 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9885 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
9886 BPF_EXIT_INSN(),
9887
9888 /* subprog 1 */
9889 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9890 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9891 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
9892 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
9893 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9894 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9895 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
9896 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
9897 /* write into stack frame of main prog */
9898 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9899 BPF_EXIT_INSN(),
9900
9901 /* subprog 2 */
9902 /* read from stack frame of main prog */
9903 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9904 BPF_EXIT_INSN(),
9905 },
9906 .prog_type = BPF_PROG_TYPE_XDP,
9907 .result = ACCEPT,
9908 },
9909 {
Jann Horn6b80ad22017-12-22 19:12:35 +01009910 "calls: stack overflow using two frames (pre-call access)",
9911 .insns = {
9912 /* prog 1 */
9913 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9914 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
9915 BPF_EXIT_INSN(),
9916
9917 /* prog 2 */
9918 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9919 BPF_MOV64_IMM(BPF_REG_0, 0),
9920 BPF_EXIT_INSN(),
9921 },
9922 .prog_type = BPF_PROG_TYPE_XDP,
9923 .errstr = "combined stack size",
9924 .result = REJECT,
9925 },
9926 {
9927 "calls: stack overflow using two frames (post-call access)",
9928 .insns = {
9929 /* prog 1 */
9930 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
9931 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9932 BPF_EXIT_INSN(),
9933
9934 /* prog 2 */
9935 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9936 BPF_MOV64_IMM(BPF_REG_0, 0),
9937 BPF_EXIT_INSN(),
9938 },
9939 .prog_type = BPF_PROG_TYPE_XDP,
9940 .errstr = "combined stack size",
9941 .result = REJECT,
9942 },
9943 {
Alexei Starovoitov6b86c422017-12-25 13:15:41 -08009944 "calls: stack depth check using three frames. test1",
9945 .insns = {
9946 /* main */
9947 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
9948 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
9949 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
9950 BPF_MOV64_IMM(BPF_REG_0, 0),
9951 BPF_EXIT_INSN(),
9952 /* A */
9953 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
9954 BPF_EXIT_INSN(),
9955 /* B */
9956 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
9957 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
9958 BPF_EXIT_INSN(),
9959 },
9960 .prog_type = BPF_PROG_TYPE_XDP,
9961 /* stack_main=32, stack_A=256, stack_B=64
9962 * and max(main+A, main+A+B) < 512
9963 */
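	/* Worked out: main+A = 32 + 256 = 288 and main+B+A = 32 + 64 + 256 =
	 * 352, both below the 512-byte MAX_BPF_STACK limit, hence ACCEPT.
	 */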
9964 .result = ACCEPT,
9965 },
9966 {
9967 "calls: stack depth check using three frames. test2",
9968 .insns = {
9969 /* main */
9970 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
9971 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
9972 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
9973 BPF_MOV64_IMM(BPF_REG_0, 0),
9974 BPF_EXIT_INSN(),
9975 /* A */
9976 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
9977 BPF_EXIT_INSN(),
9978 /* B */
9979 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
9980 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
9981 BPF_EXIT_INSN(),
9982 },
9983 .prog_type = BPF_PROG_TYPE_XDP,
9984 /* stack_main=32, stack_A=64, stack_B=256
9985 * and max(main+A, main+A+B) < 512
9986 */
9987 .result = ACCEPT,
9988 },
9989 {
9990 "calls: stack depth check using three frames. test3",
9991 .insns = {
9992 /* main */
9993 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9994 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
9995 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9996 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
9997 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
9998 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
9999 BPF_MOV64_IMM(BPF_REG_0, 0),
10000 BPF_EXIT_INSN(),
10001 /* A */
10002 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
10003 BPF_EXIT_INSN(),
10004 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
10005 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10006 /* B */
10007 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
10008 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
10009 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10010 BPF_EXIT_INSN(),
10011 },
10012 .prog_type = BPF_PROG_TYPE_XDP,
10013 /* stack_main=64, stack_A=224, stack_B=256
10014 * and max(main+A, main+A+B) > 512
10015 */
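	/* Worked out: main+A = 64 + 224 = 288 still fits, but the deepest
	 * chain main+B+A = 64 + 256 + 224 = 544 exceeds the 512-byte limit,
	 * hence the "combined stack" rejection.
	 */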
10016 .errstr = "combined stack",
10017 .result = REJECT,
10018 },
10019 {
10020 "calls: stack depth check using three frames. test4",
10021 /* void main(void) {
10022 * func1(0);
10023 * func1(1);
10024 * func2(1);
10025 * }
10026 * void func1(int alloc_or_recurse) {
10027 * if (alloc_or_recurse) {
10028 * frame_pointer[-300] = 1;
10029 * } else {
10030 * func2(alloc_or_recurse);
10031 * }
10032 * }
10033 * void func2(int alloc_or_recurse) {
10034 * if (alloc_or_recurse) {
10035 * frame_pointer[-300] = 1;
10036 * }
10037 * }
10038 */
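	/* Assumed reasoning for the REJECT below: the verifier records each
	 * subprog's deepest stack access regardless of branch, so func1 and
	 * func2 both count as 300 bytes, and the main -> func1 -> func2 call
	 * chain totals roughly 600 bytes, over the 512-byte combined limit
	 * even though no single frame is too large.
	 */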
10039 .insns = {
10040 /* main */
10041 BPF_MOV64_IMM(BPF_REG_1, 0),
10042 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10043 BPF_MOV64_IMM(BPF_REG_1, 1),
10044 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10045 BPF_MOV64_IMM(BPF_REG_1, 1),
10046 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
10047 BPF_MOV64_IMM(BPF_REG_0, 0),
10048 BPF_EXIT_INSN(),
10049 /* A */
10050 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
10051 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10052 BPF_EXIT_INSN(),
10053 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10054 BPF_EXIT_INSN(),
10055 /* B */
10056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10057 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10058 BPF_EXIT_INSN(),
10059 },
10060 .prog_type = BPF_PROG_TYPE_XDP,
10061 .result = REJECT,
10062 .errstr = "combined stack",
10063 },
10064 {
Alexei Starovoitovaada9ce2017-12-25 13:15:42 -080010065 "calls: stack depth check using three frames. test5",
10066 .insns = {
10067 /* main */
10068 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
10069 BPF_EXIT_INSN(),
10070 /* A */
10071 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10072 BPF_EXIT_INSN(),
10073 /* B */
10074 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
10075 BPF_EXIT_INSN(),
10076 /* C */
10077 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
10078 BPF_EXIT_INSN(),
10079 /* D */
10080 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
10081 BPF_EXIT_INSN(),
10082 /* E */
10083 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
10084 BPF_EXIT_INSN(),
10085 /* F */
10086 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
10087 BPF_EXIT_INSN(),
10088 /* G */
10089 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
10090 BPF_EXIT_INSN(),
10091 /* H */
10092 BPF_MOV64_IMM(BPF_REG_0, 0),
10093 BPF_EXIT_INSN(),
10094 },
10095 .prog_type = BPF_PROG_TYPE_XDP,
10096 .errstr = "call stack",
10097 .result = REJECT,
10098 },
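	/* test5 uses tiny frames but a long chain: main plus subprogs A..H is
	 * nine frames, which is past the verifier's call-chain limit of eight
	 * (MAX_CALL_FRAMES), hence the "call stack" error rather than a
	 * combined-stack-size one.
	 */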
10099 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010100 "calls: spill into caller stack frame",
10101 .insns = {
10102 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10103 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10106 BPF_EXIT_INSN(),
10107 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
10108 BPF_MOV64_IMM(BPF_REG_0, 0),
10109 BPF_EXIT_INSN(),
10110 },
10111 .prog_type = BPF_PROG_TYPE_XDP,
10112 .errstr = "cannot spill",
10113 .result = REJECT,
10114 },
10115 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010116 "calls: write into caller stack frame",
10117 .insns = {
10118 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10120 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10121 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10122 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10123 BPF_EXIT_INSN(),
10124 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
10125 BPF_MOV64_IMM(BPF_REG_0, 0),
10126 BPF_EXIT_INSN(),
10127 },
10128 .prog_type = BPF_PROG_TYPE_XDP,
10129 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010130 .retval = 42,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010131 },
10132 {
10133 "calls: write into callee stack frame",
10134 .insns = {
10135 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10136 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
10137 BPF_EXIT_INSN(),
10138 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
10139 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
10140 BPF_EXIT_INSN(),
10141 },
10142 .prog_type = BPF_PROG_TYPE_XDP,
10143 .errstr = "cannot return stack pointer",
10144 .result = REJECT,
10145 },
10146 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010147 "calls: two calls with stack write and void return",
10148 .insns = {
10149 /* main prog */
10150 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10151 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10152 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10153 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10155 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10156 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10157 BPF_EXIT_INSN(),
10158
10159 /* subprog 1 */
10160 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10161 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10162 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10163 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10164 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10165 BPF_EXIT_INSN(),
10166
10167 /* subprog 2 */
10168 /* write into stack frame of main prog */
10169 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
10170 BPF_EXIT_INSN(), /* void return */
10171 },
10172 .prog_type = BPF_PROG_TYPE_XDP,
10173 .result = ACCEPT,
10174 },
10175 {
10176 "calls: ambiguous return value",
10177 .insns = {
10178 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10179 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10180 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10181 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10183 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10184 BPF_EXIT_INSN(),
10185 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10186 BPF_MOV64_IMM(BPF_REG_0, 0),
10187 BPF_EXIT_INSN(),
10188 },
10189 .errstr_unpriv = "allowed for root only",
10190 .result_unpriv = REJECT,
10191 .errstr = "R0 !read_ok",
10192 .result = REJECT,
10193 },
10194 {
10195 "calls: two calls that return map_value",
10196 .insns = {
10197 /* main prog */
10198 /* pass fp-16, fp-8 into a function */
10199 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10200 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10201 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10202 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10203 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10204
10205 /* fetch map_value_ptr from the stack of this function */
10206 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
10207 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10208 /* write into map value */
10209 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10210 /* fetch second map_value_ptr from the stack */
10211 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10213 /* write into map value */
10214 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10215 BPF_MOV64_IMM(BPF_REG_0, 0),
10216 BPF_EXIT_INSN(),
10217
10218 /* subprog 1 */
10219 /* call 3rd function twice */
10220 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10221 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10222 /* first time with fp-8 */
10223 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10224 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10225 /* second time with fp-16 */
10226 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10227 BPF_EXIT_INSN(),
10228
10229 /* subprog 2 */
10230 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10231 /* lookup from map */
10232 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10233 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10235 BPF_LD_MAP_FD(BPF_REG_1, 0),
10236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10237 BPF_FUNC_map_lookup_elem),
10238 /* write map_value_ptr into stack frame of main prog */
10239 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10240 BPF_MOV64_IMM(BPF_REG_0, 0),
10241 BPF_EXIT_INSN(), /* return 0 */
10242 },
10243 .prog_type = BPF_PROG_TYPE_XDP,
10244 .fixup_map1 = { 23 },
10245 .result = ACCEPT,
10246 },
10247 {
10248 "calls: two calls that return map_value with bool condition",
10249 .insns = {
10250 /* main prog */
10251 /* pass fp-16, fp-8 into a function */
10252 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10253 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10254 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10255 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10256 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10257 BPF_MOV64_IMM(BPF_REG_0, 0),
10258 BPF_EXIT_INSN(),
10259
10260 /* subprog 1 */
10261 /* call 3rd function twice */
10262 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10263 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10264 /* first time with fp-8 */
10265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10266 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10267 /* fetch map_value_ptr from the stack of this function */
10268 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10269 /* write into map value */
10270 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10271 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10272 /* second time with fp-16 */
10273 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10274 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10275 /* fetch second map_value_ptr from the stack */
10276 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10277 /* write into map value */
10278 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10279 BPF_EXIT_INSN(),
10280
10281 /* subprog 2 */
10282 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10283 /* lookup from map */
10284 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10285 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10287 BPF_LD_MAP_FD(BPF_REG_1, 0),
10288 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10289 BPF_FUNC_map_lookup_elem),
10290 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10291 BPF_MOV64_IMM(BPF_REG_0, 0),
10292 BPF_EXIT_INSN(), /* return 0 */
10293 /* write map_value_ptr into stack frame of main prog */
10294 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10295 BPF_MOV64_IMM(BPF_REG_0, 1),
10296 BPF_EXIT_INSN(), /* return 1 */
10297 },
10298 .prog_type = BPF_PROG_TYPE_XDP,
10299 .fixup_map1 = { 23 },
10300 .result = ACCEPT,
10301 },
10302 {
10303 "calls: two calls that return map_value with incorrect bool check",
10304 .insns = {
10305 /* main prog */
10306 /* pass fp-16, fp-8 into a function */
10307 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10309 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10311 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10312 BPF_MOV64_IMM(BPF_REG_0, 0),
10313 BPF_EXIT_INSN(),
10314
10315 /* subprog 1 */
10316 /* call 3rd function twice */
10317 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10318 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10319 /* first time with fp-8 */
10320 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10321 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10322 /* fetch map_value_ptr from the stack of this function */
10323 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10324 /* write into map value */
10325 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10326 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10327 /* second time with fp-16 */
10328 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10329 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10330 /* fetch second map_value_ptr from the stack */
10331 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10332 /* write into map value */
10333 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10334 BPF_EXIT_INSN(),
10335
10336 /* subprog 2 */
10337 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10338 /* lookup from map */
10339 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10340 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10341 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10342 BPF_LD_MAP_FD(BPF_REG_1, 0),
10343 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10344 BPF_FUNC_map_lookup_elem),
10345 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10346 BPF_MOV64_IMM(BPF_REG_0, 0),
10347 BPF_EXIT_INSN(), /* return 0 */
10348 /* write map_value_ptr into stack frame of main prog */
10349 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10350 BPF_MOV64_IMM(BPF_REG_0, 1),
10351 BPF_EXIT_INSN(), /* return 1 */
10352 },
10353 .prog_type = BPF_PROG_TYPE_XDP,
10354 .fixup_map1 = { 23 },
10355 .result = REJECT,
10356 .errstr = "invalid read from stack off -16+0 size 8",
10357 },
10358 {
10359 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
10360 .insns = {
10361 /* main prog */
10362 /* pass fp-16, fp-8 into a function */
10363 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10364 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10365 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10367 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10368 BPF_MOV64_IMM(BPF_REG_0, 0),
10369 BPF_EXIT_INSN(),
10370
10371 /* subprog 1 */
10372 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10373 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10374 /* 1st lookup from map */
10375 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10376 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10378 BPF_LD_MAP_FD(BPF_REG_1, 0),
10379 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10380 BPF_FUNC_map_lookup_elem),
10381 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10382 BPF_MOV64_IMM(BPF_REG_8, 0),
10383 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10384 /* write map_value_ptr into stack frame of main prog at fp-8 */
10385 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10386 BPF_MOV64_IMM(BPF_REG_8, 1),
10387
10388 /* 2nd lookup from map */
10389 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10391 BPF_LD_MAP_FD(BPF_REG_1, 0),
10392 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10393 BPF_FUNC_map_lookup_elem),
10394 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10395 BPF_MOV64_IMM(BPF_REG_9, 0),
10396 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10397 /* write map_value_ptr into stack frame of main prog at fp-16 */
10398 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10399 BPF_MOV64_IMM(BPF_REG_9, 1),
10400
10401 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10402 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10403 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10404 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10405 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10407 BPF_EXIT_INSN(),
10408
10409 /* subprog 2 */
10410 /* if arg2 == 1 do *arg1 = 0 */
10411 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10412 /* fetch map_value_ptr from the stack of this function */
10413 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10414 /* write into map value */
10415 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10416
10417 /* if arg4 == 1 do *arg3 = 0 */
10418 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10419 /* fetch map_value_ptr from the stack of this function */
10420 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10421 /* write into map value */
10422 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10423 BPF_EXIT_INSN(),
10424 },
10425 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10426 .fixup_map1 = { 12, 22 },
10427 .result = REJECT,
10428 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10429 },
10430 {
10431 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
10432 .insns = {
10433 /* main prog */
10434 /* pass fp-16, fp-8 into a function */
10435 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10437 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10438 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10439 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10440 BPF_MOV64_IMM(BPF_REG_0, 0),
10441 BPF_EXIT_INSN(),
10442
10443 /* subprog 1 */
10444 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10445 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10446 /* 1st lookup from map */
10447 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10448 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10449 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10450 BPF_LD_MAP_FD(BPF_REG_1, 0),
10451 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10452 BPF_FUNC_map_lookup_elem),
10453 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10454 BPF_MOV64_IMM(BPF_REG_8, 0),
10455 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10456 /* write map_value_ptr into stack frame of main prog at fp-8 */
10457 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10458 BPF_MOV64_IMM(BPF_REG_8, 1),
10459
10460 /* 2nd lookup from map */
10461 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10463 BPF_LD_MAP_FD(BPF_REG_1, 0),
10464 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10465 BPF_FUNC_map_lookup_elem),
10466 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10467 BPF_MOV64_IMM(BPF_REG_9, 0),
10468 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10469 /* write map_value_ptr into stack frame of main prog at fp-16 */
10470 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10471 BPF_MOV64_IMM(BPF_REG_9, 1),
10472
10473 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10475 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10476 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10477 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10478 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10479 BPF_EXIT_INSN(),
10480
10481 /* subprog 2 */
10482 /* if arg2 == 1 do *arg1 = 0 */
10483 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10484 /* fetch map_value_ptr from the stack of this function */
10485 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10486 /* write into map value */
10487 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10488
10489 /* if arg4 == 1 do *arg3 = 0 */
10490 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10491 /* fetch map_value_ptr from the stack of this function */
10492 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10493 /* write into map value */
10494 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10495 BPF_EXIT_INSN(),
10496 },
10497 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10498 .fixup_map1 = { 12, 22 },
10499 .result = ACCEPT,
10500 },
10501 {
10502 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
10503 .insns = {
10504 /* main prog */
10505 /* pass fp-16, fp-8 into a function */
10506 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10508 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10510 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
10511 BPF_MOV64_IMM(BPF_REG_0, 0),
10512 BPF_EXIT_INSN(),
10513
10514 /* subprog 1 */
10515 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10516 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10517 /* 1st lookup from map */
10518 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
10519 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10521 BPF_LD_MAP_FD(BPF_REG_1, 0),
10522 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10523 BPF_FUNC_map_lookup_elem),
10524 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10525 BPF_MOV64_IMM(BPF_REG_8, 0),
10526 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10527 /* write map_value_ptr into stack frame of main prog at fp-8 */
10528 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10529 BPF_MOV64_IMM(BPF_REG_8, 1),
10530
10531 /* 2nd lookup from map */
10532 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10534 BPF_LD_MAP_FD(BPF_REG_1, 0),
10535 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10536 BPF_FUNC_map_lookup_elem),
10537 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10538 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
10539 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10540 /* write map_value_ptr into stack frame of main prog at fp-16 */
10541 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10542 BPF_MOV64_IMM(BPF_REG_9, 1),
10543
10544 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10545 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
10546 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10547 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10548 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10549 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
10550 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
10551
10552 /* subprog 2 */
10553 /* if arg2 == 1 do *arg1 = 0 */
10554 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10555 /* fetch map_value_ptr from the stack of this function */
10556 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10557 /* write into map value */
10558 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10559
10560 /* if arg4 == 1 do *arg3 = 0 */
10561 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10562 /* fetch map_value_ptr from the stack of this function */
10563 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10564 /* write into map value */
10565 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10566 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
10567 },
10568 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10569 .fixup_map1 = { 12, 22 },
10570 .result = REJECT,
10571 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10572 },
10573 {
10574 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
10575 .insns = {
10576 /* main prog */
10577 /* pass fp-16, fp-8 into a function */
10578 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10580 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10581 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10582 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10583 BPF_MOV64_IMM(BPF_REG_0, 0),
10584 BPF_EXIT_INSN(),
10585
10586 /* subprog 1 */
10587 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10588 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10589 /* 1st lookup from map */
10590 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10591 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10593 BPF_LD_MAP_FD(BPF_REG_1, 0),
10594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10595 BPF_FUNC_map_lookup_elem),
10596 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10597 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10598 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10599 BPF_MOV64_IMM(BPF_REG_8, 0),
10600 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10601 BPF_MOV64_IMM(BPF_REG_8, 1),
10602
10603 /* 2nd lookup from map */
10604 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10605 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10606 BPF_LD_MAP_FD(BPF_REG_1, 0),
10607 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10608 BPF_FUNC_map_lookup_elem),
10609 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10610 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10611 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10612 BPF_MOV64_IMM(BPF_REG_9, 0),
10613 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10614 BPF_MOV64_IMM(BPF_REG_9, 1),
10615
10616 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10617 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10618 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10619 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10620 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10621 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10622 BPF_EXIT_INSN(),
10623
10624 /* subprog 2 */
10625 /* if arg2 == 1 do *arg1 = 0 */
10626 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10627 /* fetch map_value_ptr from the stack of this function */
10628 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10629 /* write into map value */
10630 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10631
10632 /* if arg4 == 1 do *arg3 = 0 */
10633 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10634 /* fetch map_value_ptr from the stack of this function */
10635 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10636 /* write into map value */
10637 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10638 BPF_EXIT_INSN(),
10639 },
10640 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10641 .fixup_map1 = { 12, 22 },
10642 .result = ACCEPT,
10643 },
10644 {
10645 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
10646 .insns = {
10647 /* main prog */
10648 /* pass fp-16, fp-8 into a function */
10649 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10650 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10651 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10652 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10653 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10654 BPF_MOV64_IMM(BPF_REG_0, 0),
10655 BPF_EXIT_INSN(),
10656
10657 /* subprog 1 */
10658 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10659 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10660 /* 1st lookup from map */
10661 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10662 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10664 BPF_LD_MAP_FD(BPF_REG_1, 0),
10665 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10666 BPF_FUNC_map_lookup_elem),
10667 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10668 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10669 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10670 BPF_MOV64_IMM(BPF_REG_8, 0),
10671 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10672 BPF_MOV64_IMM(BPF_REG_8, 1),
10673
10674 /* 2nd lookup from map */
10675 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10677 BPF_LD_MAP_FD(BPF_REG_1, 0),
10678 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10679 BPF_FUNC_map_lookup_elem),
10680 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10681 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10682 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10683 BPF_MOV64_IMM(BPF_REG_9, 0),
10684 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10685 BPF_MOV64_IMM(BPF_REG_9, 1),
10686
10687 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10688 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10689 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10690 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10691 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10693 BPF_EXIT_INSN(),
10694
10695 /* subprog 2 */
10696 /* if arg2 == 1 do *arg1 = 0 */
10697 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10698 /* fetch map_value_ptr from the stack of this function */
10699 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10700 /* write into map value */
10701 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10702
10703 /* if arg4 == 0 do *arg3 = 0 */
10704 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
10705 /* fetch map_value_ptr from the stack of this function */
10706 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10707 /* write into map value */
10708 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10709 BPF_EXIT_INSN(),
10710 },
10711 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10712 .fixup_map1 = { 12, 22 },
10713 .result = REJECT,
10714 .errstr = "R0 invalid mem access 'inv'",
10715 },
10716 {
10717 "calls: pkt_ptr spill into caller stack",
10718 .insns = {
10719 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10721 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10722 BPF_EXIT_INSN(),
10723
10724 /* subprog 1 */
10725 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10726 offsetof(struct __sk_buff, data)),
10727 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10728 offsetof(struct __sk_buff, data_end)),
10729 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10731 /* spill unchecked pkt_ptr into stack of caller */
10732 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10733 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10734 /* now the pkt range is verified, read pkt_ptr from stack */
10735 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10736 /* write 4 bytes into packet */
10737 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10738 BPF_EXIT_INSN(),
10739 },
10740 .result = ACCEPT,
10741 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010742 .retval = POINTER_VALUE,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010743 },
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080010744 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010745 "calls: pkt_ptr spill into caller stack 2",
10746 .insns = {
10747 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10748 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10749 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10750 /* Marking is still kept, but not in all cases safe. */
10751 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10752 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10753 BPF_EXIT_INSN(),
10754
10755 /* subprog 1 */
10756 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10757 offsetof(struct __sk_buff, data)),
10758 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10759 offsetof(struct __sk_buff, data_end)),
10760 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10762 /* spill unchecked pkt_ptr into stack of caller */
10763 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10764 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10765 /* now the pkt range is verified, read pkt_ptr from stack */
10766 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10767 /* write 4 bytes into packet */
10768 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10769 BPF_EXIT_INSN(),
10770 },
10771 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10772 .errstr = "invalid access to packet",
10773 .result = REJECT,
10774 },
10775 {
10776 "calls: pkt_ptr spill into caller stack 3",
10777 .insns = {
10778 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10779 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10780 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
10782 /* Marking is still kept and safe here. */
10783 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10784 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10785 BPF_EXIT_INSN(),
10786
10787 /* subprog 1 */
10788 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10789 offsetof(struct __sk_buff, data)),
10790 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10791 offsetof(struct __sk_buff, data_end)),
10792 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10794 /* spill unchecked pkt_ptr into stack of caller */
10795 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10796 BPF_MOV64_IMM(BPF_REG_5, 0),
10797 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10798 BPF_MOV64_IMM(BPF_REG_5, 1),
10799 /* now the pkt range is verified, read pkt_ptr from stack */
10800 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10801 /* write 4 bytes into packet */
10802 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10803 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10804 BPF_EXIT_INSN(),
10805 },
10806 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10807 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010808 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010809 },
10810 {
10811 "calls: pkt_ptr spill into caller stack 4",
10812 .insns = {
10813 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10815 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10816 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
10817		/* Check that the marking propagated. */
10818 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10819 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10820 BPF_EXIT_INSN(),
10821
10822 /* subprog 1 */
10823 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10824 offsetof(struct __sk_buff, data)),
10825 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10826 offsetof(struct __sk_buff, data_end)),
10827 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10829 /* spill unchecked pkt_ptr into stack of caller */
10830 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10831 BPF_MOV64_IMM(BPF_REG_5, 0),
10832 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10833 BPF_MOV64_IMM(BPF_REG_5, 1),
10834 /* don't read back pkt_ptr from stack here */
10835 /* write 4 bytes into packet */
10836 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10837 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10838 BPF_EXIT_INSN(),
10839 },
10840 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10841 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010842 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010843 },
10844 {
10845 "calls: pkt_ptr spill into caller stack 5",
10846 .insns = {
10847 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10849 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
10850 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10851 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10852 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10853 BPF_EXIT_INSN(),
10854
10855 /* subprog 1 */
10856 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10857 offsetof(struct __sk_buff, data)),
10858 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10859 offsetof(struct __sk_buff, data_end)),
10860 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10862 BPF_MOV64_IMM(BPF_REG_5, 0),
10863 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10864 /* spill checked pkt_ptr into stack of caller */
10865 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10866 BPF_MOV64_IMM(BPF_REG_5, 1),
10867 /* don't read back pkt_ptr from stack here */
10868 /* write 4 bytes into packet */
10869 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10870 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10871 BPF_EXIT_INSN(),
10872 },
10873 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10874 .errstr = "same insn cannot be used with different",
10875 .result = REJECT,
10876 },
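	/* The "pkt_ptr spill into caller stack 5" test above is rejected
	 * because the caller's fp-8 slot holds the ctx pointer on the path
	 * where the callee skips the spill and a packet pointer otherwise,
	 * so the caller's final load is a single insn reached with two
	 * different pointer types.
	 */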
10877 {
10878 "calls: pkt_ptr spill into caller stack 6",
10879 .insns = {
10880 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10881 offsetof(struct __sk_buff, data_end)),
10882 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10884 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10885 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10886 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10887 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10888 BPF_EXIT_INSN(),
10889
10890 /* subprog 1 */
10891 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10892 offsetof(struct __sk_buff, data)),
10893 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10894 offsetof(struct __sk_buff, data_end)),
10895 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10896 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10897 BPF_MOV64_IMM(BPF_REG_5, 0),
10898 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10899 /* spill checked pkt_ptr into stack of caller */
10900 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10901 BPF_MOV64_IMM(BPF_REG_5, 1),
10902 /* don't read back pkt_ptr from stack here */
10903 /* write 4 bytes into packet */
10904 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10905 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10906 BPF_EXIT_INSN(),
10907 },
10908 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10909 .errstr = "R4 invalid mem access",
10910 .result = REJECT,
10911 },
10912 {
10913 "calls: pkt_ptr spill into caller stack 7",
10914 .insns = {
10915 BPF_MOV64_IMM(BPF_REG_2, 0),
10916 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10918 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10919 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10920 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10921 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10922 BPF_EXIT_INSN(),
10923
10924 /* subprog 1 */
10925 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10926 offsetof(struct __sk_buff, data)),
10927 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10928 offsetof(struct __sk_buff, data_end)),
10929 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10931 BPF_MOV64_IMM(BPF_REG_5, 0),
10932 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10933 /* spill checked pkt_ptr into stack of caller */
10934 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10935 BPF_MOV64_IMM(BPF_REG_5, 1),
10936 /* don't read back pkt_ptr from stack here */
10937 /* write 4 bytes into packet */
10938 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10939 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10940 BPF_EXIT_INSN(),
10941 },
10942 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10943 .errstr = "R4 invalid mem access",
10944 .result = REJECT,
10945 },
10946 {
10947 "calls: pkt_ptr spill into caller stack 8",
10948 .insns = {
10949 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10950 offsetof(struct __sk_buff, data)),
10951 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10952 offsetof(struct __sk_buff, data_end)),
10953 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10954 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10955 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
10956 BPF_EXIT_INSN(),
10957 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10959 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10960 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10961 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10962 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10963 BPF_EXIT_INSN(),
10964
10965 /* subprog 1 */
10966 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10967 offsetof(struct __sk_buff, data)),
10968 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10969 offsetof(struct __sk_buff, data_end)),
10970 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10972 BPF_MOV64_IMM(BPF_REG_5, 0),
10973 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10974 /* spill checked pkt_ptr into stack of caller */
10975 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10976 BPF_MOV64_IMM(BPF_REG_5, 1),
10977 /* don't read back pkt_ptr from stack here */
10978 /* write 4 bytes into packet */
10979 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10980 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10981 BPF_EXIT_INSN(),
10982 },
10983 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10984 .result = ACCEPT,
10985 },
10986 {
10987 "calls: pkt_ptr spill into caller stack 9",
10988 .insns = {
10989 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10990 offsetof(struct __sk_buff, data)),
10991 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10992 offsetof(struct __sk_buff, data_end)),
10993 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10994 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10995 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
10996 BPF_EXIT_INSN(),
10997 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10999 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11000 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11001 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11002 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11003 BPF_EXIT_INSN(),
11004
11005 /* subprog 1 */
11006 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11007 offsetof(struct __sk_buff, data)),
11008 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11009 offsetof(struct __sk_buff, data_end)),
11010 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11012 BPF_MOV64_IMM(BPF_REG_5, 0),
11013 /* spill unchecked pkt_ptr into stack of caller */
11014 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11015 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11016 BPF_MOV64_IMM(BPF_REG_5, 1),
11017 /* don't read back pkt_ptr from stack here */
11018 /* write 4 bytes into packet */
11019 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11020 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11021 BPF_EXIT_INSN(),
11022 },
11023 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11024 .errstr = "invalid access to packet",
11025 .result = REJECT,
11026 },
11027 {
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080011028 "calls: caller stack init to zero or map_value_or_null",
11029 .insns = {
11030 BPF_MOV64_IMM(BPF_REG_0, 0),
11031 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
11032 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11034 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11035 /* fetch map_value_or_null or const_zero from stack */
11036 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11037 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11038 /* store into map_value */
11039 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
11040 BPF_EXIT_INSN(),
11041
11042 /* subprog 1 */
11043 /* if (ctx == 0) return; */
11044 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
11045 /* else bpf_map_lookup() and *(fp - 8) = r0 */
11046 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
11047 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11048 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11049 BPF_LD_MAP_FD(BPF_REG_1, 0),
11050 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11051 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11052 BPF_FUNC_map_lookup_elem),
11053 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11054 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11055 BPF_EXIT_INSN(),
11056 },
11057 .fixup_map1 = { 13 },
11058 .result = ACCEPT,
11059 .prog_type = BPF_PROG_TYPE_XDP,
11060 },
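	/* The test above is accepted: the caller's fp-8 slot holds either
	 * the constant zero written before the call or the
	 * map_value_or_null spilled by the callee, and both cases are
	 * covered by the JEQ-zero check before the store.
	 */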
11061 {
11062 "calls: stack init to zero and pruning",
11063 .insns = {
11064		/* first make allocated_stack 16 bytes */
11065 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
11066		/* now fork the execution such that the false branch
11067		 * of the JGT insn will be verified second and it skips
11068		 * the zero init of the fp-8 stack slot. If stack liveness
11069		 * marking is missing live_read marks from the map_lookup
11070		 * call processing, then pruning will incorrectly assume
11071		 * that the fp-8 stack slot was unused in the fall-through
11072		 * branch and will accept the program incorrectly
11073		 */
11074 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
11075 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11076 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
11077 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11078 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11079 BPF_LD_MAP_FD(BPF_REG_1, 0),
11080 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11081 BPF_FUNC_map_lookup_elem),
11082 BPF_EXIT_INSN(),
11083 },
11084 .fixup_map2 = { 6 },
11085 .errstr = "invalid indirect read from stack off -8+0 size 8",
11086 .result = REJECT,
11087 .prog_type = BPF_PROG_TYPE_XDP,
11088 },
Gianluca Borellofd05e572017-12-23 10:09:55 +000011089 {
11090 "search pruning: all branches should be verified (nop operation)",
11091 .insns = {
11092 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11093 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11094 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
11095 BPF_LD_MAP_FD(BPF_REG_1, 0),
11096 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
11097 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
11098 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
11099 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
11100 BPF_MOV64_IMM(BPF_REG_4, 0),
11101 BPF_JMP_A(1),
11102 BPF_MOV64_IMM(BPF_REG_4, 1),
11103 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
11104 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
11105 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
11106 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
11107 BPF_MOV64_IMM(BPF_REG_6, 0),
11108 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
11109 BPF_EXIT_INSN(),
11110 },
11111 .fixup_map1 = { 3 },
11112 .errstr = "R6 invalid mem access 'inv'",
11113 .result = REJECT,
11114 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11115 },
11116 {
11117 "search pruning: all branches should be verified (invalid stack access)",
11118 .insns = {
11119 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11121 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
11122 BPF_LD_MAP_FD(BPF_REG_1, 0),
11123 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
11124 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
11125 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
11126 BPF_MOV64_IMM(BPF_REG_4, 0),
11127 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
11128 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
11129 BPF_JMP_A(1),
11130 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
11131 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
11132 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
11133 BPF_EXIT_INSN(),
11134 },
11135 .fixup_map1 = { 3 },
11136 .errstr = "invalid read from stack off -16+0 size 8",
11137 .result = REJECT,
11138 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11139 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011140};
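
/* Adding a test is a matter of appending an entry to the array above.
 * A hypothetical minimal entry, shown for illustration only, could look
 * like:
 *
 *	{
 *		"example: uninitialized R0 at exit",
 *		.insns = {
 *			BPF_EXIT_INSN(),
 *		},
 *		.errstr = "R0 !read_ok",
 *		.result = REJECT,
 *	},
 *
 * Unset fields stay zero-initialized; .prog_type defaults to
 * BPF_PROG_TYPE_SOCKET_FILTER when left zero.
 */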
11141
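/* Each test's insns[] is a fixed-size, zero-padded array, so determine
 * the actual program length by scanning backwards for the last non-zero
 * instruction.
 */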
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011142static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011143{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011144 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011145
11146 for (len = MAX_INSNS - 1; len > 0; --len)
11147 if (fp[len].code != 0 || fp[len].imm != 0)
11148 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011149 return len + 1;
11150}
11151
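/* Hash map with an 8-byte (long long) key; value size and max_entries
 * are supplied by the caller.
 */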
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011152static int create_map(uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011153{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011154 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011155
Mickaël Salaünf4874d02017-02-10 00:21:43 +010011156 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011157 size_value, max_elem, BPF_F_NO_PREALLOC);
11158 if (fd < 0)
11159 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011160
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011161 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011162}
11163
11164static int create_prog_array(void)
11165{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011166 int fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011167
Mickaël Salaünf4874d02017-02-10 00:21:43 +010011168 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011169 sizeof(int), 4, 0);
11170 if (fd < 0)
11171 printf("Failed to create prog array '%s'!\n", strerror(errno));
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011172
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011173 return fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011174}
11175
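/* Build an ARRAY_OF_MAPS whose inner map is a one-element int array.
 * The inner fd only serves as a template for the outer map's metadata,
 * so it can be closed once the outer map has been created.
 */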
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011176static int create_map_in_map(void)
11177{
11178 int inner_map_fd, outer_map_fd;
11179
11180 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
11181 sizeof(int), 1, 0);
11182 if (inner_map_fd < 0) {
11183 printf("Failed to create array '%s'!\n", strerror(errno));
11184 return inner_map_fd;
11185 }
11186
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070011187 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011188 sizeof(int), inner_map_fd, 1, 0);
11189 if (outer_map_fd < 0)
11190 printf("Failed to create array of maps '%s'!\n",
11191 strerror(errno));
11192
11193 close(inner_map_fd);
11194
11195 return outer_map_fd;
11196}
11197
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011198static char bpf_vlog[32768];
11199
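/* Each fixup_* array lists the instruction indices whose imm field must
 * be patched with a freshly created map fd (the BPF_LD_MAP_FD insns of
 * the test). The arrays are zero-terminated, so instruction 0 can never
 * be a fixup target.
 */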
11200static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011201 int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011202{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011203 int *fixup_map1 = test->fixup_map1;
11204 int *fixup_map2 = test->fixup_map2;
11205 int *fixup_prog = test->fixup_prog;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011206 int *fixup_map_in_map = test->fixup_map_in_map;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011207
11208	/* Allocating hash tables with 1 elem is fine here, since we only
11209	 * exercise the verifier and never do a runtime lookup, so the
11210	 * only thing that really matters is the value size in this case.
11211	 */
11212 if (*fixup_map1) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011213 map_fds[0] = create_map(sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011214 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011215 prog[*fixup_map1].imm = map_fds[0];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011216 fixup_map1++;
11217 } while (*fixup_map1);
11218 }
11219
11220 if (*fixup_map2) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011221 map_fds[1] = create_map(sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011222 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011223 prog[*fixup_map2].imm = map_fds[1];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011224 fixup_map2++;
11225 } while (*fixup_map2);
11226 }
11227
11228 if (*fixup_prog) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011229 map_fds[2] = create_prog_array();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011230 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011231 prog[*fixup_prog].imm = map_fds[2];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011232 fixup_prog++;
11233 } while (*fixup_prog);
11234 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011235
11236 if (*fixup_map_in_map) {
11237 map_fds[3] = create_map_in_map();
11238 do {
11239 prog[*fixup_map_in_map].imm = map_fds[3];
11240 fixup_map_in_map++;
11241 } while (*fixup_map_in_map);
11242 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011243}
11244
11245static void do_test_single(struct bpf_test *test, bool unpriv,
11246 int *passes, int *errors)
11247{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011248 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011249 struct bpf_insn *prog = test->insns;
11250 int prog_len = probe_filter_length(prog);
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011251 char data_in[TEST_DATA_LEN] = {};
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011252 int prog_type = test->prog_type;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011253 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011254 const char *expected_err;
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011255 uint32_t retval;
11256 int i, err;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011257
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011258 for (i = 0; i < MAX_NR_MAPS; i++)
11259 map_fds[i] = -1;
11260
11261 do_test_fixup(test, prog, map_fds);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011262
Daniel Borkmann614d0d72017-05-25 01:05:09 +020011263 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
11264 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmannd6554902017-07-21 00:00:22 +020011265 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011266
11267 expected_ret = unpriv && test->result_unpriv != UNDEF ?
11268 test->result_unpriv : test->result;
11269 expected_err = unpriv && test->errstr_unpriv ?
11270 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011271
11272 reject_from_alignment = fd_prog < 0 &&
11273 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
11274 strstr(bpf_vlog, "Unknown alignment.");
11275#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
11276 if (reject_from_alignment) {
11277 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
11278 strerror(errno));
11279 goto fail_log;
11280 }
11281#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011282 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011283 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011284 printf("FAIL\nFailed to load prog '%s'!\n",
11285 strerror(errno));
11286 goto fail_log;
11287 }
11288 } else {
11289 if (fd_prog >= 0) {
11290 printf("FAIL\nUnexpected success to load!\n");
11291 goto fail_log;
11292 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011293 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011294 printf("FAIL\nUnexpected error message!\n");
11295 goto fail_log;
11296 }
11297 }
11298
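	/* If the program loaded, also run it once on zeroed input via
	 * bpf_prog_test_run() and compare the return value. 524 is the
	 * kernel-internal ENOTSUPP (it has no uapi define); it and EPERM
	 * are tolerated since not every prog type supports test_run and
	 * unprivileged runs may be refused.
	 */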
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011299 if (fd_prog >= 0) {
11300 err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
11301 NULL, NULL, &retval, NULL);
11302 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
11303 printf("Unexpected bpf_prog_test_run error\n");
11304 goto fail_log;
11305 }
11306 if (!err && retval != test->retval &&
11307 test->retval != POINTER_VALUE) {
11308 printf("FAIL retval %d != %d\n", retval, test->retval);
11309 goto fail_log;
11310 }
11311 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011312 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011313 printf("OK%s\n", reject_from_alignment ?
11314 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011315close_fds:
11316 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011317 for (i = 0; i < MAX_NR_MAPS; i++)
11318 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011319 sched_yield();
11320 return;
11321fail_log:
11322 (*errors)++;
11323 printf("%s", bpf_vlog);
11324 goto close_fds;
11325}
11326
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011327static bool is_admin(void)
11328{
11329 cap_t caps;
11330 cap_flag_value_t sysadmin = CAP_CLEAR;
11331 const cap_value_t cap_val = CAP_SYS_ADMIN;
11332
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080011333#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011334 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
11335 perror("cap_get_flag");
11336 return false;
11337 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080011338#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011339 caps = cap_get_proc();
11340 if (!caps) {
11341 perror("cap_get_proc");
11342 return false;
11343 }
11344 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
11345 perror("cap_get_flag");
11346 if (cap_free(caps))
11347 perror("cap_free");
11348 return (sysadmin == CAP_SET);
11349}
11350
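/* Toggle CAP_SYS_ADMIN in the effective set so that a privileged run
 * can emulate an unprivileged verifier run without changing uids.
 */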
11351static int set_admin(bool admin)
11352{
11353 cap_t caps;
11354 const cap_value_t cap_val = CAP_SYS_ADMIN;
11355 int ret = -1;
11356
11357 caps = cap_get_proc();
11358 if (!caps) {
11359 perror("cap_get_proc");
11360 return -1;
11361 }
11362 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
11363 admin ? CAP_SET : CAP_CLEAR)) {
11364 perror("cap_set_flag");
11365 goto out;
11366 }
11367 if (cap_set_proc(caps)) {
11368 perror("cap_set_proc");
11369 goto out;
11370 }
11371 ret = 0;
11372out:
11373 if (cap_free(caps))
11374 perror("cap_free");
11375 return ret;
11376}
11377
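/* Run each selected test twice where possible: once with CAP_SYS_ADMIN
 * temporarily dropped ("#N/u", default prog type only) and once fully
 * privileged ("#N/p").
 */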
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011378static int do_test(bool unpriv, unsigned int from, unsigned int to)
11379{
11380 int i, passes = 0, errors = 0;
11381
11382 for (i = from; i < to; i++) {
11383 struct bpf_test *test = &tests[i];
11384
11385		/* Program types that are not supported by non-root are
11386		 * skipped for the unprivileged run right away.
11387		 */
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011388 if (!test->prog_type) {
11389 if (!unpriv)
11390 set_admin(false);
11391 printf("#%d/u %s ", i, test->descr);
11392 do_test_single(test, true, &passes, &errors);
11393 if (!unpriv)
11394 set_admin(true);
11395 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011396
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011397 if (!unpriv) {
11398 printf("#%d/p %s ", i, test->descr);
11399 do_test_single(test, false, &passes, &errors);
11400 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011401 }
11402
11403 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
Jesper Dangaard Brouerefe5f9c2017-06-13 15:17:19 +020011404 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011405}
11406
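/* Usage, assuming the binary is built as test_verifier:
 *	./test_verifier		run all tests
 *	./test_verifier N	run only test N
 *	./test_verifier L U	run tests L through U inclusive
 * Unprivileged runs get a 1 MiB RLIMIT_MEMLOCK; privileged runs get an
 * unlimited one.
 */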
11407int main(int argc, char **argv)
11408{
11409 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
11410 struct rlimit rlim = { 1 << 20, 1 << 20 };
11411 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011412 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011413
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011414 if (argc == 3) {
11415 unsigned int l = atoi(argv[argc - 2]);
11416 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011417
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011418 if (l < to && u < to) {
11419 from = l;
11420 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011421 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011422 } else if (argc == 2) {
11423 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011424
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011425 if (t < to) {
11426 from = t;
11427 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011428 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011429 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011430
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011431 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
11432 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011433}