blob: 437c0b1c9d21835b081a3992e303493d030d004e [file] [log] [blame]
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001/*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08005 * Copyright (c) 2017 Facebook
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of version 2 of the GNU General Public
9 * License as published by the Free Software Foundation.
10 */
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011
Daniel Borkmann2c460622017-08-04 22:24:41 +020012#include <endian.h>
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080013#include <asm/types.h>
14#include <linux/types.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010015#include <stdint.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070016#include <stdio.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010017#include <stdlib.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070018#include <unistd.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070019#include <errno.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070020#include <string.h>
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -070021#include <stddef.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070022#include <stdbool.h>
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020023#include <sched.h>
Daniel Borkmann21ccaf22018-01-26 23:33:48 +010024#include <limits.h>
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020025
Mickaël Salaünd02d8982017-02-10 00:21:37 +010026#include <sys/capability.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070027#include <sys/resource.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070028
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020029#include <linux/unistd.h>
30#include <linux/filter.h>
31#include <linux/bpf_perf_event.h>
32#include <linux/bpf.h>
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080033#include <linux/if_ether.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070034
Mickaël Salaün2ee89fb2017-02-10 00:21:38 +010035#include <bpf/bpf.h>
36
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020037#ifdef HAVE_GENHDR
38# include "autoconf.h"
39#else
40# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42# endif
43#endif
44
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020045#include "../../../include/linux/filter.h"
46
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020047#ifndef ARRAY_SIZE
48# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
49#endif
50
51#define MAX_INSNS 512
52#define MAX_FIXUPS 8
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070053#define MAX_NR_MAPS 4
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080054#define POINTER_VALUE 0xcafe4all
55#define TEST_DATA_LEN 64
Alexei Starovoitovbf508872015-10-07 22:23:23 -070056
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020057#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
Daniel Borkmann614d0d72017-05-25 01:05:09 +020058#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020059
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070060struct bpf_test {
61 const char *descr;
62 struct bpf_insn insns[MAX_INSNS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020063 int fixup_map1[MAX_FIXUPS];
64 int fixup_map2[MAX_FIXUPS];
65 int fixup_prog[MAX_FIXUPS];
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070066 int fixup_map_in_map[MAX_FIXUPS];
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070067 const char *errstr;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070068 const char *errstr_unpriv;
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080069 uint32_t retval;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070070 enum {
Alexei Starovoitovbf508872015-10-07 22:23:23 -070071 UNDEF,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070072 ACCEPT,
73 REJECT
Alexei Starovoitovbf508872015-10-07 22:23:23 -070074 } result, result_unpriv;
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070075 enum bpf_prog_type prog_type;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020076 uint8_t flags;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070077};
78
Josef Bacik48461132016-09-28 10:54:32 -040079/* Note we want this to be 64 bit aligned so that the end of our array is
80 * actually the end of the structure.
81 */
82#define MAX_ENTRIES 11
Josef Bacik48461132016-09-28 10:54:32 -040083
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020084struct test_val {
85 unsigned int index;
86 int foo[MAX_ENTRIES];
Josef Bacik48461132016-09-28 10:54:32 -040087};
88
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070089static struct bpf_test tests[] = {
90 {
91 "add+sub+mul",
92 .insns = {
93 BPF_MOV64_IMM(BPF_REG_1, 1),
94 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
95 BPF_MOV64_IMM(BPF_REG_2, 3),
96 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
97 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
98 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
99 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
100 BPF_EXIT_INSN(),
101 },
102 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800103 .retval = -3,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700104 },
105 {
Daniel Borkmann87c17932018-01-20 01:24:32 +0100106 "DIV32 by 0, zero check 1",
107 .insns = {
108 BPF_MOV32_IMM(BPF_REG_0, 42),
109 BPF_MOV32_IMM(BPF_REG_1, 0),
110 BPF_MOV32_IMM(BPF_REG_2, 1),
111 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
112 BPF_EXIT_INSN(),
113 },
114 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100115 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100116 },
117 {
118 "DIV32 by 0, zero check 2",
119 .insns = {
120 BPF_MOV32_IMM(BPF_REG_0, 42),
121 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
122 BPF_MOV32_IMM(BPF_REG_2, 1),
123 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
124 BPF_EXIT_INSN(),
125 },
126 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100127 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100128 },
129 {
130 "DIV64 by 0, zero check",
131 .insns = {
132 BPF_MOV32_IMM(BPF_REG_0, 42),
133 BPF_MOV32_IMM(BPF_REG_1, 0),
134 BPF_MOV32_IMM(BPF_REG_2, 1),
135 BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
136 BPF_EXIT_INSN(),
137 },
138 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100139 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100140 },
141 {
142 "MOD32 by 0, zero check 1",
143 .insns = {
144 BPF_MOV32_IMM(BPF_REG_0, 42),
145 BPF_MOV32_IMM(BPF_REG_1, 0),
146 BPF_MOV32_IMM(BPF_REG_2, 1),
147 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
148 BPF_EXIT_INSN(),
149 },
150 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100151 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100152 },
153 {
154 "MOD32 by 0, zero check 2",
155 .insns = {
156 BPF_MOV32_IMM(BPF_REG_0, 42),
157 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
158 BPF_MOV32_IMM(BPF_REG_2, 1),
159 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
160 BPF_EXIT_INSN(),
161 },
162 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100163 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100164 },
165 {
166 "MOD64 by 0, zero check",
167 .insns = {
168 BPF_MOV32_IMM(BPF_REG_0, 42),
169 BPF_MOV32_IMM(BPF_REG_1, 0),
170 BPF_MOV32_IMM(BPF_REG_2, 1),
171 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
172 BPF_EXIT_INSN(),
173 },
174 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100175 .retval = 42,
176 },
177 {
178 "DIV32 by 0, zero check ok, cls",
179 .insns = {
180 BPF_MOV32_IMM(BPF_REG_0, 42),
181 BPF_MOV32_IMM(BPF_REG_1, 2),
182 BPF_MOV32_IMM(BPF_REG_2, 16),
183 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
184 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
185 BPF_EXIT_INSN(),
186 },
187 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
188 .result = ACCEPT,
189 .retval = 8,
190 },
191 {
192 "DIV32 by 0, zero check 1, cls",
193 .insns = {
194 BPF_MOV32_IMM(BPF_REG_1, 0),
195 BPF_MOV32_IMM(BPF_REG_0, 1),
196 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
197 BPF_EXIT_INSN(),
198 },
199 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
200 .result = ACCEPT,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100201 .retval = 0,
202 },
203 {
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100204 "DIV32 by 0, zero check 2, cls",
205 .insns = {
206 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
207 BPF_MOV32_IMM(BPF_REG_0, 1),
208 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
209 BPF_EXIT_INSN(),
210 },
211 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
212 .result = ACCEPT,
213 .retval = 0,
214 },
215 {
216 "DIV64 by 0, zero check, cls",
217 .insns = {
218 BPF_MOV32_IMM(BPF_REG_1, 0),
219 BPF_MOV32_IMM(BPF_REG_0, 1),
220 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
221 BPF_EXIT_INSN(),
222 },
223 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
224 .result = ACCEPT,
225 .retval = 0,
226 },
227 {
228 "MOD32 by 0, zero check ok, cls",
229 .insns = {
230 BPF_MOV32_IMM(BPF_REG_0, 42),
231 BPF_MOV32_IMM(BPF_REG_1, 3),
232 BPF_MOV32_IMM(BPF_REG_2, 5),
233 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
234 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
235 BPF_EXIT_INSN(),
236 },
237 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
238 .result = ACCEPT,
239 .retval = 2,
240 },
241 {
242 "MOD32 by 0, zero check 1, cls",
243 .insns = {
244 BPF_MOV32_IMM(BPF_REG_1, 0),
245 BPF_MOV32_IMM(BPF_REG_0, 1),
246 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
247 BPF_EXIT_INSN(),
248 },
249 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
250 .result = ACCEPT,
251 .retval = 1,
252 },
253 {
254 "MOD32 by 0, zero check 2, cls",
255 .insns = {
256 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
257 BPF_MOV32_IMM(BPF_REG_0, 1),
258 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
259 BPF_EXIT_INSN(),
260 },
261 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
262 .result = ACCEPT,
263 .retval = 1,
264 },
265 {
266 "MOD64 by 0, zero check 1, cls",
267 .insns = {
268 BPF_MOV32_IMM(BPF_REG_1, 0),
269 BPF_MOV32_IMM(BPF_REG_0, 2),
270 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
271 BPF_EXIT_INSN(),
272 },
273 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
274 .result = ACCEPT,
275 .retval = 2,
276 },
277 {
278 "MOD64 by 0, zero check 2, cls",
279 .insns = {
280 BPF_MOV32_IMM(BPF_REG_1, 0),
281 BPF_MOV32_IMM(BPF_REG_0, -1),
282 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
283 BPF_EXIT_INSN(),
284 },
285 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
286 .result = ACCEPT,
287 .retval = -1,
288 },
289 /* Just make sure that JITs used udiv/umod as otherwise we get
290 * an exception from INT_MIN/-1 overflow similarly as with div
291 * by zero.
292 */
293 {
294 "DIV32 overflow, check 1",
295 .insns = {
296 BPF_MOV32_IMM(BPF_REG_1, -1),
297 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
298 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
299 BPF_EXIT_INSN(),
300 },
301 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
302 .result = ACCEPT,
303 .retval = 0,
304 },
305 {
306 "DIV32 overflow, check 2",
307 .insns = {
308 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
309 BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
310 BPF_EXIT_INSN(),
311 },
312 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
313 .result = ACCEPT,
314 .retval = 0,
315 },
316 {
317 "DIV64 overflow, check 1",
318 .insns = {
319 BPF_MOV64_IMM(BPF_REG_1, -1),
320 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
321 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
322 BPF_EXIT_INSN(),
323 },
324 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
325 .result = ACCEPT,
326 .retval = 0,
327 },
328 {
329 "DIV64 overflow, check 2",
330 .insns = {
331 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
332 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
333 BPF_EXIT_INSN(),
334 },
335 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
336 .result = ACCEPT,
337 .retval = 0,
338 },
339 {
340 "MOD32 overflow, check 1",
341 .insns = {
342 BPF_MOV32_IMM(BPF_REG_1, -1),
343 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
344 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
345 BPF_EXIT_INSN(),
346 },
347 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
348 .result = ACCEPT,
349 .retval = INT_MIN,
350 },
351 {
352 "MOD32 overflow, check 2",
353 .insns = {
354 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
355 BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
356 BPF_EXIT_INSN(),
357 },
358 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
359 .result = ACCEPT,
360 .retval = INT_MIN,
361 },
362 {
363 "MOD64 overflow, check 1",
364 .insns = {
365 BPF_MOV64_IMM(BPF_REG_1, -1),
366 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
367 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
368 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
369 BPF_MOV32_IMM(BPF_REG_0, 0),
370 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
371 BPF_MOV32_IMM(BPF_REG_0, 1),
372 BPF_EXIT_INSN(),
373 },
374 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
375 .result = ACCEPT,
376 .retval = 1,
377 },
378 {
379 "MOD64 overflow, check 2",
380 .insns = {
381 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
382 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
383 BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
384 BPF_MOV32_IMM(BPF_REG_0, 0),
385 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
386 BPF_MOV32_IMM(BPF_REG_0, 1),
387 BPF_EXIT_INSN(),
388 },
389 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
390 .result = ACCEPT,
391 .retval = 1,
392 },
393 {
394 "xor32 zero extend check",
395 .insns = {
396 BPF_MOV32_IMM(BPF_REG_2, -1),
397 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
398 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
399 BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
400 BPF_MOV32_IMM(BPF_REG_0, 2),
401 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
402 BPF_MOV32_IMM(BPF_REG_0, 1),
403 BPF_EXIT_INSN(),
404 },
405 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
406 .result = ACCEPT,
407 .retval = 1,
408 },
409 {
Daniel Borkmann87c17932018-01-20 01:24:32 +0100410 "empty prog",
411 .insns = {
412 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100413 .errstr = "unknown opcode 00",
Daniel Borkmann87c17932018-01-20 01:24:32 +0100414 .result = REJECT,
415 },
416 {
417 "only exit insn",
418 .insns = {
419 BPF_EXIT_INSN(),
420 },
421 .errstr = "R0 !read_ok",
422 .result = REJECT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700423 },
424 {
425 "unreachable",
426 .insns = {
427 BPF_EXIT_INSN(),
428 BPF_EXIT_INSN(),
429 },
430 .errstr = "unreachable",
431 .result = REJECT,
432 },
433 {
434 "unreachable2",
435 .insns = {
436 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
437 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
438 BPF_EXIT_INSN(),
439 },
440 .errstr = "unreachable",
441 .result = REJECT,
442 },
443 {
444 "out of range jump",
445 .insns = {
446 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
447 BPF_EXIT_INSN(),
448 },
449 .errstr = "jump out of range",
450 .result = REJECT,
451 },
452 {
453 "out of range jump2",
454 .insns = {
455 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
456 BPF_EXIT_INSN(),
457 },
458 .errstr = "jump out of range",
459 .result = REJECT,
460 },
461 {
462 "test1 ld_imm64",
463 .insns = {
464 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
465 BPF_LD_IMM64(BPF_REG_0, 0),
466 BPF_LD_IMM64(BPF_REG_0, 0),
467 BPF_LD_IMM64(BPF_REG_0, 1),
468 BPF_LD_IMM64(BPF_REG_0, 1),
469 BPF_MOV64_IMM(BPF_REG_0, 2),
470 BPF_EXIT_INSN(),
471 },
472 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700473 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700474 .result = REJECT,
475 },
476 {
477 "test2 ld_imm64",
478 .insns = {
479 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
480 BPF_LD_IMM64(BPF_REG_0, 0),
481 BPF_LD_IMM64(BPF_REG_0, 0),
482 BPF_LD_IMM64(BPF_REG_0, 1),
483 BPF_LD_IMM64(BPF_REG_0, 1),
484 BPF_EXIT_INSN(),
485 },
486 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700487 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700488 .result = REJECT,
489 },
490 {
491 "test3 ld_imm64",
492 .insns = {
493 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
494 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
495 BPF_LD_IMM64(BPF_REG_0, 0),
496 BPF_LD_IMM64(BPF_REG_0, 0),
497 BPF_LD_IMM64(BPF_REG_0, 1),
498 BPF_LD_IMM64(BPF_REG_0, 1),
499 BPF_EXIT_INSN(),
500 },
501 .errstr = "invalid bpf_ld_imm64 insn",
502 .result = REJECT,
503 },
504 {
505 "test4 ld_imm64",
506 .insns = {
507 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
508 BPF_EXIT_INSN(),
509 },
510 .errstr = "invalid bpf_ld_imm64 insn",
511 .result = REJECT,
512 },
513 {
514 "test5 ld_imm64",
515 .insns = {
516 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
517 },
518 .errstr = "invalid bpf_ld_imm64 insn",
519 .result = REJECT,
520 },
521 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200522 "test6 ld_imm64",
523 .insns = {
524 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
525 BPF_RAW_INSN(0, 0, 0, 0, 0),
526 BPF_EXIT_INSN(),
527 },
528 .result = ACCEPT,
529 },
530 {
531 "test7 ld_imm64",
532 .insns = {
533 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
534 BPF_RAW_INSN(0, 0, 0, 0, 1),
535 BPF_EXIT_INSN(),
536 },
537 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800538 .retval = 1,
Daniel Borkmann728a8532017-04-27 01:39:32 +0200539 },
540 {
541 "test8 ld_imm64",
542 .insns = {
543 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
544 BPF_RAW_INSN(0, 0, 0, 0, 1),
545 BPF_EXIT_INSN(),
546 },
547 .errstr = "uses reserved fields",
548 .result = REJECT,
549 },
550 {
551 "test9 ld_imm64",
552 .insns = {
553 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
554 BPF_RAW_INSN(0, 0, 0, 1, 1),
555 BPF_EXIT_INSN(),
556 },
557 .errstr = "invalid bpf_ld_imm64 insn",
558 .result = REJECT,
559 },
560 {
561 "test10 ld_imm64",
562 .insns = {
563 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
564 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
565 BPF_EXIT_INSN(),
566 },
567 .errstr = "invalid bpf_ld_imm64 insn",
568 .result = REJECT,
569 },
570 {
571 "test11 ld_imm64",
572 .insns = {
573 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
574 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
575 BPF_EXIT_INSN(),
576 },
577 .errstr = "invalid bpf_ld_imm64 insn",
578 .result = REJECT,
579 },
580 {
581 "test12 ld_imm64",
582 .insns = {
583 BPF_MOV64_IMM(BPF_REG_1, 0),
584 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
585 BPF_RAW_INSN(0, 0, 0, 0, 1),
586 BPF_EXIT_INSN(),
587 },
588 .errstr = "not pointing to valid bpf_map",
589 .result = REJECT,
590 },
591 {
592 "test13 ld_imm64",
593 .insns = {
594 BPF_MOV64_IMM(BPF_REG_1, 0),
595 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
596 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
597 BPF_EXIT_INSN(),
598 },
599 .errstr = "invalid bpf_ld_imm64 insn",
600 .result = REJECT,
601 },
602 {
Daniel Borkmann7891a872018-01-10 20:04:37 +0100603 "arsh32 on imm",
604 .insns = {
605 BPF_MOV64_IMM(BPF_REG_0, 1),
606 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
607 BPF_EXIT_INSN(),
608 },
609 .result = REJECT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100610 .errstr = "unknown opcode c4",
Daniel Borkmann7891a872018-01-10 20:04:37 +0100611 },
612 {
613 "arsh32 on reg",
614 .insns = {
615 BPF_MOV64_IMM(BPF_REG_0, 1),
616 BPF_MOV64_IMM(BPF_REG_1, 5),
617 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
618 BPF_EXIT_INSN(),
619 },
620 .result = REJECT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100621 .errstr = "unknown opcode cc",
Daniel Borkmann7891a872018-01-10 20:04:37 +0100622 },
623 {
624 "arsh64 on imm",
625 .insns = {
626 BPF_MOV64_IMM(BPF_REG_0, 1),
627 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
628 BPF_EXIT_INSN(),
629 },
630 .result = ACCEPT,
631 },
632 {
633 "arsh64 on reg",
634 .insns = {
635 BPF_MOV64_IMM(BPF_REG_0, 1),
636 BPF_MOV64_IMM(BPF_REG_1, 5),
637 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
638 BPF_EXIT_INSN(),
639 },
640 .result = ACCEPT,
641 },
642 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700643 "no bpf_exit",
644 .insns = {
645 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
646 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -0800647 .errstr = "not an exit",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700648 .result = REJECT,
649 },
650 {
651 "loop (back-edge)",
652 .insns = {
653 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
654 BPF_EXIT_INSN(),
655 },
656 .errstr = "back-edge",
657 .result = REJECT,
658 },
659 {
660 "loop2 (back-edge)",
661 .insns = {
662 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
663 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
664 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
665 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
666 BPF_EXIT_INSN(),
667 },
668 .errstr = "back-edge",
669 .result = REJECT,
670 },
671 {
672 "conditional loop",
673 .insns = {
674 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
675 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
676 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
677 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
678 BPF_EXIT_INSN(),
679 },
680 .errstr = "back-edge",
681 .result = REJECT,
682 },
683 {
684 "read uninitialized register",
685 .insns = {
686 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
687 BPF_EXIT_INSN(),
688 },
689 .errstr = "R2 !read_ok",
690 .result = REJECT,
691 },
692 {
693 "read invalid register",
694 .insns = {
695 BPF_MOV64_REG(BPF_REG_0, -1),
696 BPF_EXIT_INSN(),
697 },
698 .errstr = "R15 is invalid",
699 .result = REJECT,
700 },
701 {
702 "program doesn't init R0 before exit",
703 .insns = {
704 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
705 BPF_EXIT_INSN(),
706 },
707 .errstr = "R0 !read_ok",
708 .result = REJECT,
709 },
710 {
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700711 "program doesn't init R0 before exit in all branches",
712 .insns = {
713 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
714 BPF_MOV64_IMM(BPF_REG_0, 1),
715 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
716 BPF_EXIT_INSN(),
717 },
718 .errstr = "R0 !read_ok",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700719 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700720 .result = REJECT,
721 },
722 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700723 "stack out of bounds",
724 .insns = {
725 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
726 BPF_EXIT_INSN(),
727 },
728 .errstr = "invalid stack",
729 .result = REJECT,
730 },
731 {
732 "invalid call insn1",
733 .insns = {
734 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
735 BPF_EXIT_INSN(),
736 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100737 .errstr = "unknown opcode 8d",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700738 .result = REJECT,
739 },
740 {
741 "invalid call insn2",
742 .insns = {
743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
744 BPF_EXIT_INSN(),
745 },
746 .errstr = "BPF_CALL uses reserved",
747 .result = REJECT,
748 },
749 {
750 "invalid function call",
751 .insns = {
752 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
753 BPF_EXIT_INSN(),
754 },
Daniel Borkmanne00c7b22016-11-26 01:28:09 +0100755 .errstr = "invalid func unknown#1234567",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700756 .result = REJECT,
757 },
758 {
759 "uninitialized stack1",
760 .insns = {
761 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
763 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
765 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700766 BPF_EXIT_INSN(),
767 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200768 .fixup_map1 = { 2 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700769 .errstr = "invalid indirect read from stack",
770 .result = REJECT,
771 },
772 {
773 "uninitialized stack2",
774 .insns = {
775 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
776 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
777 BPF_EXIT_INSN(),
778 },
779 .errstr = "invalid read from stack",
780 .result = REJECT,
781 },
782 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200783 "invalid fp arithmetic",
784 /* If this gets ever changed, make sure JITs can deal with it. */
785 .insns = {
786 BPF_MOV64_IMM(BPF_REG_0, 0),
787 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
788 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
789 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
790 BPF_EXIT_INSN(),
791 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -0800792 .errstr = "R1 subtraction from stack pointer",
Daniel Borkmann728a8532017-04-27 01:39:32 +0200793 .result = REJECT,
794 },
795 {
796 "non-invalid fp arithmetic",
797 .insns = {
798 BPF_MOV64_IMM(BPF_REG_0, 0),
799 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
800 BPF_EXIT_INSN(),
801 },
802 .result = ACCEPT,
803 },
804 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200805 "invalid argument register",
806 .insns = {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200807 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
808 BPF_FUNC_get_cgroup_classid),
809 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
810 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200811 BPF_EXIT_INSN(),
812 },
813 .errstr = "R1 !read_ok",
814 .result = REJECT,
815 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
816 },
817 {
818 "non-invalid argument register",
819 .insns = {
820 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200821 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
822 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200823 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
825 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200826 BPF_EXIT_INSN(),
827 },
828 .result = ACCEPT,
829 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
830 },
831 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700832 "check valid spill/fill",
833 .insns = {
834 /* spill R1(ctx) into stack */
835 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700836 /* fill it back into R2 */
837 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700838 /* should be able to access R0 = *(R2 + 8) */
Daniel Borkmannf91fe172015-03-01 12:31:41 +0100839 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
840 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700841 BPF_EXIT_INSN(),
842 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700843 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700844 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700845 .result_unpriv = REJECT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800846 .retval = POINTER_VALUE,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700847 },
848 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +0200849 "check valid spill/fill, skb mark",
850 .insns = {
851 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
852 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
853 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
854 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
855 offsetof(struct __sk_buff, mark)),
856 BPF_EXIT_INSN(),
857 },
858 .result = ACCEPT,
859 .result_unpriv = ACCEPT,
860 },
861 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700862 "check corrupted spill/fill",
863 .insns = {
864 /* spill R1(ctx) into stack */
865 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700866 /* mess up with R1 pointer on stack */
867 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700868 /* fill back into R0 should fail */
869 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700870 BPF_EXIT_INSN(),
871 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700872 .errstr_unpriv = "attempt to corrupt spilled",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700873 .errstr = "corrupted spill",
874 .result = REJECT,
875 },
876 {
877 "invalid src register in STX",
878 .insns = {
879 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
880 BPF_EXIT_INSN(),
881 },
882 .errstr = "R15 is invalid",
883 .result = REJECT,
884 },
885 {
886 "invalid dst register in STX",
887 .insns = {
888 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
889 BPF_EXIT_INSN(),
890 },
891 .errstr = "R14 is invalid",
892 .result = REJECT,
893 },
894 {
895 "invalid dst register in ST",
896 .insns = {
897 BPF_ST_MEM(BPF_B, 14, -1, -1),
898 BPF_EXIT_INSN(),
899 },
900 .errstr = "R14 is invalid",
901 .result = REJECT,
902 },
903 {
904 "invalid src register in LDX",
905 .insns = {
906 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
907 BPF_EXIT_INSN(),
908 },
909 .errstr = "R12 is invalid",
910 .result = REJECT,
911 },
912 {
913 "invalid dst register in LDX",
914 .insns = {
915 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
916 BPF_EXIT_INSN(),
917 },
918 .errstr = "R11 is invalid",
919 .result = REJECT,
920 },
921 {
922 "junk insn",
923 .insns = {
924 BPF_RAW_INSN(0, 0, 0, 0, 0),
925 BPF_EXIT_INSN(),
926 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100927 .errstr = "unknown opcode 00",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700928 .result = REJECT,
929 },
930 {
931 "junk insn2",
932 .insns = {
933 BPF_RAW_INSN(1, 0, 0, 0, 0),
934 BPF_EXIT_INSN(),
935 },
936 .errstr = "BPF_LDX uses reserved fields",
937 .result = REJECT,
938 },
939 {
940 "junk insn3",
941 .insns = {
942 BPF_RAW_INSN(-1, 0, 0, 0, 0),
943 BPF_EXIT_INSN(),
944 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100945 .errstr = "unknown opcode ff",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700946 .result = REJECT,
947 },
948 {
949 "junk insn4",
950 .insns = {
951 BPF_RAW_INSN(-1, -1, -1, -1, -1),
952 BPF_EXIT_INSN(),
953 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100954 .errstr = "unknown opcode ff",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700955 .result = REJECT,
956 },
957 {
958 "junk insn5",
959 .insns = {
960 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
961 BPF_EXIT_INSN(),
962 },
963 .errstr = "BPF_ALU uses reserved fields",
964 .result = REJECT,
965 },
966 {
967 "misaligned read from stack",
968 .insns = {
969 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
970 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
971 BPF_EXIT_INSN(),
972 },
Edward Creef65b1842017-08-07 15:27:12 +0100973 .errstr = "misaligned stack access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700974 .result = REJECT,
975 },
976 {
977 "invalid map_fd for function call",
978 .insns = {
979 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
980 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
982 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200983 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
984 BPF_FUNC_map_delete_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700985 BPF_EXIT_INSN(),
986 },
987 .errstr = "fd 0 is not pointing to valid bpf_map",
988 .result = REJECT,
989 },
990 {
991 "don't check return value before access",
992 .insns = {
993 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
994 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
995 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
996 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200997 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
998 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700999 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1000 BPF_EXIT_INSN(),
1001 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001002 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001003 .errstr = "R0 invalid mem access 'map_value_or_null'",
1004 .result = REJECT,
1005 },
1006 {
1007 "access memory with incorrect alignment",
1008 .insns = {
1009 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1010 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1012 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001013 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1014 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001015 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1016 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1017 BPF_EXIT_INSN(),
1018 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001019 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01001020 .errstr = "misaligned value access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001021 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001022 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001023 },
1024 {
1025 "sometimes access memory with incorrect alignment",
1026 .insns = {
1027 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1028 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1029 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1030 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1032 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001033 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1034 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1035 BPF_EXIT_INSN(),
1036 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1037 BPF_EXIT_INSN(),
1038 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001039 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001040 .errstr = "R0 invalid mem access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001041 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001042 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001043 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001044 },
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001045 {
1046 "jump test 1",
1047 .insns = {
1048 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1049 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1050 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1051 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1052 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1053 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1054 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1055 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1057 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1058 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1059 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1060 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1061 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1062 BPF_MOV64_IMM(BPF_REG_0, 0),
1063 BPF_EXIT_INSN(),
1064 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001065 .errstr_unpriv = "R1 pointer comparison",
1066 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001067 .result = ACCEPT,
1068 },
1069 {
1070 "jump test 2",
1071 .insns = {
1072 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1073 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1074 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1075 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1076 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1077 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1078 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1079 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1080 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1081 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1082 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1083 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1084 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1085 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1086 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1087 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1088 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1089 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1090 BPF_MOV64_IMM(BPF_REG_0, 0),
1091 BPF_EXIT_INSN(),
1092 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001093 .errstr_unpriv = "R1 pointer comparison",
1094 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001095 .result = ACCEPT,
1096 },
1097 {
1098 "jump test 3",
1099 .insns = {
1100 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1101 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1102 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1104 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1105 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1106 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1108 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1109 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1110 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1112 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1114 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1116 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1117 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1118 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1120 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1121 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1122 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1124 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001125 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1126 BPF_FUNC_map_delete_elem),
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001127 BPF_EXIT_INSN(),
1128 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001129 .fixup_map1 = { 24 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001130 .errstr_unpriv = "R1 pointer comparison",
1131 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001132 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08001133 .retval = -ENOENT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001134 },
1135 {
1136 "jump test 4",
1137 .insns = {
1138 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1139 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1140 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1141 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1143 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1144 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1145 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1146 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1147 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1148 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1149 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1150 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1151 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1154 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1156 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1157 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1158 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1159 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1160 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1161 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1162 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1163 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1164 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1168 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1169 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1170 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1171 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1172 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1173 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1175 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1177 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1178 BPF_MOV64_IMM(BPF_REG_0, 0),
1179 BPF_EXIT_INSN(),
1180 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001181 .errstr_unpriv = "R1 pointer comparison",
1182 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001183 .result = ACCEPT,
1184 },
Alexei Starovoitov342ded42014-10-28 15:11:42 -07001185 {
1186 "jump test 5",
1187 .insns = {
1188 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1189 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1190 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1191 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1192 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1193 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1194 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1195 BPF_MOV64_IMM(BPF_REG_0, 0),
1196 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1197 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1198 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1199 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1200 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1201 BPF_MOV64_IMM(BPF_REG_0, 0),
1202 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1203 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1204 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1205 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1206 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1207 BPF_MOV64_IMM(BPF_REG_0, 0),
1208 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1209 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1210 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1211 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1212 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1213 BPF_MOV64_IMM(BPF_REG_0, 0),
1214 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1215 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1216 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1217 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1218 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1219 BPF_MOV64_IMM(BPF_REG_0, 0),
1220 BPF_EXIT_INSN(),
1221 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001222 .errstr_unpriv = "R1 pointer comparison",
1223 .result_unpriv = REJECT,
Alexei Starovoitov342ded42014-10-28 15:11:42 -07001224 .result = ACCEPT,
1225 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001226 {
1227 "access skb fields ok",
1228 .insns = {
1229 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1230 offsetof(struct __sk_buff, len)),
1231 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1232 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1233 offsetof(struct __sk_buff, mark)),
1234 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1235 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1236 offsetof(struct __sk_buff, pkt_type)),
1237 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1238 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1239 offsetof(struct __sk_buff, queue_mapping)),
1240 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitovc2497392015-03-16 18:06:02 -07001241 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1242 offsetof(struct __sk_buff, protocol)),
1243 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1244 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1245 offsetof(struct __sk_buff, vlan_present)),
1246 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1247 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1248 offsetof(struct __sk_buff, vlan_tci)),
1249 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Daniel Borkmannb1d9fc42017-04-19 23:01:17 +02001250 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1251 offsetof(struct __sk_buff, napi_id)),
1252 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001253 BPF_EXIT_INSN(),
1254 },
1255 .result = ACCEPT,
1256 },
1257 {
1258 "access skb fields bad1",
1259 .insns = {
1260 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1261 BPF_EXIT_INSN(),
1262 },
1263 .errstr = "invalid bpf_context access",
1264 .result = REJECT,
1265 },
1266 {
1267 "access skb fields bad2",
1268 .insns = {
1269 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1270 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1273 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1275 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001276 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1277 BPF_EXIT_INSN(),
1278 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1279 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1280 offsetof(struct __sk_buff, pkt_type)),
1281 BPF_EXIT_INSN(),
1282 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001283 .fixup_map1 = { 4 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001284 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001285 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001286 .result = REJECT,
1287 },
1288 {
1289 "access skb fields bad3",
1290 .insns = {
1291 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1292 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1293 offsetof(struct __sk_buff, pkt_type)),
1294 BPF_EXIT_INSN(),
1295 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1296 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1298 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001299 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1300 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001301 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1302 BPF_EXIT_INSN(),
1303 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1304 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1305 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001306 .fixup_map1 = { 6 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001307 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001308 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001309 .result = REJECT,
1310 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001311 {
1312 "access skb fields bad4",
1313 .insns = {
1314 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1315 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1316 offsetof(struct __sk_buff, len)),
1317 BPF_MOV64_IMM(BPF_REG_0, 0),
1318 BPF_EXIT_INSN(),
1319 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1320 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1321 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1322 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001323 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1324 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001325 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1326 BPF_EXIT_INSN(),
1327 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1328 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1329 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001330 .fixup_map1 = { 7 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001331 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001332 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001333 .result = REJECT,
1334 },
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001335 {
John Fastabend41bc94f2017-08-15 22:33:56 -07001336 "invalid access __sk_buff family",
1337 .insns = {
1338 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1339 offsetof(struct __sk_buff, family)),
1340 BPF_EXIT_INSN(),
1341 },
1342 .errstr = "invalid bpf_context access",
1343 .result = REJECT,
1344 },
1345 {
1346 "invalid access __sk_buff remote_ip4",
1347 .insns = {
1348 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1349 offsetof(struct __sk_buff, remote_ip4)),
1350 BPF_EXIT_INSN(),
1351 },
1352 .errstr = "invalid bpf_context access",
1353 .result = REJECT,
1354 },
1355 {
1356 "invalid access __sk_buff local_ip4",
1357 .insns = {
1358 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1359 offsetof(struct __sk_buff, local_ip4)),
1360 BPF_EXIT_INSN(),
1361 },
1362 .errstr = "invalid bpf_context access",
1363 .result = REJECT,
1364 },
1365 {
1366 "invalid access __sk_buff remote_ip6",
1367 .insns = {
1368 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1369 offsetof(struct __sk_buff, remote_ip6)),
1370 BPF_EXIT_INSN(),
1371 },
1372 .errstr = "invalid bpf_context access",
1373 .result = REJECT,
1374 },
1375 {
1376 "invalid access __sk_buff local_ip6",
1377 .insns = {
1378 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1379 offsetof(struct __sk_buff, local_ip6)),
1380 BPF_EXIT_INSN(),
1381 },
1382 .errstr = "invalid bpf_context access",
1383 .result = REJECT,
1384 },
1385 {
1386 "invalid access __sk_buff remote_port",
1387 .insns = {
1388 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1389 offsetof(struct __sk_buff, remote_port)),
1390 BPF_EXIT_INSN(),
1391 },
1392 .errstr = "invalid bpf_context access",
1393 .result = REJECT,
1394 },
1395 {
1396 "invalid access __sk_buff remote_port",
1397 .insns = {
1398 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1399 offsetof(struct __sk_buff, local_port)),
1400 BPF_EXIT_INSN(),
1401 },
1402 .errstr = "invalid bpf_context access",
1403 .result = REJECT,
1404 },
1405 {
1406 "valid access __sk_buff family",
1407 .insns = {
1408 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1409 offsetof(struct __sk_buff, family)),
1410 BPF_EXIT_INSN(),
1411 },
1412 .result = ACCEPT,
1413 .prog_type = BPF_PROG_TYPE_SK_SKB,
1414 },
1415 {
1416 "valid access __sk_buff remote_ip4",
1417 .insns = {
1418 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1419 offsetof(struct __sk_buff, remote_ip4)),
1420 BPF_EXIT_INSN(),
1421 },
1422 .result = ACCEPT,
1423 .prog_type = BPF_PROG_TYPE_SK_SKB,
1424 },
1425 {
1426 "valid access __sk_buff local_ip4",
1427 .insns = {
1428 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1429 offsetof(struct __sk_buff, local_ip4)),
1430 BPF_EXIT_INSN(),
1431 },
1432 .result = ACCEPT,
1433 .prog_type = BPF_PROG_TYPE_SK_SKB,
1434 },
1435 {
1436 "valid access __sk_buff remote_ip6",
1437 .insns = {
1438 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1439 offsetof(struct __sk_buff, remote_ip6[0])),
1440 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1441 offsetof(struct __sk_buff, remote_ip6[1])),
1442 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1443 offsetof(struct __sk_buff, remote_ip6[2])),
1444 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1445 offsetof(struct __sk_buff, remote_ip6[3])),
1446 BPF_EXIT_INSN(),
1447 },
1448 .result = ACCEPT,
1449 .prog_type = BPF_PROG_TYPE_SK_SKB,
1450 },
1451 {
1452 "valid access __sk_buff local_ip6",
1453 .insns = {
1454 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1455 offsetof(struct __sk_buff, local_ip6[0])),
1456 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1457 offsetof(struct __sk_buff, local_ip6[1])),
1458 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1459 offsetof(struct __sk_buff, local_ip6[2])),
1460 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1461 offsetof(struct __sk_buff, local_ip6[3])),
1462 BPF_EXIT_INSN(),
1463 },
1464 .result = ACCEPT,
1465 .prog_type = BPF_PROG_TYPE_SK_SKB,
1466 },
1467 {
1468 "valid access __sk_buff remote_port",
1469 .insns = {
1470 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1471 offsetof(struct __sk_buff, remote_port)),
1472 BPF_EXIT_INSN(),
1473 },
1474 .result = ACCEPT,
1475 .prog_type = BPF_PROG_TYPE_SK_SKB,
1476 },
1477 {
1478 "valid access __sk_buff remote_port",
1479 .insns = {
1480 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1481 offsetof(struct __sk_buff, local_port)),
1482 BPF_EXIT_INSN(),
1483 },
1484 .result = ACCEPT,
1485 .prog_type = BPF_PROG_TYPE_SK_SKB,
1486 },
1487 {
John Fastabended850542017-08-28 07:11:24 -07001488 "invalid access of tc_classid for SK_SKB",
1489 .insns = {
1490 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1491 offsetof(struct __sk_buff, tc_classid)),
1492 BPF_EXIT_INSN(),
1493 },
1494 .result = REJECT,
1495 .prog_type = BPF_PROG_TYPE_SK_SKB,
1496 .errstr = "invalid bpf_context access",
1497 },
1498 {
John Fastabendf7e9cb12017-10-18 07:10:58 -07001499 "invalid access of skb->mark for SK_SKB",
1500 .insns = {
1501 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1502 offsetof(struct __sk_buff, mark)),
1503 BPF_EXIT_INSN(),
1504 },
1505 .result = REJECT,
1506 .prog_type = BPF_PROG_TYPE_SK_SKB,
1507 .errstr = "invalid bpf_context access",
1508 },
1509 {
1510 "check skb->mark is not writeable by SK_SKB",
John Fastabended850542017-08-28 07:11:24 -07001511 .insns = {
1512 BPF_MOV64_IMM(BPF_REG_0, 0),
1513 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1514 offsetof(struct __sk_buff, mark)),
1515 BPF_EXIT_INSN(),
1516 },
John Fastabendf7e9cb12017-10-18 07:10:58 -07001517 .result = REJECT,
John Fastabended850542017-08-28 07:11:24 -07001518 .prog_type = BPF_PROG_TYPE_SK_SKB,
John Fastabendf7e9cb12017-10-18 07:10:58 -07001519 .errstr = "invalid bpf_context access",
John Fastabended850542017-08-28 07:11:24 -07001520 },
1521 {
1522 "check skb->tc_index is writeable by SK_SKB",
1523 .insns = {
1524 BPF_MOV64_IMM(BPF_REG_0, 0),
1525 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1526 offsetof(struct __sk_buff, tc_index)),
1527 BPF_EXIT_INSN(),
1528 },
1529 .result = ACCEPT,
1530 .prog_type = BPF_PROG_TYPE_SK_SKB,
1531 },
1532 {
1533 "check skb->priority is writeable by SK_SKB",
1534 .insns = {
1535 BPF_MOV64_IMM(BPF_REG_0, 0),
1536 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1537 offsetof(struct __sk_buff, priority)),
1538 BPF_EXIT_INSN(),
1539 },
1540 .result = ACCEPT,
1541 .prog_type = BPF_PROG_TYPE_SK_SKB,
1542 },
1543 {
1544 "direct packet read for SK_SKB",
1545 .insns = {
1546 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1547 offsetof(struct __sk_buff, data)),
1548 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1549 offsetof(struct __sk_buff, data_end)),
1550 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1552 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1553 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1554 BPF_MOV64_IMM(BPF_REG_0, 0),
1555 BPF_EXIT_INSN(),
1556 },
1557 .result = ACCEPT,
1558 .prog_type = BPF_PROG_TYPE_SK_SKB,
1559 },
1560 {
1561 "direct packet write for SK_SKB",
1562 .insns = {
1563 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1564 offsetof(struct __sk_buff, data)),
1565 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1566 offsetof(struct __sk_buff, data_end)),
1567 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1569 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1570 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1571 BPF_MOV64_IMM(BPF_REG_0, 0),
1572 BPF_EXIT_INSN(),
1573 },
1574 .result = ACCEPT,
1575 .prog_type = BPF_PROG_TYPE_SK_SKB,
1576 },
1577 {
1578 "overlapping checks for direct packet access SK_SKB",
1579 .insns = {
1580 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1581 offsetof(struct __sk_buff, data)),
1582 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1583 offsetof(struct __sk_buff, data_end)),
1584 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1586 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1587 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1589 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1590 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1591 BPF_MOV64_IMM(BPF_REG_0, 0),
1592 BPF_EXIT_INSN(),
1593 },
1594 .result = ACCEPT,
1595 .prog_type = BPF_PROG_TYPE_SK_SKB,
1596 },
1597 {
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001598 "check skb->mark is not writeable by sockets",
1599 .insns = {
1600 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1601 offsetof(struct __sk_buff, mark)),
1602 BPF_EXIT_INSN(),
1603 },
1604 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001605 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001606 .result = REJECT,
1607 },
1608 {
1609 "check skb->tc_index is not writeable by sockets",
1610 .insns = {
1611 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1612 offsetof(struct __sk_buff, tc_index)),
1613 BPF_EXIT_INSN(),
1614 },
1615 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001616 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001617 .result = REJECT,
1618 },
1619 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001620 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001621 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001622 BPF_MOV64_IMM(BPF_REG_0, 0),
1623 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1624 offsetof(struct __sk_buff, cb[0])),
1625 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1626 offsetof(struct __sk_buff, cb[0]) + 1),
1627 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1628 offsetof(struct __sk_buff, cb[0]) + 2),
1629 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1630 offsetof(struct __sk_buff, cb[0]) + 3),
1631 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1632 offsetof(struct __sk_buff, cb[1])),
1633 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1634 offsetof(struct __sk_buff, cb[1]) + 1),
1635 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1636 offsetof(struct __sk_buff, cb[1]) + 2),
1637 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1638 offsetof(struct __sk_buff, cb[1]) + 3),
1639 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1640 offsetof(struct __sk_buff, cb[2])),
1641 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1642 offsetof(struct __sk_buff, cb[2]) + 1),
1643 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1644 offsetof(struct __sk_buff, cb[2]) + 2),
1645 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1646 offsetof(struct __sk_buff, cb[2]) + 3),
1647 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1648 offsetof(struct __sk_buff, cb[3])),
1649 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1650 offsetof(struct __sk_buff, cb[3]) + 1),
1651 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1652 offsetof(struct __sk_buff, cb[3]) + 2),
1653 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1654 offsetof(struct __sk_buff, cb[3]) + 3),
1655 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1656 offsetof(struct __sk_buff, cb[4])),
1657 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1658 offsetof(struct __sk_buff, cb[4]) + 1),
1659 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1660 offsetof(struct __sk_buff, cb[4]) + 2),
1661 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1662 offsetof(struct __sk_buff, cb[4]) + 3),
1663 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1664 offsetof(struct __sk_buff, cb[0])),
1665 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1666 offsetof(struct __sk_buff, cb[0]) + 1),
1667 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1668 offsetof(struct __sk_buff, cb[0]) + 2),
1669 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1670 offsetof(struct __sk_buff, cb[0]) + 3),
1671 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1672 offsetof(struct __sk_buff, cb[1])),
1673 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1674 offsetof(struct __sk_buff, cb[1]) + 1),
1675 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1676 offsetof(struct __sk_buff, cb[1]) + 2),
1677 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1678 offsetof(struct __sk_buff, cb[1]) + 3),
1679 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1680 offsetof(struct __sk_buff, cb[2])),
1681 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1682 offsetof(struct __sk_buff, cb[2]) + 1),
1683 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1684 offsetof(struct __sk_buff, cb[2]) + 2),
1685 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1686 offsetof(struct __sk_buff, cb[2]) + 3),
1687 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1688 offsetof(struct __sk_buff, cb[3])),
1689 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1690 offsetof(struct __sk_buff, cb[3]) + 1),
1691 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1692 offsetof(struct __sk_buff, cb[3]) + 2),
1693 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1694 offsetof(struct __sk_buff, cb[3]) + 3),
1695 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1696 offsetof(struct __sk_buff, cb[4])),
1697 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1698 offsetof(struct __sk_buff, cb[4]) + 1),
1699 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1700 offsetof(struct __sk_buff, cb[4]) + 2),
1701 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1702 offsetof(struct __sk_buff, cb[4]) + 3),
1703 BPF_EXIT_INSN(),
1704 },
1705 .result = ACCEPT,
1706 },
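	/* The next two tests check that byte-sized stores into skb->hash and
	 * at offset 3 within skb->tc_index are rejected as invalid
	 * bpf_context access.
	 */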
1707 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001708 "__sk_buff->hash, offset 0, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001709 .insns = {
1710 BPF_MOV64_IMM(BPF_REG_0, 0),
1711 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001712 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001713 BPF_EXIT_INSN(),
1714 },
1715 .errstr = "invalid bpf_context access",
1716 .result = REJECT,
1717 },
1718 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001719 "__sk_buff->tc_index, offset 3, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001720 .insns = {
1721 BPF_MOV64_IMM(BPF_REG_0, 0),
1722 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001723 offsetof(struct __sk_buff, tc_index) + 3),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001724 BPF_EXIT_INSN(),
1725 },
1726 .errstr = "invalid bpf_context access",
1727 .result = REJECT,
1728 },
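	/* Byte loads of skb->hash are only accepted for a single byte of the
	 * field; the __BYTE_ORDER guards keep the permitted and rejected
	 * cases pointing at the same bytes of the 32-bit value on little-
	 * and big-endian hosts.
	 */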
1729 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001730 "check skb->hash byte load permitted",
1731 .insns = {
1732 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001733#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001734 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1735 offsetof(struct __sk_buff, hash)),
1736#else
1737 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1738 offsetof(struct __sk_buff, hash) + 3),
1739#endif
1740 BPF_EXIT_INSN(),
1741 },
1742 .result = ACCEPT,
1743 },
1744 {
1745 "check skb->hash byte load not permitted 1",
1746 .insns = {
1747 BPF_MOV64_IMM(BPF_REG_0, 0),
1748 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1749 offsetof(struct __sk_buff, hash) + 1),
1750 BPF_EXIT_INSN(),
1751 },
1752 .errstr = "invalid bpf_context access",
1753 .result = REJECT,
1754 },
1755 {
1756 "check skb->hash byte load not permitted 2",
1757 .insns = {
1758 BPF_MOV64_IMM(BPF_REG_0, 0),
1759 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1760 offsetof(struct __sk_buff, hash) + 2),
1761 BPF_EXIT_INSN(),
1762 },
1763 .errstr = "invalid bpf_context access",
1764 .result = REJECT,
1765 },
1766 {
1767 "check skb->hash byte load not permitted 3",
1768 .insns = {
1769 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001770#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001771 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1772 offsetof(struct __sk_buff, hash) + 3),
1773#else
1774 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1775 offsetof(struct __sk_buff, hash)),
1776#endif
1777 BPF_EXIT_INSN(),
1778 },
1779 .errstr = "invalid bpf_context access",
1780 .result = REJECT,
1781 },
1782 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001783 "check cb access: byte, wrong type",
1784 .insns = {
1785 BPF_MOV64_IMM(BPF_REG_0, 0),
1786 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001787 offsetof(struct __sk_buff, cb[0])),
1788 BPF_EXIT_INSN(),
1789 },
1790 .errstr = "invalid bpf_context access",
1791 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001792 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1793 },
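	/* Same cb[] coverage as the byte tests above, but with 2-byte (BPF_H)
	 * stores and loads at offsets 0 and 2 of each cb word. The unaligned
	 * variant is loaded with F_LOAD_WITH_STRICT_ALIGNMENT and must be
	 * rejected as a misaligned context access.
	 */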
1794 {
1795 "check cb access: half",
1796 .insns = {
1797 BPF_MOV64_IMM(BPF_REG_0, 0),
1798 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1799 offsetof(struct __sk_buff, cb[0])),
1800 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1801 offsetof(struct __sk_buff, cb[0]) + 2),
1802 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1803 offsetof(struct __sk_buff, cb[1])),
1804 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1805 offsetof(struct __sk_buff, cb[1]) + 2),
1806 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1807 offsetof(struct __sk_buff, cb[2])),
1808 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1809 offsetof(struct __sk_buff, cb[2]) + 2),
1810 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1811 offsetof(struct __sk_buff, cb[3])),
1812 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1813 offsetof(struct __sk_buff, cb[3]) + 2),
1814 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1815 offsetof(struct __sk_buff, cb[4])),
1816 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1817 offsetof(struct __sk_buff, cb[4]) + 2),
1818 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1819 offsetof(struct __sk_buff, cb[0])),
1820 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1821 offsetof(struct __sk_buff, cb[0]) + 2),
1822 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1823 offsetof(struct __sk_buff, cb[1])),
1824 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1825 offsetof(struct __sk_buff, cb[1]) + 2),
1826 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1827 offsetof(struct __sk_buff, cb[2])),
1828 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1829 offsetof(struct __sk_buff, cb[2]) + 2),
1830 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1831 offsetof(struct __sk_buff, cb[3])),
1832 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1833 offsetof(struct __sk_buff, cb[3]) + 2),
1834 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1835 offsetof(struct __sk_buff, cb[4])),
1836 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1837 offsetof(struct __sk_buff, cb[4]) + 2),
1838 BPF_EXIT_INSN(),
1839 },
1840 .result = ACCEPT,
1841 },
1842 {
1843 "check cb access: half, unaligned",
1844 .insns = {
1845 BPF_MOV64_IMM(BPF_REG_0, 0),
1846 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1847 offsetof(struct __sk_buff, cb[0]) + 1),
1848 BPF_EXIT_INSN(),
1849 },
Edward Creef65b1842017-08-07 15:27:12 +01001850 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001851 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001852 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001853 },
1854 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001855 "check __sk_buff->hash, offset 0, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001856 .insns = {
1857 BPF_MOV64_IMM(BPF_REG_0, 0),
1858 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001859 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001860 BPF_EXIT_INSN(),
1861 },
1862 .errstr = "invalid bpf_context access",
1863 .result = REJECT,
1864 },
1865 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001866 "check __sk_buff->tc_index, offset 2, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001867 .insns = {
1868 BPF_MOV64_IMM(BPF_REG_0, 0),
1869 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001870 offsetof(struct __sk_buff, tc_index) + 2),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001871 BPF_EXIT_INSN(),
1872 },
1873 .errstr = "invalid bpf_context access",
1874 .result = REJECT,
1875 },
1876 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001877 "check skb->hash half load permitted",
1878 .insns = {
1879 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001880#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001881 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1882 offsetof(struct __sk_buff, hash)),
1883#else
1884 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1885 offsetof(struct __sk_buff, hash) + 2),
1886#endif
1887 BPF_EXIT_INSN(),
1888 },
1889 .result = ACCEPT,
1890 },
1891 {
1892 "check skb->hash half load not permitted",
1893 .insns = {
1894 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001895#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001896 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1897 offsetof(struct __sk_buff, hash) + 2),
1898#else
1899 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1900 offsetof(struct __sk_buff, hash)),
1901#endif
1902 BPF_EXIT_INSN(),
1903 },
1904 .errstr = "invalid bpf_context access",
1905 .result = REJECT,
1906 },
1907 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001908 "check cb access: half, wrong type",
1909 .insns = {
1910 BPF_MOV64_IMM(BPF_REG_0, 0),
1911 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1912 offsetof(struct __sk_buff, cb[0])),
1913 BPF_EXIT_INSN(),
1914 },
1915 .errstr = "invalid bpf_context access",
1916 .result = REJECT,
1917 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1918 },
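	/* Word-sized (BPF_W) cb[] accesses: aligned stores and loads of all
	 * five words must be accepted, while the four unaligned variants are
	 * rejected as misaligned context accesses under strict alignment.
	 */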
1919 {
1920 "check cb access: word",
1921 .insns = {
1922 BPF_MOV64_IMM(BPF_REG_0, 0),
1923 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1924 offsetof(struct __sk_buff, cb[0])),
1925 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1926 offsetof(struct __sk_buff, cb[1])),
1927 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1928 offsetof(struct __sk_buff, cb[2])),
1929 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1930 offsetof(struct __sk_buff, cb[3])),
1931 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1932 offsetof(struct __sk_buff, cb[4])),
1933 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1934 offsetof(struct __sk_buff, cb[0])),
1935 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1936 offsetof(struct __sk_buff, cb[1])),
1937 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1938 offsetof(struct __sk_buff, cb[2])),
1939 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1940 offsetof(struct __sk_buff, cb[3])),
1941 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1942 offsetof(struct __sk_buff, cb[4])),
1943 BPF_EXIT_INSN(),
1944 },
1945 .result = ACCEPT,
1946 },
1947 {
1948 "check cb access: word, unaligned 1",
1949 .insns = {
1950 BPF_MOV64_IMM(BPF_REG_0, 0),
1951 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1952 offsetof(struct __sk_buff, cb[0]) + 2),
1953 BPF_EXIT_INSN(),
1954 },
Edward Creef65b1842017-08-07 15:27:12 +01001955 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001956 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001957 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001958 },
1959 {
1960 "check cb access: word, unaligned 2",
1961 .insns = {
1962 BPF_MOV64_IMM(BPF_REG_0, 0),
1963 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1964 offsetof(struct __sk_buff, cb[4]) + 1),
1965 BPF_EXIT_INSN(),
1966 },
Edward Creef65b1842017-08-07 15:27:12 +01001967 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001968 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001969 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001970 },
1971 {
1972 "check cb access: word, unaligned 3",
1973 .insns = {
1974 BPF_MOV64_IMM(BPF_REG_0, 0),
1975 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1976 offsetof(struct __sk_buff, cb[4]) + 2),
1977 BPF_EXIT_INSN(),
1978 },
Edward Creef65b1842017-08-07 15:27:12 +01001979 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001980 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001981 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001982 },
1983 {
1984 "check cb access: word, unaligned 4",
1985 .insns = {
1986 BPF_MOV64_IMM(BPF_REG_0, 0),
1987 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1988 offsetof(struct __sk_buff, cb[4]) + 3),
1989 BPF_EXIT_INSN(),
1990 },
Edward Creef65b1842017-08-07 15:27:12 +01001991 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001992 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001993 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001994 },
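	/* Double-word (BPF_DW) accesses span two adjacent cb words, so only
	 * cb[0] and cb[2] are valid targets: cb[1] and cb[3] are misaligned,
	 * and cb[4] would run past the end of cb[], hence the "oob" tests.
	 */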
1995 {
1996 "check cb access: double",
1997 .insns = {
1998 BPF_MOV64_IMM(BPF_REG_0, 0),
1999 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2000 offsetof(struct __sk_buff, cb[0])),
2001 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2002 offsetof(struct __sk_buff, cb[2])),
2003 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2004 offsetof(struct __sk_buff, cb[0])),
2005 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2006 offsetof(struct __sk_buff, cb[2])),
2007 BPF_EXIT_INSN(),
2008 },
2009 .result = ACCEPT,
2010 },
2011 {
2012 "check cb access: double, unaligned 1",
2013 .insns = {
2014 BPF_MOV64_IMM(BPF_REG_0, 0),
2015 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2016 offsetof(struct __sk_buff, cb[1])),
2017 BPF_EXIT_INSN(),
2018 },
Edward Creef65b1842017-08-07 15:27:12 +01002019 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002020 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002021 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002022 },
2023 {
2024 "check cb access: double, unaligned 2",
2025 .insns = {
2026 BPF_MOV64_IMM(BPF_REG_0, 0),
2027 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2028 offsetof(struct __sk_buff, cb[3])),
2029 BPF_EXIT_INSN(),
2030 },
Edward Creef65b1842017-08-07 15:27:12 +01002031 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002032 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002033 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002034 },
2035 {
2036 "check cb access: double, oob 1",
2037 .insns = {
2038 BPF_MOV64_IMM(BPF_REG_0, 0),
2039 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2040 offsetof(struct __sk_buff, cb[4])),
2041 BPF_EXIT_INSN(),
2042 },
2043 .errstr = "invalid bpf_context access",
2044 .result = REJECT,
2045 },
2046 {
2047 "check cb access: double, oob 2",
2048 .insns = {
2049 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002050 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2051 offsetof(struct __sk_buff, cb[4])),
2052 BPF_EXIT_INSN(),
2053 },
2054 .errstr = "invalid bpf_context access",
2055 .result = REJECT,
2056 },
2057 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002058 "check __sk_buff->ifindex dw store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002059 .insns = {
2060 BPF_MOV64_IMM(BPF_REG_0, 0),
Yonghong Song31fd8582017-06-13 15:52:13 -07002061 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2062 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002063 BPF_EXIT_INSN(),
2064 },
2065 .errstr = "invalid bpf_context access",
2066 .result = REJECT,
2067 },
2068 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002069 "check __sk_buff->ifindex dw load not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002070 .insns = {
2071 BPF_MOV64_IMM(BPF_REG_0, 0),
2072 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
Yonghong Song31fd8582017-06-13 15:52:13 -07002073 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002074 BPF_EXIT_INSN(),
2075 },
2076 .errstr = "invalid bpf_context access",
2077 .result = REJECT,
2078 },
2079 {
2080 "check cb access: double, wrong type",
2081 .insns = {
2082 BPF_MOV64_IMM(BPF_REG_0, 0),
2083 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2084 offsetof(struct __sk_buff, cb[0])),
2085 BPF_EXIT_INSN(),
2086 },
2087 .errstr = "invalid bpf_context access",
2088 .result = REJECT,
2089 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002090 },
2091 {
2092 "check out of range skb->cb access",
2093 .insns = {
2094 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002095 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002096 BPF_EXIT_INSN(),
2097 },
2098 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002099 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002100 .result = REJECT,
2101 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2102 },
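	/* Which __sk_buff fields may be written depends on the program type:
	 * a socket filter may only update cb[], while a tc_cls_act program
	 * may also write mark and tc_index.
	 */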
2103 {
2104 "write skb fields from socket prog",
2105 .insns = {
2106 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2107 offsetof(struct __sk_buff, cb[4])),
2108 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2109 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2110 offsetof(struct __sk_buff, mark)),
2111 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2112 offsetof(struct __sk_buff, tc_index)),
2113 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2114 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2115 offsetof(struct __sk_buff, cb[0])),
2116 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2117 offsetof(struct __sk_buff, cb[2])),
2118 BPF_EXIT_INSN(),
2119 },
2120 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002121 .errstr_unpriv = "R1 leaks addr",
2122 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002123 },
2124 {
2125 "write skb fields from tc_cls_act prog",
2126 .insns = {
2127 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2128 offsetof(struct __sk_buff, cb[0])),
2129 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2130 offsetof(struct __sk_buff, mark)),
2131 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2132 offsetof(struct __sk_buff, tc_index)),
2133 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2134 offsetof(struct __sk_buff, tc_index)),
2135 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2136 offsetof(struct __sk_buff, cb[3])),
2137 BPF_EXIT_INSN(),
2138 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002139 .errstr_unpriv = "",
2140 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002141 .result = ACCEPT,
2142 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2143 },
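	/* PTR_TO_STACK: stores and loads through a copy of the frame pointer
	 * must be properly aligned and stay within the stack. The accepted
	 * variant also checks at runtime that 0xfaceb00c is read back
	 * (.retval).
	 */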
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002144 {
2145 "PTR_TO_STACK store/load",
2146 .insns = {
2147 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2149 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2150 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2151 BPF_EXIT_INSN(),
2152 },
2153 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002154 .retval = 0xfaceb00c,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002155 },
2156 {
2157 "PTR_TO_STACK store/load - bad alignment on off",
2158 .insns = {
2159 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2160 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2161 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2162 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2163 BPF_EXIT_INSN(),
2164 },
2165 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002166 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002167 },
2168 {
2169 "PTR_TO_STACK store/load - bad alignment on reg",
2170 .insns = {
2171 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2173 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2174 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2175 BPF_EXIT_INSN(),
2176 },
2177 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002178 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002179 },
2180 {
2181 "PTR_TO_STACK store/load - out of bounds low",
2182 .insns = {
2183 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2185 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2186 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2187 BPF_EXIT_INSN(),
2188 },
2189 .result = REJECT,
2190 .errstr = "invalid stack off=-79992 size=8",
2191 },
2192 {
2193 "PTR_TO_STACK store/load - out of bounds high",
2194 .insns = {
2195 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2197 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2198 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2199 BPF_EXIT_INSN(),
2200 },
2201 .result = REJECT,
2202 .errstr = "invalid stack off=0 size=8",
2203 },
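	/* The "unpriv:" tests are exercised both with and without privileges;
	 * errstr_unpriv/result_unpriv describe the expected outcome of the
	 * unprivileged run, where leaking kernel pointers, pointer
	 * comparisons and most pointer arithmetic are refused.
	 */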
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002204 {
2205 "unpriv: return pointer",
2206 .insns = {
2207 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2208 BPF_EXIT_INSN(),
2209 },
2210 .result = ACCEPT,
2211 .result_unpriv = REJECT,
2212 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002213 .retval = POINTER_VALUE,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002214 },
2215 {
2216 "unpriv: add const to pointer",
2217 .insns = {
2218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2219 BPF_MOV64_IMM(BPF_REG_0, 0),
2220 BPF_EXIT_INSN(),
2221 },
2222 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002223 },
2224 {
2225 "unpriv: add pointer to pointer",
2226 .insns = {
2227 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2228 BPF_MOV64_IMM(BPF_REG_0, 0),
2229 BPF_EXIT_INSN(),
2230 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08002231 .result = REJECT,
2232 .errstr = "R1 pointer += pointer",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002233 },
2234 {
2235 "unpriv: neg pointer",
2236 .insns = {
2237 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2238 BPF_MOV64_IMM(BPF_REG_0, 0),
2239 BPF_EXIT_INSN(),
2240 },
2241 .result = ACCEPT,
2242 .result_unpriv = REJECT,
2243 .errstr_unpriv = "R1 pointer arithmetic",
2244 },
2245 {
2246 "unpriv: cmp pointer with const",
2247 .insns = {
2248 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2249 BPF_MOV64_IMM(BPF_REG_0, 0),
2250 BPF_EXIT_INSN(),
2251 },
2252 .result = ACCEPT,
2253 .result_unpriv = REJECT,
2254 .errstr_unpriv = "R1 pointer comparison",
2255 },
2256 {
2257 "unpriv: cmp pointer with pointer",
2258 .insns = {
2259 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2260 BPF_MOV64_IMM(BPF_REG_0, 0),
2261 BPF_EXIT_INSN(),
2262 },
2263 .result = ACCEPT,
2264 .result_unpriv = REJECT,
2265 .errstr_unpriv = "R10 pointer comparison",
2266 },
2267 {
2268 "unpriv: check that printk is disallowed",
2269 .insns = {
2270 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2271 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2273 BPF_MOV64_IMM(BPF_REG_2, 8),
2274 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002275 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2276 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002277 BPF_MOV64_IMM(BPF_REG_0, 0),
2278 BPF_EXIT_INSN(),
2279 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01002280 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002281 .result_unpriv = REJECT,
2282 .result = ACCEPT,
2283 },
2284 {
2285 "unpriv: pass pointer to helper function",
2286 .insns = {
2287 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2288 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2290 BPF_LD_MAP_FD(BPF_REG_1, 0),
2291 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2292 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2294 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002295 BPF_MOV64_IMM(BPF_REG_0, 0),
2296 BPF_EXIT_INSN(),
2297 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002298 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002299 .errstr_unpriv = "R4 leaks addr",
2300 .result_unpriv = REJECT,
2301 .result = ACCEPT,
2302 },
2303 {
2304 "unpriv: indirectly pass pointer on stack to helper function",
2305 .insns = {
2306 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2307 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2309 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002310 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2311 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002312 BPF_MOV64_IMM(BPF_REG_0, 0),
2313 BPF_EXIT_INSN(),
2314 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002315 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002316 .errstr = "invalid indirect read from stack off -8+0 size 8",
2317 .result = REJECT,
2318 },
2319 {
2320 "unpriv: mangle pointer on stack 1",
2321 .insns = {
2322 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2323 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2324 BPF_MOV64_IMM(BPF_REG_0, 0),
2325 BPF_EXIT_INSN(),
2326 },
2327 .errstr_unpriv = "attempt to corrupt spilled",
2328 .result_unpriv = REJECT,
2329 .result = ACCEPT,
2330 },
2331 {
2332 "unpriv: mangle pointer on stack 2",
2333 .insns = {
2334 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2335 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2336 BPF_MOV64_IMM(BPF_REG_0, 0),
2337 BPF_EXIT_INSN(),
2338 },
2339 .errstr_unpriv = "attempt to corrupt spilled",
2340 .result_unpriv = REJECT,
2341 .result = ACCEPT,
2342 },
2343 {
2344 "unpriv: read pointer from stack in small chunks",
2345 .insns = {
2346 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2347 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2348 BPF_MOV64_IMM(BPF_REG_0, 0),
2349 BPF_EXIT_INSN(),
2350 },
2351 .errstr = "invalid size",
2352 .result = REJECT,
2353 },
2354 {
2355 "unpriv: write pointer into ctx",
2356 .insns = {
2357 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2358 BPF_MOV64_IMM(BPF_REG_0, 0),
2359 BPF_EXIT_INSN(),
2360 },
2361 .errstr_unpriv = "R1 leaks addr",
2362 .result_unpriv = REJECT,
2363 .errstr = "invalid bpf_context access",
2364 .result = REJECT,
2365 },
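	/* The context pointer may be spilled to the stack and filled back
	 * (spill/fill of ctx 1 and 2); overwriting the spill slot with the
	 * frame pointer or an XADD destroys the pointer type, so the later
	 * helper call must be rejected (ctx 3 and 4).
	 */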
2366 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002367 "unpriv: spill/fill of ctx",
2368 .insns = {
2369 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2370 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2371 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2372 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2373 BPF_MOV64_IMM(BPF_REG_0, 0),
2374 BPF_EXIT_INSN(),
2375 },
2376 .result = ACCEPT,
2377 },
2378 {
2379 "unpriv: spill/fill of ctx 2",
2380 .insns = {
2381 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2383 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2384 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2386 BPF_FUNC_get_hash_recalc),
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002387 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002388 BPF_EXIT_INSN(),
2389 },
2390 .result = ACCEPT,
2391 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2392 },
2393 {
2394 "unpriv: spill/fill of ctx 3",
2395 .insns = {
2396 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2397 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2398 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2399 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2400 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002401 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2402 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002403 BPF_EXIT_INSN(),
2404 },
2405 .result = REJECT,
2406 .errstr = "R1 type=fp expected=ctx",
2407 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2408 },
2409 {
2410 "unpriv: spill/fill of ctx 4",
2411 .insns = {
2412 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2414 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2415 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002416 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2417 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002418 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002419 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2420 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002421 BPF_EXIT_INSN(),
2422 },
2423 .result = REJECT,
2424 .errstr = "R1 type=inv expected=ctx",
2425 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2426 },
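	/* Spilling different pointer types (stack vs. ctx) into the same slot
	 * on different paths and then using the filled value in a single
	 * load/store must fail with "same insn cannot be used with different
	 * pointers".
	 */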
2427 {
2428 "unpriv: spill/fill of different pointers stx",
2429 .insns = {
2430 BPF_MOV64_IMM(BPF_REG_3, 42),
2431 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2433 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2436 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2437 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2438 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2439 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2440 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2441 offsetof(struct __sk_buff, mark)),
2442 BPF_MOV64_IMM(BPF_REG_0, 0),
2443 BPF_EXIT_INSN(),
2444 },
2445 .result = REJECT,
2446 .errstr = "same insn cannot be used with different pointers",
2447 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2448 },
2449 {
2450 "unpriv: spill/fill of different pointers ldx",
2451 .insns = {
2452 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2454 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2457 -(__s32)offsetof(struct bpf_perf_event_data,
2458 sample_period) - 8),
2459 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2460 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2461 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2462 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2463 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2464 offsetof(struct bpf_perf_event_data,
2465 sample_period)),
2466 BPF_MOV64_IMM(BPF_REG_0, 0),
2467 BPF_EXIT_INSN(),
2468 },
2469 .result = REJECT,
2470 .errstr = "same insn cannot be used with different pointers",
2471 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2472 },
2473 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002474 "unpriv: write pointer into map elem value",
2475 .insns = {
2476 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2477 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2479 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002480 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2481 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002482 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2483 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2484 BPF_EXIT_INSN(),
2485 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002486 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002487 .errstr_unpriv = "R0 leaks addr",
2488 .result_unpriv = REJECT,
2489 .result = ACCEPT,
2490 },
2491 {
2492 "unpriv: partial copy of pointer",
2493 .insns = {
2494 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2495 BPF_MOV64_IMM(BPF_REG_0, 0),
2496 BPF_EXIT_INSN(),
2497 },
2498 .errstr_unpriv = "R10 partial copy",
2499 .result_unpriv = REJECT,
2500 .result = ACCEPT,
2501 },
2502 {
2503 "unpriv: pass pointer to tail_call",
2504 .insns = {
2505 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2506 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2508 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002509 BPF_MOV64_IMM(BPF_REG_0, 0),
2510 BPF_EXIT_INSN(),
2511 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002512 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002513 .errstr_unpriv = "R3 leaks addr into helper",
2514 .result_unpriv = REJECT,
2515 .result = ACCEPT,
2516 },
2517 {
2518 "unpriv: cmp map pointer with zero",
2519 .insns = {
2520 BPF_MOV64_IMM(BPF_REG_1, 0),
2521 BPF_LD_MAP_FD(BPF_REG_1, 0),
2522 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2523 BPF_MOV64_IMM(BPF_REG_0, 0),
2524 BPF_EXIT_INSN(),
2525 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002526 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002527 .errstr_unpriv = "R1 pointer comparison",
2528 .result_unpriv = REJECT,
2529 .result = ACCEPT,
2530 },
2531 {
2532 "unpriv: write into frame pointer",
2533 .insns = {
2534 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2535 BPF_MOV64_IMM(BPF_REG_0, 0),
2536 BPF_EXIT_INSN(),
2537 },
2538 .errstr = "frame pointer is read only",
2539 .result = REJECT,
2540 },
2541 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002542 "unpriv: spill/fill frame pointer",
2543 .insns = {
2544 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2546 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2547 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2548 BPF_MOV64_IMM(BPF_REG_0, 0),
2549 BPF_EXIT_INSN(),
2550 },
2551 .errstr = "frame pointer is read only",
2552 .result = REJECT,
2553 },
2554 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002555 "unpriv: cmp of frame pointer",
2556 .insns = {
2557 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2558 BPF_MOV64_IMM(BPF_REG_0, 0),
2559 BPF_EXIT_INSN(),
2560 },
2561 .errstr_unpriv = "R10 pointer comparison",
2562 .result_unpriv = REJECT,
2563 .result = ACCEPT,
2564 },
2565 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002566 "unpriv: adding of fp",
2567 .insns = {
2568 BPF_MOV64_IMM(BPF_REG_0, 0),
2569 BPF_MOV64_IMM(BPF_REG_1, 0),
2570 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2571 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2572 BPF_EXIT_INSN(),
2573 },
Edward Creef65b1842017-08-07 15:27:12 +01002574 .result = ACCEPT,
Daniel Borkmann728a8532017-04-27 01:39:32 +02002575 },
2576 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002577 "unpriv: cmp of stack pointer",
2578 .insns = {
2579 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2580 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2581 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2582 BPF_MOV64_IMM(BPF_REG_0, 0),
2583 BPF_EXIT_INSN(),
2584 },
2585 .errstr_unpriv = "R2 pointer comparison",
2586 .result_unpriv = REJECT,
2587 .result = ACCEPT,
2588 },
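	/* Out-of-range tail call indices (negative, wider than 32 bit) are
	 * fine as far as the verifier is concerned; at runtime the tail call
	 * is expected to simply not take place, so these programs fall
	 * through and return 0.
	 */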
2589 {
Daniel Borkmann16338a92018-02-23 01:03:43 +01002590 "runtime/jit: pass negative index to tail_call",
2591 .insns = {
2592 BPF_MOV64_IMM(BPF_REG_3, -1),
2593 BPF_LD_MAP_FD(BPF_REG_2, 0),
2594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2595 BPF_FUNC_tail_call),
2596 BPF_MOV64_IMM(BPF_REG_0, 0),
2597 BPF_EXIT_INSN(),
2598 },
2599 .fixup_prog = { 1 },
2600 .result = ACCEPT,
2601 },
2602 {
2603 "runtime/jit: pass > 32bit index to tail_call",
2604 .insns = {
2605 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
2606 BPF_LD_MAP_FD(BPF_REG_2, 0),
2607 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2608 BPF_FUNC_tail_call),
2609 BPF_MOV64_IMM(BPF_REG_0, 0),
2610 BPF_EXIT_INSN(),
2611 },
2612 .fixup_prog = { 2 },
2613 .result = ACCEPT,
2614 },
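	/* Pointer arithmetic on a copy of the frame pointer: r7 = fp - 20,
	 * followed by word-sized stores (size 0 is BPF_W) through r7 plus
	 * small offsets, all of which stay inside the stack and must be
	 * accepted.
	 */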
2615 {
Yonghong Song332270f2017-04-29 22:52:42 -07002616 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002617 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07002618 BPF_MOV64_IMM(BPF_REG_1, 4),
2619 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2620 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2621 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2623 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2624 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2625 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2626 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2628 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002629 BPF_MOV64_IMM(BPF_REG_0, 0),
2630 BPF_EXIT_INSN(),
2631 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002632 .result = ACCEPT,
2633 },
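	/* The raw_stack tests use the program's own stack as the destination
	 * buffer of bpf_skb_load_bytes(), roughly this restricted-C pattern
	 * (an 8-byte buffer, as in the tests below):
	 *
	 *	char buf[8];
	 *
	 *	bpf_skb_load_bytes(skb, 4, buf, sizeof(buf));
	 *
	 * The buffer does not need to be initialized beforehand since the
	 * helper writes it, but reading the slot without the call is an
	 * invalid read; negative or unbounded lengths and out-of-range
	 * buffers are rejected, and pointers spilled into the buffer are
	 * treated as clobbered afterwards.
	 */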
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002634 {
2635 "raw_stack: no skb_load_bytes",
2636 .insns = {
2637 BPF_MOV64_IMM(BPF_REG_2, 4),
2638 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2640 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2641 BPF_MOV64_IMM(BPF_REG_4, 8),
2642 /* Call to skb_load_bytes() omitted. */
2643 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2644 BPF_EXIT_INSN(),
2645 },
2646 .result = REJECT,
2647 .errstr = "invalid read from stack off -8+0 size 8",
2648 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2649 },
2650 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002651 "raw_stack: skb_load_bytes, negative len",
2652 .insns = {
2653 BPF_MOV64_IMM(BPF_REG_2, 4),
2654 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2655 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2656 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2657 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002658 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2659 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002660 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2661 BPF_EXIT_INSN(),
2662 },
2663 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002664 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002665 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2666 },
2667 {
2668 "raw_stack: skb_load_bytes, negative len 2",
2669 .insns = {
2670 BPF_MOV64_IMM(BPF_REG_2, 4),
2671 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2673 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2674 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002675 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2676 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002677 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2678 BPF_EXIT_INSN(),
2679 },
2680 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002681 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002682 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2683 },
2684 {
2685 "raw_stack: skb_load_bytes, zero len",
2686 .insns = {
2687 BPF_MOV64_IMM(BPF_REG_2, 4),
2688 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2690 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2691 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2693 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002694 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2695 BPF_EXIT_INSN(),
2696 },
2697 .result = REJECT,
2698 .errstr = "invalid stack type R3",
2699 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2700 },
2701 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002702 "raw_stack: skb_load_bytes, no init",
2703 .insns = {
2704 BPF_MOV64_IMM(BPF_REG_2, 4),
2705 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2707 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2708 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002709 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2710 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002711 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2712 BPF_EXIT_INSN(),
2713 },
2714 .result = ACCEPT,
2715 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2716 },
2717 {
2718 "raw_stack: skb_load_bytes, init",
2719 .insns = {
2720 BPF_MOV64_IMM(BPF_REG_2, 4),
2721 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2722 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2723 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2724 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2725 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002726 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2727 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002728 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2729 BPF_EXIT_INSN(),
2730 },
2731 .result = ACCEPT,
2732 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2733 },
2734 {
2735 "raw_stack: skb_load_bytes, spilled regs around bounds",
2736 .insns = {
2737 BPF_MOV64_IMM(BPF_REG_2, 4),
2738 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002740 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2741 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002742 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2743 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002744 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2745 BPF_FUNC_skb_load_bytes),
2746 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2747 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002748 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2749 offsetof(struct __sk_buff, mark)),
2750 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2751 offsetof(struct __sk_buff, priority)),
2752 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2753 BPF_EXIT_INSN(),
2754 },
2755 .result = ACCEPT,
2756 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2757 },
2758 {
2759 "raw_stack: skb_load_bytes, spilled regs corruption",
2760 .insns = {
2761 BPF_MOV64_IMM(BPF_REG_2, 4),
2762 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2763 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002764 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002765 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2766 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002767 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2768 BPF_FUNC_skb_load_bytes),
2769 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002770 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2771 offsetof(struct __sk_buff, mark)),
2772 BPF_EXIT_INSN(),
2773 },
2774 .result = REJECT,
2775 .errstr = "R0 invalid mem access 'inv'",
2776 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2777 },
2778 {
2779 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2780 .insns = {
2781 BPF_MOV64_IMM(BPF_REG_2, 4),
2782 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002784 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2785 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2786 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002787 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2788 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002789 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2790 BPF_FUNC_skb_load_bytes),
2791 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2792 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2793 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002794 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2795 offsetof(struct __sk_buff, mark)),
2796 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2797 offsetof(struct __sk_buff, priority)),
2798 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2799 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2800 offsetof(struct __sk_buff, pkt_type)),
2801 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2802 BPF_EXIT_INSN(),
2803 },
2804 .result = REJECT,
2805 .errstr = "R3 invalid mem access 'inv'",
2806 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2807 },
2808 {
2809 "raw_stack: skb_load_bytes, spilled regs + data",
2810 .insns = {
2811 BPF_MOV64_IMM(BPF_REG_2, 4),
2812 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2813 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002814 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2815 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2816 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002817 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2818 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002819 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2820 BPF_FUNC_skb_load_bytes),
2821 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2822 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2823 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002824 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2825 offsetof(struct __sk_buff, mark)),
2826 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2827 offsetof(struct __sk_buff, priority)),
2828 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2829 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2830 BPF_EXIT_INSN(),
2831 },
2832 .result = ACCEPT,
2833 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2834 },
2835 {
2836 "raw_stack: skb_load_bytes, invalid access 1",
2837 .insns = {
2838 BPF_MOV64_IMM(BPF_REG_2, 4),
2839 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2841 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2842 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002843 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2844 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002845 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2846 BPF_EXIT_INSN(),
2847 },
2848 .result = REJECT,
2849 .errstr = "invalid stack type R3 off=-513 access_size=8",
2850 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2851 },
2852 {
2853 "raw_stack: skb_load_bytes, invalid access 2",
2854 .insns = {
2855 BPF_MOV64_IMM(BPF_REG_2, 4),
2856 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2857 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2858 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2859 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002860 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2861 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002862 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2863 BPF_EXIT_INSN(),
2864 },
2865 .result = REJECT,
2866 .errstr = "invalid stack type R3 off=-1 access_size=8",
2867 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2868 },
2869 {
2870 "raw_stack: skb_load_bytes, invalid access 3",
2871 .insns = {
2872 BPF_MOV64_IMM(BPF_REG_2, 4),
2873 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2875 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2876 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002877 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2878 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002879 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2880 BPF_EXIT_INSN(),
2881 },
2882 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002883 .errstr = "R4 min value is negative",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002884 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2885 },
2886 {
2887 "raw_stack: skb_load_bytes, invalid access 4",
2888 .insns = {
2889 BPF_MOV64_IMM(BPF_REG_2, 4),
2890 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2891 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2892 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2893 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002894 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2895 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002896 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2897 BPF_EXIT_INSN(),
2898 },
2899 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002900 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002901 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2902 },
2903 {
2904 "raw_stack: skb_load_bytes, invalid access 5",
2905 .insns = {
2906 BPF_MOV64_IMM(BPF_REG_2, 4),
2907 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2909 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2910 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002911 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2912 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002913 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2914 BPF_EXIT_INSN(),
2915 },
2916 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002917 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002918 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2919 },
2920 {
2921 "raw_stack: skb_load_bytes, invalid access 6",
2922 .insns = {
2923 BPF_MOV64_IMM(BPF_REG_2, 4),
2924 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2925 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2926 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2927 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002928 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2929 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002930 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2931 BPF_EXIT_INSN(),
2932 },
2933 .result = REJECT,
2934 .errstr = "invalid stack type R3 off=-512 access_size=0",
2935 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2936 },
2937 {
2938 "raw_stack: skb_load_bytes, large access",
2939 .insns = {
2940 BPF_MOV64_IMM(BPF_REG_2, 4),
2941 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2943 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2944 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002945 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2946 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002947 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2948 BPF_EXIT_INSN(),
2949 },
2950 .result = ACCEPT,
2951 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2952 },
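	/* Stores into the context are only allowed via BPF_STX: the next two
	 * tests check that a BPF_ST immediate store and a BPF_XADD into
	 * skb->mark are rejected even for tc programs, which may write the
	 * field with a regular store.
	 */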
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002953 {
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01002954 "context stores via ST",
2955 .insns = {
2956 BPF_MOV64_IMM(BPF_REG_0, 0),
2957 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
2958 BPF_EXIT_INSN(),
2959 },
2960 .errstr = "BPF_ST stores into R1 context is not allowed",
2961 .result = REJECT,
2962 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2963 },
2964 {
2965 "context stores via XADD",
2966 .insns = {
2967 BPF_MOV64_IMM(BPF_REG_0, 0),
2968 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
2969 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
2970 BPF_EXIT_INSN(),
2971 },
2972 .errstr = "BPF_XADD stores into R1 context is not allowed",
2973 .result = REJECT,
2974 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2975 },
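	/* Direct packet access: skb->data/data_end bound the packet, and
	 * every access must be preceded by a bounds check the verifier can
	 * track. test1 below corresponds roughly to:
	 *
	 *	void *data = (void *)(long)skb->data;
	 *	void *data_end = (void *)(long)skb->data_end;
	 *	__u8 v;
	 *
	 *	if (data + 8 <= data_end)
	 *		v = *(__u8 *)data;
	 *	return 0;
	 *
	 * Later tests flip the comparison, reuse the bounds across branches
	 * and mix shifts and masks into the offset computation.
	 */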
2976 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002977 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002978 .insns = {
2979 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2980 offsetof(struct __sk_buff, data)),
2981 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2982 offsetof(struct __sk_buff, data_end)),
2983 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2985 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2986 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2987 BPF_MOV64_IMM(BPF_REG_0, 0),
2988 BPF_EXIT_INSN(),
2989 },
2990 .result = ACCEPT,
2991 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2992 },
2993 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002994 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002995 .insns = {
2996 BPF_MOV64_IMM(BPF_REG_0, 1),
2997 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2998 offsetof(struct __sk_buff, data_end)),
2999 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3000 offsetof(struct __sk_buff, data)),
3001 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3002 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3003 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3004 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3005 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3006 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3007 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3008 offsetof(struct __sk_buff, data)),
3009 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08003010 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3011 offsetof(struct __sk_buff, len)),
Edward Cree1f9ab382017-08-07 15:29:11 +01003012 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3013 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003014 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3015 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3016 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3017 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3018 offsetof(struct __sk_buff, data_end)),
3019 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3020 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3021 BPF_MOV64_IMM(BPF_REG_0, 0),
3022 BPF_EXIT_INSN(),
3023 },
3024 .result = ACCEPT,
3025 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3026 },
3027 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003028 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003029 .insns = {
3030 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3031 offsetof(struct __sk_buff, data)),
3032 BPF_MOV64_IMM(BPF_REG_0, 0),
3033 BPF_EXIT_INSN(),
3034 },
3035 .errstr = "invalid bpf_context access off=76",
3036 .result = REJECT,
3037 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3038 },
3039 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003040 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003041 .insns = {
3042 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3043 offsetof(struct __sk_buff, data)),
3044 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3045 offsetof(struct __sk_buff, data_end)),
3046 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3048 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3049 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3050 BPF_MOV64_IMM(BPF_REG_0, 0),
3051 BPF_EXIT_INSN(),
3052 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003053 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003054 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3055 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003056 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02003057 "direct packet access: test5 (pkt_end >= reg, good access)",
3058 .insns = {
3059 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3060 offsetof(struct __sk_buff, data)),
3061 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3062 offsetof(struct __sk_buff, data_end)),
3063 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3064 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3065 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3066 BPF_MOV64_IMM(BPF_REG_0, 1),
3067 BPF_EXIT_INSN(),
3068 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3069 BPF_MOV64_IMM(BPF_REG_0, 0),
3070 BPF_EXIT_INSN(),
3071 },
3072 .result = ACCEPT,
3073 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3074 },
3075 {
3076 "direct packet access: test6 (pkt_end >= reg, bad access)",
3077 .insns = {
3078 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3079 offsetof(struct __sk_buff, data)),
3080 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3081 offsetof(struct __sk_buff, data_end)),
3082 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3084 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3085 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3086 BPF_MOV64_IMM(BPF_REG_0, 1),
3087 BPF_EXIT_INSN(),
3088 BPF_MOV64_IMM(BPF_REG_0, 0),
3089 BPF_EXIT_INSN(),
3090 },
3091 .errstr = "invalid access to packet",
3092 .result = REJECT,
3093 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3094 },
3095 {
3096 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3097 .insns = {
3098 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3099 offsetof(struct __sk_buff, data)),
3100 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3101 offsetof(struct __sk_buff, data_end)),
3102 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3104 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3105 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3106 BPF_MOV64_IMM(BPF_REG_0, 1),
3107 BPF_EXIT_INSN(),
3108 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3109 BPF_MOV64_IMM(BPF_REG_0, 0),
3110 BPF_EXIT_INSN(),
3111 },
3112 .errstr = "invalid access to packet",
3113 .result = REJECT,
3114 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3115 },
3116 {
3117 "direct packet access: test8 (double test, variant 1)",
3118 .insns = {
3119 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3120 offsetof(struct __sk_buff, data)),
3121 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3122 offsetof(struct __sk_buff, data_end)),
3123 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3125 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3126 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3127 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3128 BPF_MOV64_IMM(BPF_REG_0, 1),
3129 BPF_EXIT_INSN(),
3130 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3131 BPF_MOV64_IMM(BPF_REG_0, 0),
3132 BPF_EXIT_INSN(),
3133 },
3134 .result = ACCEPT,
3135 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3136 },
3137 {
3138 "direct packet access: test9 (double test, variant 2)",
3139 .insns = {
3140 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3141 offsetof(struct __sk_buff, data)),
3142 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3143 offsetof(struct __sk_buff, data_end)),
3144 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3146 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3147 BPF_MOV64_IMM(BPF_REG_0, 1),
3148 BPF_EXIT_INSN(),
3149 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3150 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3151 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3152 BPF_MOV64_IMM(BPF_REG_0, 0),
3153 BPF_EXIT_INSN(),
3154 },
3155 .result = ACCEPT,
3156 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3157 },
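/*
 * test8/test9 above chain both comparison directions (pkt_end >= reg and
 * reg > pkt_end) in front of the same kind of load, so the range has to be
 * marked on whichever edge proves the access.  The underlying idea, as a
 * sketch:
 *
 *	if (data + 8 > data_end)
 *		return 1;			(false edge: 8 bytes available)
 *	return *(unsigned char *)data;
 */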
3158 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003159 "direct packet access: test10 (write invalid)",
3160 .insns = {
3161 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3162 offsetof(struct __sk_buff, data)),
3163 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3164 offsetof(struct __sk_buff, data_end)),
3165 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3166 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3167 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3168 BPF_MOV64_IMM(BPF_REG_0, 0),
3169 BPF_EXIT_INSN(),
3170 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3171 BPF_MOV64_IMM(BPF_REG_0, 0),
3172 BPF_EXIT_INSN(),
3173 },
3174 .errstr = "invalid access to packet",
3175 .result = REJECT,
3176 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3177 },
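/*
 * In test10 above the one-byte store sits on the branch that is taken when
 * data + 8 > data_end, i.e. exactly when the packet is known to be too
 * short, so it must be rejected even though a comparison is present in the
 * program.  Sketch:
 *
 *	if (data + 8 > data_end)
 *		*(unsigned char *)data = 0;	(reachable only when short)
 */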
3178 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003179 "direct packet access: test11 (shift, good access)",
3180 .insns = {
3181 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3182 offsetof(struct __sk_buff, data)),
3183 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3184 offsetof(struct __sk_buff, data_end)),
3185 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3187 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3188 BPF_MOV64_IMM(BPF_REG_3, 144),
3189 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3191 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3192 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3193 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3194 BPF_MOV64_IMM(BPF_REG_0, 1),
3195 BPF_EXIT_INSN(),
3196 BPF_MOV64_IMM(BPF_REG_0, 0),
3197 BPF_EXIT_INSN(),
3198 },
3199 .result = ACCEPT,
3200 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003201 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003202 },
3203 {
3204 "direct packet access: test12 (and, good access)",
3205 .insns = {
3206 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3207 offsetof(struct __sk_buff, data)),
3208 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3209 offsetof(struct __sk_buff, data_end)),
3210 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3211 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3212 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3213 BPF_MOV64_IMM(BPF_REG_3, 144),
3214 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3216 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3217 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3218 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3219 BPF_MOV64_IMM(BPF_REG_0, 1),
3220 BPF_EXIT_INSN(),
3221 BPF_MOV64_IMM(BPF_REG_0, 0),
3222 BPF_EXIT_INSN(),
3223 },
3224 .result = ACCEPT,
3225 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003226 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003227 },
3228 {
3229 "direct packet access: test13 (branches, good access)",
3230 .insns = {
3231 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3232 offsetof(struct __sk_buff, data)),
3233 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3234 offsetof(struct __sk_buff, data_end)),
3235 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3237 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3238 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3239 offsetof(struct __sk_buff, mark)),
3240 BPF_MOV64_IMM(BPF_REG_4, 1),
3241 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3242 BPF_MOV64_IMM(BPF_REG_3, 14),
3243 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3244 BPF_MOV64_IMM(BPF_REG_3, 24),
3245 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3247 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3248 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3249 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3250 BPF_MOV64_IMM(BPF_REG_0, 1),
3251 BPF_EXIT_INSN(),
3252 BPF_MOV64_IMM(BPF_REG_0, 0),
3253 BPF_EXIT_INSN(),
3254 },
3255 .result = ACCEPT,
3256 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003257 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003258 },
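/*
 * test11-test13 above feed scalars with verifier-known bounds into
 * "pkt_ptr + scalar": a right shift, an AND mask, and a branch that only
 * assigns constants all leave min/max values the verifier can track, so
 * forming the pointer is accepted (the programs never dereference it and
 * simply return 1).  Sketch of the test12 shape:
 *
 *	unsigned long off = (144 + 23) & 15;	(a known small constant, 7)
 *	unsigned char *p = data + off;		(still a packet pointer)
 */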
3259 {
William Tu63dfef72017-02-04 08:37:29 -08003260 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3261 .insns = {
3262 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3263 offsetof(struct __sk_buff, data)),
3264 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3265 offsetof(struct __sk_buff, data_end)),
3266 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3268 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3269 BPF_MOV64_IMM(BPF_REG_5, 12),
3270 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3271 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3272 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3273 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3274 BPF_MOV64_IMM(BPF_REG_0, 1),
3275 BPF_EXIT_INSN(),
3276 BPF_MOV64_IMM(BPF_REG_0, 0),
3277 BPF_EXIT_INSN(),
3278 },
3279 .result = ACCEPT,
3280 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003281 .retval = 1,
William Tu63dfef72017-02-04 08:37:29 -08003282 },
3283 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003284 "direct packet access: test15 (spill with xadd)",
3285 .insns = {
3286 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3287 offsetof(struct __sk_buff, data)),
3288 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3289 offsetof(struct __sk_buff, data_end)),
3290 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3292 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3293 BPF_MOV64_IMM(BPF_REG_5, 4096),
3294 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3296 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3297 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3298 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3299 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3300 BPF_MOV64_IMM(BPF_REG_0, 0),
3301 BPF_EXIT_INSN(),
3302 },
3303 .errstr = "R2 invalid mem access 'inv'",
3304 .result = REJECT,
3305 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3306 },
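/*
 * test15 above spills a packet pointer to the stack and then xadds into the
 * same slot; the spilled pointer is clobbered, so reloading the slot yields
 * an unknown scalar and the store through it has to fail with
 * "R2 invalid mem access 'inv'".  Sketch:
 *
 *	*(u64 *)(fp - 8) = pkt_ptr;
 *	lock *(u64 *)(fp - 8) += 4096;	(slot no longer holds a pointer)
 *	r2 = *(u64 *)(fp - 8);		(unknown scalar now)
 *	*(u32 *)r2 = 4096;		(rejected)
 */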
3307 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02003308 "direct packet access: test16 (arith on data_end)",
3309 .insns = {
3310 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3311 offsetof(struct __sk_buff, data)),
3312 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3313 offsetof(struct __sk_buff, data_end)),
3314 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3317 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3318 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3319 BPF_MOV64_IMM(BPF_REG_0, 0),
3320 BPF_EXIT_INSN(),
3321 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08003322 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
Daniel Borkmann728a8532017-04-27 01:39:32 +02003323 .result = REJECT,
3324 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3325 },
3326 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003327 "direct packet access: test17 (pruning, alignment)",
3328 .insns = {
3329 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3330 offsetof(struct __sk_buff, data)),
3331 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3332 offsetof(struct __sk_buff, data_end)),
3333 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3334 offsetof(struct __sk_buff, mark)),
3335 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3337 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3338 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3339 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3340 BPF_MOV64_IMM(BPF_REG_0, 0),
3341 BPF_EXIT_INSN(),
3342 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3343 BPF_JMP_A(-6),
3344 },
Edward Creef65b1842017-08-07 15:27:12 +01003345 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003346 .result = REJECT,
3347 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3348 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3349 },
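/*
 * test17 above is loaded with strict alignment requested (the
 * F_LOAD_WITH_STRICT_ALIGNMENT flag is presumably translated to
 * BPF_F_STRICT_ALIGNMENT where the program is loaded), and the backwards
 * jump lets the same store be reached with two different constant offsets.
 * Per the expected error string, the leading 2 is the NET_IP_ALIGN assumed
 * for skb packet pointers: the first pass stores at 2 + 14 - 4 = 12
 * (aligned), while the second lands at 2 + 15 - 4 = 13, which is not a
 * multiple of 4 and must be caught rather than pruned away.
 */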
3350 {
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003351 "direct packet access: test18 (imm += pkt_ptr, 1)",
3352 .insns = {
3353 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3354 offsetof(struct __sk_buff, data)),
3355 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3356 offsetof(struct __sk_buff, data_end)),
3357 BPF_MOV64_IMM(BPF_REG_0, 8),
3358 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3359 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3360 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3361 BPF_MOV64_IMM(BPF_REG_0, 0),
3362 BPF_EXIT_INSN(),
3363 },
3364 .result = ACCEPT,
3365 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3366 },
3367 {
3368 "direct packet access: test19 (imm += pkt_ptr, 2)",
3369 .insns = {
3370 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3371 offsetof(struct __sk_buff, data)),
3372 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3373 offsetof(struct __sk_buff, data_end)),
3374 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3375 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3376 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3377 BPF_MOV64_IMM(BPF_REG_4, 4),
3378 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3379 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3380 BPF_MOV64_IMM(BPF_REG_0, 0),
3381 BPF_EXIT_INSN(),
3382 },
3383 .result = ACCEPT,
3384 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3385 },
3386 {
3387 "direct packet access: test20 (x += pkt_ptr, 1)",
3388 .insns = {
3389 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3390 offsetof(struct __sk_buff, data)),
3391 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3392 offsetof(struct __sk_buff, data_end)),
3393 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3394 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3395 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003396 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003397 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3398 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3399 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003401 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3402 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3403 BPF_MOV64_IMM(BPF_REG_0, 0),
3404 BPF_EXIT_INSN(),
3405 },
3406 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3407 .result = ACCEPT,
3408 },
3409 {
3410 "direct packet access: test21 (x += pkt_ptr, 2)",
3411 .insns = {
3412 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3413 offsetof(struct __sk_buff, data)),
3414 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3415 offsetof(struct __sk_buff, data_end)),
3416 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3418 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3419 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3420 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3421 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003422 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003423 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3424 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003425 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003426 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3427 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3428 BPF_MOV64_IMM(BPF_REG_0, 0),
3429 BPF_EXIT_INSN(),
3430 },
3431 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3432 .result = ACCEPT,
3433 },
3434 {
3435 "direct packet access: test22 (x += pkt_ptr, 3)",
3436 .insns = {
3437 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3438 offsetof(struct __sk_buff, data)),
3439 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3440 offsetof(struct __sk_buff, data_end)),
3441 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3442 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3443 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3444 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3445 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3446 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3447 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3448 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3449 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3450 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003451 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003452 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3453 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3455 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3456 BPF_MOV64_IMM(BPF_REG_2, 1),
3457 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3458 BPF_MOV64_IMM(BPF_REG_0, 0),
3459 BPF_EXIT_INSN(),
3460 },
3461 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3462 .result = ACCEPT,
3463 },
3464 {
3465 "direct packet access: test23 (x += pkt_ptr, 4)",
3466 .insns = {
3467 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3468 offsetof(struct __sk_buff, data)),
3469 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3470 offsetof(struct __sk_buff, data_end)),
3471 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3472 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3473 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3474 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3475 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3476 BPF_MOV64_IMM(BPF_REG_0, 31),
3477 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3478 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3479 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3481 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3482 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3483 BPF_MOV64_IMM(BPF_REG_0, 0),
3484 BPF_EXIT_INSN(),
3485 },
3486 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3487 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003488 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003489 },
3490 {
3491 "direct packet access: test24 (x += pkt_ptr, 5)",
3492 .insns = {
3493 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3494 offsetof(struct __sk_buff, data)),
3495 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3496 offsetof(struct __sk_buff, data_end)),
3497 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3498 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3499 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3500 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3501 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3502 BPF_MOV64_IMM(BPF_REG_0, 64),
3503 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3504 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3505 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
Edward Cree1f9ab382017-08-07 15:29:11 +01003506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003507 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3508 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3509 BPF_MOV64_IMM(BPF_REG_0, 0),
3510 BPF_EXIT_INSN(),
3511 },
3512 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3513 .result = ACCEPT,
3514 },
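/*
 * test18-test24 above flip the usual operand order and add the packet
 * pointer to a scalar ("imm += pkt_ptr" / "x += pkt_ptr").  The result is
 * still a packet pointer as long as the scalar's bounds are known, e.g.
 * after an AND mask on a value reloaded from the stack.  Sketch of the
 * accepted shape from test20/test21:
 *
 *	u64 x = *(u64 *)(fp - 8) & 0x7fff;	(bounded scalar)
 *	u8 *p = (u8 *)(x + (u64)data);		(scalar += pkt_ptr)
 *	if (p + 0x7fff - 1 <= data_end)
 *		*(u64 *)p = 0;
 *
 * test23 is the negative case: per the expected error string the copy in R5
 * ends up with packet range 0 (r=0), so its 8-byte store is rejected.
 */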
3515 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003516 "direct packet access: test25 (marking on <, good access)",
3517 .insns = {
3518 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3519 offsetof(struct __sk_buff, data)),
3520 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3521 offsetof(struct __sk_buff, data_end)),
3522 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3523 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3524 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3525 BPF_MOV64_IMM(BPF_REG_0, 0),
3526 BPF_EXIT_INSN(),
3527 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3528 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3529 },
3530 .result = ACCEPT,
3531 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3532 },
3533 {
3534 "direct packet access: test26 (marking on <, bad access)",
3535 .insns = {
3536 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3537 offsetof(struct __sk_buff, data)),
3538 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3539 offsetof(struct __sk_buff, data_end)),
3540 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3541 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3542 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3543 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3544 BPF_MOV64_IMM(BPF_REG_0, 0),
3545 BPF_EXIT_INSN(),
3546 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3547 },
3548 .result = REJECT,
3549 .errstr = "invalid access to packet",
3550 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3551 },
3552 {
3553 "direct packet access: test27 (marking on <=, good access)",
3554 .insns = {
3555 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3556 offsetof(struct __sk_buff, data)),
3557 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3558 offsetof(struct __sk_buff, data_end)),
3559 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3561 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3562 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3563 BPF_MOV64_IMM(BPF_REG_0, 1),
3564 BPF_EXIT_INSN(),
3565 },
3566 .result = ACCEPT,
3567 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003568 .retval = 1,
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003569 },
3570 {
3571 "direct packet access: test28 (marking on <=, bad access)",
3572 .insns = {
3573 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3574 offsetof(struct __sk_buff, data)),
3575 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3576 offsetof(struct __sk_buff, data_end)),
3577 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3579 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3580 BPF_MOV64_IMM(BPF_REG_0, 1),
3581 BPF_EXIT_INSN(),
3582 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3583 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3584 },
3585 .result = REJECT,
3586 .errstr = "invalid access to packet",
3587 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3588 },
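/*
 * test25-test28 above repeat the range-marking checks with the JLT and JLE
 * ("<", "<=") jump opcodes; as before, the access is only legal on the edge
 * where the comparison proves it:
 *
 *	if (data + 8 < data_end)
 *		return *(unsigned char *)data;	(allowed)
 *	return 0;
 */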
3589 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003590 "helper access to packet: test1, valid packet_ptr range",
3591 .insns = {
3592 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3593 offsetof(struct xdp_md, data)),
3594 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3595 offsetof(struct xdp_md, data_end)),
3596 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3598 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3599 BPF_LD_MAP_FD(BPF_REG_1, 0),
3600 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3601 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003602 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3603 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003604 BPF_MOV64_IMM(BPF_REG_0, 0),
3605 BPF_EXIT_INSN(),
3606 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003607 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003608 .result_unpriv = ACCEPT,
3609 .result = ACCEPT,
3610 .prog_type = BPF_PROG_TYPE_XDP,
3611 },
3612 {
3613 "helper access to packet: test2, unchecked packet_ptr",
3614 .insns = {
3615 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3616 offsetof(struct xdp_md, data)),
3617 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3619 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003620 BPF_MOV64_IMM(BPF_REG_0, 0),
3621 BPF_EXIT_INSN(),
3622 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003623 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003624 .result = REJECT,
3625 .errstr = "invalid access to packet",
3626 .prog_type = BPF_PROG_TYPE_XDP,
3627 },
3628 {
3629 "helper access to packet: test3, variable add",
3630 .insns = {
3631 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3632 offsetof(struct xdp_md, data)),
3633 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3634 offsetof(struct xdp_md, data_end)),
3635 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3636 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3637 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3638 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3639 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3640 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3641 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3643 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3644 BPF_LD_MAP_FD(BPF_REG_1, 0),
3645 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3647 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003648 BPF_MOV64_IMM(BPF_REG_0, 0),
3649 BPF_EXIT_INSN(),
3650 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003651 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003652 .result = ACCEPT,
3653 .prog_type = BPF_PROG_TYPE_XDP,
3654 },
3655 {
3656 "helper access to packet: test4, packet_ptr with bad range",
3657 .insns = {
3658 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3659 offsetof(struct xdp_md, data)),
3660 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3661 offsetof(struct xdp_md, data_end)),
3662 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3664 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3665 BPF_MOV64_IMM(BPF_REG_0, 0),
3666 BPF_EXIT_INSN(),
3667 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003668 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3669 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003670 BPF_MOV64_IMM(BPF_REG_0, 0),
3671 BPF_EXIT_INSN(),
3672 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003673 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003674 .result = REJECT,
3675 .errstr = "invalid access to packet",
3676 .prog_type = BPF_PROG_TYPE_XDP,
3677 },
3678 {
3679 "helper access to packet: test5, packet_ptr with too short range",
3680 .insns = {
3681 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3682 offsetof(struct xdp_md, data)),
3683 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3684 offsetof(struct xdp_md, data_end)),
3685 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3686 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3688 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3689 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003690 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3691 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003692 BPF_MOV64_IMM(BPF_REG_0, 0),
3693 BPF_EXIT_INSN(),
3694 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003695 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003696 .result = REJECT,
3697 .errstr = "invalid access to packet",
3698 .prog_type = BPF_PROG_TYPE_XDP,
3699 },
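/*
 * The XDP "helper access to packet" tests above pass packet pointers as
 * map-helper arguments.  That is only accepted when an earlier comparison
 * has proven enough bytes up to data_end to cover what the helper will read
 * (the map key/value here); unchecked or too-short pointers fail with
 * "invalid access to packet".  Sketch, assuming the 8-byte key set up by
 * the fixup_map1 machinery elsewhere in this file:
 *
 *	if (data + 8 <= data_end)
 *		bpf_map_update_elem(&map, data, data, 0);	(accepted)
 *
 * while bpf_map_lookup_elem(&map, data) with no check is rejected.
 * test6-test10 below repeat the same checks for SCHED_CLS programs.
 */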
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003700 {
3701 "helper access to packet: test6, cls valid packet_ptr range",
3702 .insns = {
3703 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3704 offsetof(struct __sk_buff, data)),
3705 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3706 offsetof(struct __sk_buff, data_end)),
3707 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3709 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3710 BPF_LD_MAP_FD(BPF_REG_1, 0),
3711 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3712 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003713 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3714 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003715 BPF_MOV64_IMM(BPF_REG_0, 0),
3716 BPF_EXIT_INSN(),
3717 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003718 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003719 .result = ACCEPT,
3720 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3721 },
3722 {
3723 "helper access to packet: test7, cls unchecked packet_ptr",
3724 .insns = {
3725 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3726 offsetof(struct __sk_buff, data)),
3727 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3729 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003730 BPF_MOV64_IMM(BPF_REG_0, 0),
3731 BPF_EXIT_INSN(),
3732 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003733 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003734 .result = REJECT,
3735 .errstr = "invalid access to packet",
3736 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3737 },
3738 {
3739 "helper access to packet: test8, cls variable add",
3740 .insns = {
3741 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3742 offsetof(struct __sk_buff, data)),
3743 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3744 offsetof(struct __sk_buff, data_end)),
3745 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3747 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3748 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3749 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3750 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3751 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3753 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3754 BPF_LD_MAP_FD(BPF_REG_1, 0),
3755 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003756 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3757 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003758 BPF_MOV64_IMM(BPF_REG_0, 0),
3759 BPF_EXIT_INSN(),
3760 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003761 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003762 .result = ACCEPT,
3763 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3764 },
3765 {
3766 "helper access to packet: test9, cls packet_ptr with bad range",
3767 .insns = {
3768 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3769 offsetof(struct __sk_buff, data)),
3770 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3771 offsetof(struct __sk_buff, data_end)),
3772 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3773 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3774 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3775 BPF_MOV64_IMM(BPF_REG_0, 0),
3776 BPF_EXIT_INSN(),
3777 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003778 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3779 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003780 BPF_MOV64_IMM(BPF_REG_0, 0),
3781 BPF_EXIT_INSN(),
3782 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003783 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003784 .result = REJECT,
3785 .errstr = "invalid access to packet",
3786 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3787 },
3788 {
3789 "helper access to packet: test10, cls packet_ptr with too short range",
3790 .insns = {
3791 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3792 offsetof(struct __sk_buff, data)),
3793 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3794 offsetof(struct __sk_buff, data_end)),
3795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3796 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3798 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3799 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003800 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3801 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003802 BPF_MOV64_IMM(BPF_REG_0, 0),
3803 BPF_EXIT_INSN(),
3804 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003805 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003806 .result = REJECT,
3807 .errstr = "invalid access to packet",
3808 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3809 },
3810 {
3811 "helper access to packet: test11, cls unsuitable helper 1",
3812 .insns = {
3813 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3814 offsetof(struct __sk_buff, data)),
3815 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3816 offsetof(struct __sk_buff, data_end)),
3817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3818 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3820 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3821 BPF_MOV64_IMM(BPF_REG_2, 0),
3822 BPF_MOV64_IMM(BPF_REG_4, 42),
3823 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3825 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003826 BPF_MOV64_IMM(BPF_REG_0, 0),
3827 BPF_EXIT_INSN(),
3828 },
3829 .result = REJECT,
3830 .errstr = "helper access to the packet",
3831 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3832 },
3833 {
3834 "helper access to packet: test12, cls unsuitable helper 2",
3835 .insns = {
3836 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3837 offsetof(struct __sk_buff, data)),
3838 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3839 offsetof(struct __sk_buff, data_end)),
3840 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3842 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3843 BPF_MOV64_IMM(BPF_REG_2, 0),
3844 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3846 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003847 BPF_MOV64_IMM(BPF_REG_0, 0),
3848 BPF_EXIT_INSN(),
3849 },
3850 .result = REJECT,
3851 .errstr = "helper access to the packet",
3852 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3853 },
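/*
 * test11/test12 above hand a packet pointer to bpf_skb_store_bytes() and
 * bpf_skb_load_bytes().  Those helpers are not flagged as allowing
 * packet-pointer arguments (unlike bpf_csum_diff() in the tests that
 * follow), so they are rejected with "helper access to the packet"
 * regardless of any bounds check, e.g.:
 *
 *	if (data + 8 <= data_end)
 *		bpf_skb_store_bytes(skb, 0, data, 42, 0);	(still rejected)
 */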
3854 {
3855 "helper access to packet: test13, cls helper ok",
3856 .insns = {
3857 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3858 offsetof(struct __sk_buff, data)),
3859 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3860 offsetof(struct __sk_buff, data_end)),
3861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3862 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3863 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3864 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3865 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3866 BPF_MOV64_IMM(BPF_REG_2, 4),
3867 BPF_MOV64_IMM(BPF_REG_3, 0),
3868 BPF_MOV64_IMM(BPF_REG_4, 0),
3869 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003870 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3871 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003872 BPF_MOV64_IMM(BPF_REG_0, 0),
3873 BPF_EXIT_INSN(),
3874 },
3875 .result = ACCEPT,
3876 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3877 },
3878 {
Edward Creef65b1842017-08-07 15:27:12 +01003879 "helper access to packet: test14, cls helper ok sub",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003880 .insns = {
3881 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3882 offsetof(struct __sk_buff, data)),
3883 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3884 offsetof(struct __sk_buff, data_end)),
3885 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3886 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3888 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3889 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3890 BPF_MOV64_IMM(BPF_REG_2, 4),
3891 BPF_MOV64_IMM(BPF_REG_3, 0),
3892 BPF_MOV64_IMM(BPF_REG_4, 0),
3893 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003894 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3895 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003896 BPF_MOV64_IMM(BPF_REG_0, 0),
3897 BPF_EXIT_INSN(),
3898 },
Edward Creef65b1842017-08-07 15:27:12 +01003899 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003900 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3901 },
3902 {
Edward Creef65b1842017-08-07 15:27:12 +01003903 "helper access to packet: test15, cls helper fail sub",
3904 .insns = {
3905 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3906 offsetof(struct __sk_buff, data)),
3907 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3908 offsetof(struct __sk_buff, data_end)),
3909 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3910 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3912 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3913 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3914 BPF_MOV64_IMM(BPF_REG_2, 4),
3915 BPF_MOV64_IMM(BPF_REG_3, 0),
3916 BPF_MOV64_IMM(BPF_REG_4, 0),
3917 BPF_MOV64_IMM(BPF_REG_5, 0),
3918 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3919 BPF_FUNC_csum_diff),
3920 BPF_MOV64_IMM(BPF_REG_0, 0),
3921 BPF_EXIT_INSN(),
3922 },
3923 .result = REJECT,
3924 .errstr = "invalid access to packet",
3925 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3926 },
3927 {
3928 "helper access to packet: test16, cls helper fail range 1",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003929 .insns = {
3930 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3931 offsetof(struct __sk_buff, data)),
3932 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3933 offsetof(struct __sk_buff, data_end)),
3934 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3935 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3936 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3937 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3938 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3939 BPF_MOV64_IMM(BPF_REG_2, 8),
3940 BPF_MOV64_IMM(BPF_REG_3, 0),
3941 BPF_MOV64_IMM(BPF_REG_4, 0),
3942 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003943 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3944 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003945 BPF_MOV64_IMM(BPF_REG_0, 0),
3946 BPF_EXIT_INSN(),
3947 },
3948 .result = REJECT,
3949 .errstr = "invalid access to packet",
3950 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3951 },
3952 {
Edward Creef65b1842017-08-07 15:27:12 +01003953 "helper access to packet: test17, cls helper fail range 2",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003954 .insns = {
3955 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3956 offsetof(struct __sk_buff, data)),
3957 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3958 offsetof(struct __sk_buff, data_end)),
3959 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3960 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3961 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3962 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3963 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3964 BPF_MOV64_IMM(BPF_REG_2, -9),
3965 BPF_MOV64_IMM(BPF_REG_3, 0),
3966 BPF_MOV64_IMM(BPF_REG_4, 0),
3967 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003968 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3969 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003970 BPF_MOV64_IMM(BPF_REG_0, 0),
3971 BPF_EXIT_INSN(),
3972 },
3973 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003974 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003975 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3976 },
3977 {
Edward Creef65b1842017-08-07 15:27:12 +01003978 "helper access to packet: test18, cls helper fail range 3",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003979 .insns = {
3980 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3981 offsetof(struct __sk_buff, data)),
3982 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3983 offsetof(struct __sk_buff, data_end)),
3984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3985 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3987 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3988 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3989 BPF_MOV64_IMM(BPF_REG_2, ~0),
3990 BPF_MOV64_IMM(BPF_REG_3, 0),
3991 BPF_MOV64_IMM(BPF_REG_4, 0),
3992 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003993 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3994 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003995 BPF_MOV64_IMM(BPF_REG_0, 0),
3996 BPF_EXIT_INSN(),
3997 },
3998 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003999 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004000 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4001 },
4002 {
Yonghong Songb6ff6392017-11-12 14:49:11 -08004003 "helper access to packet: test19, cls helper range zero",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004004 .insns = {
4005 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4006 offsetof(struct __sk_buff, data)),
4007 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4008 offsetof(struct __sk_buff, data_end)),
4009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4010 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4012 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4013 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4014 BPF_MOV64_IMM(BPF_REG_2, 0),
4015 BPF_MOV64_IMM(BPF_REG_3, 0),
4016 BPF_MOV64_IMM(BPF_REG_4, 0),
4017 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004018 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4019 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004020 BPF_MOV64_IMM(BPF_REG_0, 0),
4021 BPF_EXIT_INSN(),
4022 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08004023 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004024 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4025 },
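/*
 * The bpf_csum_diff() tests above all prove data + 8 <= data_end and then
 * vary the "from" pointer (R1) and length (R2) passed to the helper:
 *
 *	bpf_csum_diff(data + 1, 4, 0, 0, 0);	(fits, accepted)
 *	bpf_csum_diff(data + 4, 4, 0, 0, 0);	(ends at data + 8, accepted)
 *	bpf_csum_diff(data - 4, 4, 0, 0, 0);	(before data, rejected)
 *	bpf_csum_diff(data + 1, 8, 0, 0, 0);	(needs data + 9, rejected)
 *
 * Negative lengths (-9, ~0) fail with "R2 min value is negative", and a
 * zero length is accepted (test19).
 */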
4026 {
Edward Creef65b1842017-08-07 15:27:12 +01004027 "helper access to packet: test20, pkt end as input",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004028 .insns = {
4029 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4030 offsetof(struct __sk_buff, data)),
4031 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4032 offsetof(struct __sk_buff, data_end)),
4033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4034 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4035 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4036 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4037 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4038 BPF_MOV64_IMM(BPF_REG_2, 4),
4039 BPF_MOV64_IMM(BPF_REG_3, 0),
4040 BPF_MOV64_IMM(BPF_REG_4, 0),
4041 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004042 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4043 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004044 BPF_MOV64_IMM(BPF_REG_0, 0),
4045 BPF_EXIT_INSN(),
4046 },
4047 .result = REJECT,
4048 .errstr = "R1 type=pkt_end expected=fp",
4049 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4050 },
4051 {
Edward Creef65b1842017-08-07 15:27:12 +01004052 "helper access to packet: test21, wrong reg",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004053 .insns = {
4054 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4055 offsetof(struct __sk_buff, data)),
4056 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4057 offsetof(struct __sk_buff, data_end)),
4058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4059 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4061 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4062 BPF_MOV64_IMM(BPF_REG_2, 4),
4063 BPF_MOV64_IMM(BPF_REG_3, 0),
4064 BPF_MOV64_IMM(BPF_REG_4, 0),
4065 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004066 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4067 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004068 BPF_MOV64_IMM(BPF_REG_0, 0),
4069 BPF_EXIT_INSN(),
4070 },
4071 .result = REJECT,
4072 .errstr = "invalid access to packet",
4073 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4074 },
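/*
 * test20/test21 above target the argument registers themselves: passing
 * pkt_end as the buffer is a type error ("R1 type=pkt_end expected=fp..."),
 * and test21 simply leaves R1 pointing at data + 8, the end of the checked
 * region, so the 4-byte read falls outside it:
 *
 *	bpf_csum_diff(data_end, 4, 0, 0, 0);	(wrong register type)
 *	bpf_csum_diff(data + 8, 4, 0, 0, 0);	(past the verified range)
 */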
Josef Bacik48461132016-09-28 10:54:32 -04004075 {
4076 "valid map access into an array with a constant",
4077 .insns = {
4078 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4079 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4080 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4081 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004082 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4083 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004084 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004085 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4086 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004087 BPF_EXIT_INSN(),
4088 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004089 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004090 .errstr_unpriv = "R0 leaks addr",
4091 .result_unpriv = REJECT,
4092 .result = ACCEPT,
4093 },
4094 {
4095 "valid map access into an array with a register",
4096 .insns = {
4097 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4098 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4099 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4100 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004101 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4102 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004103 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4104 BPF_MOV64_IMM(BPF_REG_1, 4),
4105 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4106 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004107 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4108 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004109 BPF_EXIT_INSN(),
4110 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004111 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004112 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004113 .result_unpriv = REJECT,
4114 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004115 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004116 },
4117 {
4118 "valid map access into an array with a variable",
4119 .insns = {
4120 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4121 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4123 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004124 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4125 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004126 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4127 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4128 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4129 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4130 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004131 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4132 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004133 BPF_EXIT_INSN(),
4134 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004135 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004136 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004137 .result_unpriv = REJECT,
4138 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004139 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004140 },
4141 {
4142 "valid map access into an array with a signed variable",
4143 .insns = {
4144 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4145 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4146 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4147 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004148 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4149 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004150 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4151 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4152 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4153 BPF_MOV32_IMM(BPF_REG_1, 0),
4154 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4155 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4156 BPF_MOV32_IMM(BPF_REG_1, 0),
4157 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4158 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004159 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4160 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004161 BPF_EXIT_INSN(),
4162 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004163 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004164 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004165 .result_unpriv = REJECT,
4166 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004167 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004168 },
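/*
 * The four "valid map access into an array" tests above follow the usual
 * pattern for array-style map values: look up, NULL-check, bound the index
 * from both sides, scale, access.  Sketch, assuming struct test_val and
 * MAX_ENTRIES as defined earlier in this file (value_size is 48 per the
 * error strings in the invalid variants below):
 *
 *	struct test_val *v = bpf_map_lookup_elem(&map, &key);
 *	if (!v)
 *		return 0;
 *	if (idx >= MAX_ENTRIES)		(and idx >= 0 for the signed case)
 *		return 0;
 *	*(u64 *)((char *)v + (idx << 2)) = 42;	(stays within 48 bytes)
 *
 * The errstr_unpriv/result_unpriv fields expect the same programs to be
 * rejected for unprivileged loads.
 */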
4169 {
4170 "invalid map access into an array with a constant",
4171 .insns = {
4172 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4173 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4175 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004176 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4177 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004178 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4179 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4180 offsetof(struct test_val, foo)),
4181 BPF_EXIT_INSN(),
4182 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004183 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004184 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4185 .result = REJECT,
4186 },
4187 {
4188 "invalid map access into an array with a register",
4189 .insns = {
4190 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4191 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4193 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004194 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4195 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004196 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4197 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4198 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4199 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004200 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4201 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004202 BPF_EXIT_INSN(),
4203 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004204 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004205 .errstr = "R0 min value is outside of the array range",
4206 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004207 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004208 },
4209 {
4210 "invalid map access into an array with a variable",
4211 .insns = {
4212 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4213 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4215 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4217 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004218 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4219 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4220 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4221 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004222 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4223 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004224 BPF_EXIT_INSN(),
4225 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004226 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004227 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
Josef Bacik48461132016-09-28 10:54:32 -04004228 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004229 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004230 },
4231 {
4232 "invalid map access into an array with no floor check",
4233 .insns = {
4234 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4235 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4237 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004238 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4239 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004240 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
Edward Creef65b1842017-08-07 15:27:12 +01004241 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik48461132016-09-28 10:54:32 -04004242 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4243 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4244 BPF_MOV32_IMM(BPF_REG_1, 0),
4245 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4246 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004247 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4248 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004249 BPF_EXIT_INSN(),
4250 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004251 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004252 .errstr_unpriv = "R0 leaks addr",
4253 .errstr = "R0 unbounded memory access",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004254 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004255 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004256 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004257 },
4258 {
4259 "invalid map access into an array with a invalid max check",
4260 .insns = {
4261 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4262 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4264 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4266 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004267 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4268 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4269 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4270 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4271 BPF_MOV32_IMM(BPF_REG_1, 0),
4272 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4273 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004274 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4275 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004276 BPF_EXIT_INSN(),
4277 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004278 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004279 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004280 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004281 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004282 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004283 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004284 },
4285 {
4286 "invalid map access into an array with a invalid max check",
4287 .insns = {
4288 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4289 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4291 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004292 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4293 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004294 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4295 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4296 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4297 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4298 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4299 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4301 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004302 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4303 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004304 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4305 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004306 BPF_EXIT_INSN(),
4307 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004308 .fixup_map2 = { 3, 11 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004309 .errstr = "R0 pointer += pointer",
Josef Bacik48461132016-09-28 10:54:32 -04004310 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004311 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004312 },
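/*
 * The invalid variants above show how the same pattern breaks: a constant
 * offset past value_size (off=48 size=8), an index with no bound at all, a
 * signed upper bound with no floor (negative indices remain possible, hence
 * "unbounded memory access"), an off-by-one upper bound, and adding two
 * pointers ("R0 pointer += pointer").  The off-by-one case, assuming
 * MAX_ENTRIES is 11 to match value_size=48:
 *
 *	if (idx <= MAX_ENTRIES)				(should be "<")
 *		*(u64 *)((char *)v + (idx << 2)) = 42;	(off=44 size=8)
 */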
Thomas Graf57a09bf2016-10-18 19:51:19 +02004313 {
4314 "multiple registers share map_lookup_elem result",
4315 .insns = {
4316 BPF_MOV64_IMM(BPF_REG_1, 10),
4317 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4318 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4319 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4320 BPF_LD_MAP_FD(BPF_REG_1, 0),
4321 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4322 BPF_FUNC_map_lookup_elem),
4323 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4324 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4325 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4326 BPF_EXIT_INSN(),
4327 },
4328 .fixup_map1 = { 4 },
4329 .result = ACCEPT,
4330 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4331 },
4332 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004333 "alu ops on ptr_to_map_value_or_null, 1",
4334 .insns = {
4335 BPF_MOV64_IMM(BPF_REG_1, 10),
4336 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4339 BPF_LD_MAP_FD(BPF_REG_1, 0),
4340 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4341 BPF_FUNC_map_lookup_elem),
4342 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
4344 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
4345 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4346 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4347 BPF_EXIT_INSN(),
4348 },
4349 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004350 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004351 .result = REJECT,
4352 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4353 },
4354 {
4355 "alu ops on ptr_to_map_value_or_null, 2",
4356 .insns = {
4357 BPF_MOV64_IMM(BPF_REG_1, 10),
4358 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4359 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4361 BPF_LD_MAP_FD(BPF_REG_1, 0),
4362 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4363 BPF_FUNC_map_lookup_elem),
4364 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4365 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
4366 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4367 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4368 BPF_EXIT_INSN(),
4369 },
4370 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004371 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004372 .result = REJECT,
4373 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4374 },
4375 {
4376 "alu ops on ptr_to_map_value_or_null, 3",
4377 .insns = {
4378 BPF_MOV64_IMM(BPF_REG_1, 10),
4379 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4380 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4381 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4382 BPF_LD_MAP_FD(BPF_REG_1, 0),
4383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4384 BPF_FUNC_map_lookup_elem),
4385 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4386 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4387 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4388 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4389 BPF_EXIT_INSN(),
4390 },
4391 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004392 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004393 .result = REJECT,
4394 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4395 },
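/*
 * bpf_map_lookup_elem() returns PTR_TO_MAP_VALUE_OR_NULL.  Copies of the
 * result share its id, so one NULL check marks all of them as valid
 * ("multiple registers share map_lookup_elem result" above), but any ALU op
 * on the maybe-NULL value before that check is rejected, as the three
 * "alu ops" tests expect.  Sketch:
 *
 *	long *v = bpf_map_lookup_elem(&map, &key);
 *	long *copy = v;			(same id as v)
 *	if (v)
 *		*copy = 0;		(copy is known non-NULL here)
 *
 * whereas "copy += 2" before the check is "pointer arithmetic on
 * PTR_TO_MAP_VALUE_OR_NULL".
 */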
4396 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02004397 "invalid memory access with multiple map_lookup_elem calls",
4398 .insns = {
4399 BPF_MOV64_IMM(BPF_REG_1, 10),
4400 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4403 BPF_LD_MAP_FD(BPF_REG_1, 0),
4404 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4405 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4407 BPF_FUNC_map_lookup_elem),
4408 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4409 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4410 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4411 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4412 BPF_FUNC_map_lookup_elem),
4413 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4414 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4415 BPF_EXIT_INSN(),
4416 },
4417 .fixup_map1 = { 4 },
4418 .result = REJECT,
4419 .errstr = "R4 !read_ok",
4420 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4421 },
4422 {
4423 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4424 .insns = {
4425 BPF_MOV64_IMM(BPF_REG_1, 10),
4426 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4427 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4429 BPF_LD_MAP_FD(BPF_REG_1, 0),
4430 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4431 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4432 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4433 BPF_FUNC_map_lookup_elem),
4434 BPF_MOV64_IMM(BPF_REG_2, 10),
4435 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4436 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4437 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4438 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4439 BPF_FUNC_map_lookup_elem),
4440 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4441 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4442 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4443 BPF_EXIT_INSN(),
4444 },
4445 .fixup_map1 = { 4 },
4446 .result = ACCEPT,
4447 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4448 },
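/*
 * Helper calls clobber the caller-saved registers R1-R5 and return in R0,
 * so a lookup result parked in R4 does not survive a second
 * bpf_map_lookup_elem() call ("R4 !read_ok" above); the "valid indirect"
 * variant reloads R4 from R0 after the optional second call and is
 * accepted.  The broken shape, roughly:
 *
 *	r4 = bpf_map_lookup_elem(&map, &key);
 *	bpf_map_lookup_elem(&map, &key);	(r1-r5 now unreadable)
 *	if (r0)
 *		*(u64 *)r4 = 0;			(r4 holds nothing anymore)
 */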
Josef Bacike9548902016-11-29 12:35:19 -05004449 {
4450 "invalid map access from else condition",
4451 .insns = {
4452 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4453 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4455 BPF_LD_MAP_FD(BPF_REG_1, 0),
4456 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4457 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4458 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4459 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4461 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4462 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4463 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4464 BPF_EXIT_INSN(),
4465 },
4466 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004467 .errstr = "R0 unbounded memory access",
Josef Bacike9548902016-11-29 12:35:19 -05004468 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004469 .errstr_unpriv = "R0 leaks addr",
Josef Bacike9548902016-11-29 12:35:19 -05004470 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004471 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05004472 },
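	/*
	 * OR-ing two known constants yields another known constant, so the
	 * verifier can still compare the resulting bpf_probe_read() size
	 * against the 48-byte stack buffer at R1: 34 | 13 = 47 fits,
	 * 34 | 24 = 58 does not ("access_size=58").
	 */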
Gianluca Borello3c8397442016-12-03 12:31:33 -08004473 {
4474 "constant register |= constant should keep constant type",
4475 .insns = {
4476 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4478 BPF_MOV64_IMM(BPF_REG_2, 34),
4479 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4480 BPF_MOV64_IMM(BPF_REG_3, 0),
4481 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4482 BPF_EXIT_INSN(),
4483 },
4484 .result = ACCEPT,
4485 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4486 },
4487 {
4488 "constant register |= constant should not bypass stack boundary checks",
4489 .insns = {
4490 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4492 BPF_MOV64_IMM(BPF_REG_2, 34),
4493 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4494 BPF_MOV64_IMM(BPF_REG_3, 0),
4495 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4496 BPF_EXIT_INSN(),
4497 },
4498 .errstr = "invalid stack type R1 off=-48 access_size=58",
4499 .result = REJECT,
4500 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4501 },
4502 {
4503 "constant register |= constant register should keep constant type",
4504 .insns = {
4505 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4507 BPF_MOV64_IMM(BPF_REG_2, 34),
4508 BPF_MOV64_IMM(BPF_REG_4, 13),
4509 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4510 BPF_MOV64_IMM(BPF_REG_3, 0),
4511 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4512 BPF_EXIT_INSN(),
4513 },
4514 .result = ACCEPT,
4515 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4516 },
4517 {
4518 "constant register |= constant register should not bypass stack boundary checks",
4519 .insns = {
4520 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4522 BPF_MOV64_IMM(BPF_REG_2, 34),
4523 BPF_MOV64_IMM(BPF_REG_4, 24),
4524 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4525 BPF_MOV64_IMM(BPF_REG_3, 0),
4526 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4527 BPF_EXIT_INSN(),
4528 },
4529 .errstr = "invalid stack type R1 off=-48 access_size=58",
4530 .result = REJECT,
4531 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4532 },
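	/*
	 * Direct packet access for the LWT program types: LWT_IN and LWT_OUT
	 * are read-only ("cannot write into packet"), only LWT_XMIT may also
	 * write packet data.
	 */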
Thomas Graf3f731d82016-12-05 10:30:52 +01004533 {
4534 "invalid direct packet write for LWT_IN",
4535 .insns = {
4536 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4537 offsetof(struct __sk_buff, data)),
4538 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4539 offsetof(struct __sk_buff, data_end)),
4540 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4541 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4542 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4543 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4544 BPF_MOV64_IMM(BPF_REG_0, 0),
4545 BPF_EXIT_INSN(),
4546 },
4547 .errstr = "cannot write into packet",
4548 .result = REJECT,
4549 .prog_type = BPF_PROG_TYPE_LWT_IN,
4550 },
4551 {
4552 "invalid direct packet write for LWT_OUT",
4553 .insns = {
4554 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4555 offsetof(struct __sk_buff, data)),
4556 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4557 offsetof(struct __sk_buff, data_end)),
4558 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4559 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4560 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4561 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4562 BPF_MOV64_IMM(BPF_REG_0, 0),
4563 BPF_EXIT_INSN(),
4564 },
4565 .errstr = "cannot write into packet",
4566 .result = REJECT,
4567 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4568 },
4569 {
4570 "direct packet write for LWT_XMIT",
4571 .insns = {
4572 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4573 offsetof(struct __sk_buff, data)),
4574 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4575 offsetof(struct __sk_buff, data_end)),
4576 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4577 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4578 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4579 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4580 BPF_MOV64_IMM(BPF_REG_0, 0),
4581 BPF_EXIT_INSN(),
4582 },
4583 .result = ACCEPT,
4584 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4585 },
4586 {
4587 "direct packet read for LWT_IN",
4588 .insns = {
4589 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4590 offsetof(struct __sk_buff, data)),
4591 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4592 offsetof(struct __sk_buff, data_end)),
4593 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4595 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4596 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4597 BPF_MOV64_IMM(BPF_REG_0, 0),
4598 BPF_EXIT_INSN(),
4599 },
4600 .result = ACCEPT,
4601 .prog_type = BPF_PROG_TYPE_LWT_IN,
4602 },
4603 {
4604 "direct packet read for LWT_OUT",
4605 .insns = {
4606 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4607 offsetof(struct __sk_buff, data)),
4608 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4609 offsetof(struct __sk_buff, data_end)),
4610 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4611 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4612 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4613 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4614 BPF_MOV64_IMM(BPF_REG_0, 0),
4615 BPF_EXIT_INSN(),
4616 },
4617 .result = ACCEPT,
4618 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4619 },
4620 {
4621 "direct packet read for LWT_XMIT",
4622 .insns = {
4623 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4624 offsetof(struct __sk_buff, data)),
4625 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4626 offsetof(struct __sk_buff, data_end)),
4627 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4629 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4630 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4631 BPF_MOV64_IMM(BPF_REG_0, 0),
4632 BPF_EXIT_INSN(),
4633 },
4634 .result = ACCEPT,
4635 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4636 },
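	/*
	 * The second data_end check (data + 6) is narrower than the first
	 * (data + 8); the verifier must keep the wider proven range so that
	 * the 2-byte load at offset 6 remains in bounds.
	 */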
4637 {
Alexei Starovoitovb1977682017-03-24 15:57:33 -07004638 "overlapping checks for direct packet access",
4639 .insns = {
4640 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4641 offsetof(struct __sk_buff, data)),
4642 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4643 offsetof(struct __sk_buff, data_end)),
4644 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4646 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4647 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4648 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4649 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4650 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4651 BPF_MOV64_IMM(BPF_REG_0, 0),
4652 BPF_EXIT_INSN(),
4653 },
4654 .result = ACCEPT,
4655 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4656 },
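	/*
	 * skb->tc_classid is only meaningful for tc programs; for the LWT
	 * flavours it is not part of the allowed __sk_buff context, so any
	 * read of it is "invalid bpf_context access".
	 */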
4657 {
Thomas Graf3f731d82016-12-05 10:30:52 +01004658 "invalid access of tc_classid for LWT_IN",
4659 .insns = {
4660 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4661 offsetof(struct __sk_buff, tc_classid)),
4662 BPF_EXIT_INSN(),
4663 },
4664 .result = REJECT,
4665 .errstr = "invalid bpf_context access",
4666 },
4667 {
4668 "invalid access of tc_classid for LWT_OUT",
4669 .insns = {
4670 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4671 offsetof(struct __sk_buff, tc_classid)),
4672 BPF_EXIT_INSN(),
4673 },
4674 .result = REJECT,
4675 .errstr = "invalid bpf_context access",
4676 },
4677 {
4678 "invalid access of tc_classid for LWT_XMIT",
4679 .insns = {
4680 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4681 offsetof(struct __sk_buff, tc_classid)),
4682 BPF_EXIT_INSN(),
4683 },
4684 .result = REJECT,
4685 .errstr = "invalid bpf_context access",
4686 },
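	/*
	 * Pointer leak tests: unprivileged programs may not write kernel
	 * pointers into memory that user space can observe (skb->cb[] or a
	 * map value), and BPF_XADD on the context is rejected regardless of
	 * privilege.
	 */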
Gianluca Borello57225692017-01-09 10:19:47 -08004687 {
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004688 "leak pointer into ctx 1",
4689 .insns = {
4690 BPF_MOV64_IMM(BPF_REG_0, 0),
4691 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4692 offsetof(struct __sk_buff, cb[0])),
4693 BPF_LD_MAP_FD(BPF_REG_2, 0),
4694 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4695 offsetof(struct __sk_buff, cb[0])),
4696 BPF_EXIT_INSN(),
4697 },
4698 .fixup_map1 = { 2 },
4699 .errstr_unpriv = "R2 leaks addr into mem",
4700 .result_unpriv = REJECT,
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01004701 .result = REJECT,
4702 .errstr = "BPF_XADD stores into R1 context is not allowed",
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004703 },
4704 {
4705 "leak pointer into ctx 2",
4706 .insns = {
4707 BPF_MOV64_IMM(BPF_REG_0, 0),
4708 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4709 offsetof(struct __sk_buff, cb[0])),
4710 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4711 offsetof(struct __sk_buff, cb[0])),
4712 BPF_EXIT_INSN(),
4713 },
4714 .errstr_unpriv = "R10 leaks addr into mem",
4715 .result_unpriv = REJECT,
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01004716 .result = REJECT,
4717 .errstr = "BPF_XADD stores into R1 context is not allowed",
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004718 },
4719 {
4720 "leak pointer into ctx 3",
4721 .insns = {
4722 BPF_MOV64_IMM(BPF_REG_0, 0),
4723 BPF_LD_MAP_FD(BPF_REG_2, 0),
4724 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4725 offsetof(struct __sk_buff, cb[0])),
4726 BPF_EXIT_INSN(),
4727 },
4728 .fixup_map1 = { 1 },
4729 .errstr_unpriv = "R2 leaks addr into ctx",
4730 .result_unpriv = REJECT,
4731 .result = ACCEPT,
4732 },
4733 {
4734 "leak pointer into map val",
4735 .insns = {
4736 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4737 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4738 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4740 BPF_LD_MAP_FD(BPF_REG_1, 0),
4741 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4742 BPF_FUNC_map_lookup_elem),
4743 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4744 BPF_MOV64_IMM(BPF_REG_3, 0),
4745 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4746 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4747 BPF_MOV64_IMM(BPF_REG_0, 0),
4748 BPF_EXIT_INSN(),
4749 },
4750 .fixup_map1 = { 4 },
4751 .errstr_unpriv = "R6 leaks addr into mem",
4752 .result_unpriv = REJECT,
4753 .result = ACCEPT,
4754 },
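	/*
	 * bpf_probe_read() with a map value as the destination: the constant
	 * size must stay within value_size (48 bytes for struct test_val);
	 * zero, negative and oversized ranges are rejected.
	 */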
4755 {
Gianluca Borello57225692017-01-09 10:19:47 -08004756 "helper access to map: full range",
4757 .insns = {
4758 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4760 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4761 BPF_LD_MAP_FD(BPF_REG_1, 0),
4762 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4763 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4765 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4766 BPF_MOV64_IMM(BPF_REG_3, 0),
4767 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4768 BPF_EXIT_INSN(),
4769 },
4770 .fixup_map2 = { 3 },
4771 .result = ACCEPT,
4772 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4773 },
4774 {
4775 "helper access to map: partial range",
4776 .insns = {
4777 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4779 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4780 BPF_LD_MAP_FD(BPF_REG_1, 0),
4781 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4783 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4784 BPF_MOV64_IMM(BPF_REG_2, 8),
4785 BPF_MOV64_IMM(BPF_REG_3, 0),
4786 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4787 BPF_EXIT_INSN(),
4788 },
4789 .fixup_map2 = { 3 },
4790 .result = ACCEPT,
4791 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4792 },
4793 {
4794 "helper access to map: empty range",
4795 .insns = {
4796 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4798 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4799 BPF_LD_MAP_FD(BPF_REG_1, 0),
4800 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004801 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4802 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4803 BPF_MOV64_IMM(BPF_REG_2, 0),
4804 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08004805 BPF_EXIT_INSN(),
4806 },
4807 .fixup_map2 = { 3 },
4808 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4809 .result = REJECT,
4810 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4811 },
4812 {
4813 "helper access to map: out-of-bound range",
4814 .insns = {
4815 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4816 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4817 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4818 BPF_LD_MAP_FD(BPF_REG_1, 0),
4819 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4820 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4821 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4822 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4823 BPF_MOV64_IMM(BPF_REG_3, 0),
4824 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4825 BPF_EXIT_INSN(),
4826 },
4827 .fixup_map2 = { 3 },
4828 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4829 .result = REJECT,
4830 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4831 },
4832 {
4833 "helper access to map: negative range",
4834 .insns = {
4835 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4836 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4837 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4838 BPF_LD_MAP_FD(BPF_REG_1, 0),
4839 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4840 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4841 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4842 BPF_MOV64_IMM(BPF_REG_2, -8),
4843 BPF_MOV64_IMM(BPF_REG_3, 0),
4844 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4845 BPF_EXIT_INSN(),
4846 },
4847 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004848 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004849 .result = REJECT,
4850 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4851 },
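	/*
	 * The same size checks repeated on a map value pointer that has been
	 * advanced to &test_val.foo: first via an ALU immediate, then via a
	 * register holding a constant, and finally via a variable offset
	 * bounded by a conditional jump.
	 */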
4852 {
4853 "helper access to adjusted map (via const imm): full range",
4854 .insns = {
4855 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4856 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4857 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4858 BPF_LD_MAP_FD(BPF_REG_1, 0),
4859 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4860 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4861 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4863 offsetof(struct test_val, foo)),
4864 BPF_MOV64_IMM(BPF_REG_2,
4865 sizeof(struct test_val) -
4866 offsetof(struct test_val, foo)),
4867 BPF_MOV64_IMM(BPF_REG_3, 0),
4868 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4869 BPF_EXIT_INSN(),
4870 },
4871 .fixup_map2 = { 3 },
4872 .result = ACCEPT,
4873 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4874 },
4875 {
4876 "helper access to adjusted map (via const imm): partial range",
4877 .insns = {
4878 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4879 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4880 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4881 BPF_LD_MAP_FD(BPF_REG_1, 0),
4882 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4883 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4884 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4885 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4886 offsetof(struct test_val, foo)),
4887 BPF_MOV64_IMM(BPF_REG_2, 8),
4888 BPF_MOV64_IMM(BPF_REG_3, 0),
4889 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4890 BPF_EXIT_INSN(),
4891 },
4892 .fixup_map2 = { 3 },
4893 .result = ACCEPT,
4894 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4895 },
4896 {
4897 "helper access to adjusted map (via const imm): empty range",
4898 .insns = {
4899 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4901 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4902 BPF_LD_MAP_FD(BPF_REG_1, 0),
4903 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004904 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Gianluca Borello57225692017-01-09 10:19:47 -08004905 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4906 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4907 offsetof(struct test_val, foo)),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004908 BPF_MOV64_IMM(BPF_REG_2, 0),
4909 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08004910 BPF_EXIT_INSN(),
4911 },
4912 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004913 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
Gianluca Borello57225692017-01-09 10:19:47 -08004914 .result = REJECT,
4915 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4916 },
4917 {
4918 "helper access to adjusted map (via const imm): out-of-bound range",
4919 .insns = {
4920 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4922 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4923 BPF_LD_MAP_FD(BPF_REG_1, 0),
4924 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4925 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4926 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4927 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4928 offsetof(struct test_val, foo)),
4929 BPF_MOV64_IMM(BPF_REG_2,
4930 sizeof(struct test_val) -
4931 offsetof(struct test_val, foo) + 8),
4932 BPF_MOV64_IMM(BPF_REG_3, 0),
4933 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4934 BPF_EXIT_INSN(),
4935 },
4936 .fixup_map2 = { 3 },
4937 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4938 .result = REJECT,
4939 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4940 },
4941 {
4942 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4943 .insns = {
4944 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4946 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4947 BPF_LD_MAP_FD(BPF_REG_1, 0),
4948 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4949 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4950 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4951 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4952 offsetof(struct test_val, foo)),
4953 BPF_MOV64_IMM(BPF_REG_2, -8),
4954 BPF_MOV64_IMM(BPF_REG_3, 0),
4955 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4956 BPF_EXIT_INSN(),
4957 },
4958 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004959 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004960 .result = REJECT,
4961 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4962 },
4963 {
4964 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4965 .insns = {
4966 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4968 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4969 BPF_LD_MAP_FD(BPF_REG_1, 0),
4970 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4971 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4972 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4973 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4974 offsetof(struct test_val, foo)),
4975 BPF_MOV64_IMM(BPF_REG_2, -1),
4976 BPF_MOV64_IMM(BPF_REG_3, 0),
4977 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4978 BPF_EXIT_INSN(),
4979 },
4980 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004981 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004982 .result = REJECT,
4983 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4984 },
4985 {
4986 "helper access to adjusted map (via const reg): full range",
4987 .insns = {
4988 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4989 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4990 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4991 BPF_LD_MAP_FD(BPF_REG_1, 0),
4992 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4993 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4994 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4995 BPF_MOV64_IMM(BPF_REG_3,
4996 offsetof(struct test_val, foo)),
4997 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4998 BPF_MOV64_IMM(BPF_REG_2,
4999 sizeof(struct test_val) -
5000 offsetof(struct test_val, foo)),
5001 BPF_MOV64_IMM(BPF_REG_3, 0),
5002 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5003 BPF_EXIT_INSN(),
5004 },
5005 .fixup_map2 = { 3 },
5006 .result = ACCEPT,
5007 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5008 },
5009 {
5010 "helper access to adjusted map (via const reg): partial range",
5011 .insns = {
5012 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5014 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5015 BPF_LD_MAP_FD(BPF_REG_1, 0),
5016 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5017 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5018 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5019 BPF_MOV64_IMM(BPF_REG_3,
5020 offsetof(struct test_val, foo)),
5021 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5022 BPF_MOV64_IMM(BPF_REG_2, 8),
5023 BPF_MOV64_IMM(BPF_REG_3, 0),
5024 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5025 BPF_EXIT_INSN(),
5026 },
5027 .fixup_map2 = { 3 },
5028 .result = ACCEPT,
5029 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5030 },
5031 {
5032 "helper access to adjusted map (via const reg): empty range",
5033 .insns = {
5034 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5035 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5036 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5037 BPF_LD_MAP_FD(BPF_REG_1, 0),
5038 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005039 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
Gianluca Borello57225692017-01-09 10:19:47 -08005040 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5041 BPF_MOV64_IMM(BPF_REG_3, 0),
5042 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005043 BPF_MOV64_IMM(BPF_REG_2, 0),
5044 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005045 BPF_EXIT_INSN(),
5046 },
5047 .fixup_map2 = { 3 },
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005048 .errstr = "R1 min value is outside of the array range",
Gianluca Borello57225692017-01-09 10:19:47 -08005049 .result = REJECT,
5050 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5051 },
5052 {
5053 "helper access to adjusted map (via const reg): out-of-bound range",
5054 .insns = {
5055 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5056 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5057 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5058 BPF_LD_MAP_FD(BPF_REG_1, 0),
5059 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5060 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5061 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5062 BPF_MOV64_IMM(BPF_REG_3,
5063 offsetof(struct test_val, foo)),
5064 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5065 BPF_MOV64_IMM(BPF_REG_2,
5066 sizeof(struct test_val) -
5067 offsetof(struct test_val, foo) + 8),
5068 BPF_MOV64_IMM(BPF_REG_3, 0),
5069 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5070 BPF_EXIT_INSN(),
5071 },
5072 .fixup_map2 = { 3 },
5073 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5074 .result = REJECT,
5075 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5076 },
5077 {
5078 "helper access to adjusted map (via const reg): negative range (> adjustment)",
5079 .insns = {
5080 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5082 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5083 BPF_LD_MAP_FD(BPF_REG_1, 0),
5084 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5085 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5086 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5087 BPF_MOV64_IMM(BPF_REG_3,
5088 offsetof(struct test_val, foo)),
5089 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5090 BPF_MOV64_IMM(BPF_REG_2, -8),
5091 BPF_MOV64_IMM(BPF_REG_3, 0),
5092 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5093 BPF_EXIT_INSN(),
5094 },
5095 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005096 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005097 .result = REJECT,
5098 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5099 },
5100 {
5101 "helper access to adjusted map (via const reg): negative range (< adjustment)",
5102 .insns = {
5103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5105 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5106 BPF_LD_MAP_FD(BPF_REG_1, 0),
5107 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5108 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5109 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5110 BPF_MOV64_IMM(BPF_REG_3,
5111 offsetof(struct test_val, foo)),
5112 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5113 BPF_MOV64_IMM(BPF_REG_2, -1),
5114 BPF_MOV64_IMM(BPF_REG_3, 0),
5115 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5116 BPF_EXIT_INSN(),
5117 },
5118 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005119 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005120 .result = REJECT,
5121 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5122 },
5123 {
5124 "helper access to adjusted map (via variable): full range",
5125 .insns = {
5126 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5128 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5129 BPF_LD_MAP_FD(BPF_REG_1, 0),
5130 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5131 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5132 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5133 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5134 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5135 offsetof(struct test_val, foo), 4),
5136 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5137 BPF_MOV64_IMM(BPF_REG_2,
5138 sizeof(struct test_val) -
5139 offsetof(struct test_val, foo)),
5140 BPF_MOV64_IMM(BPF_REG_3, 0),
5141 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5142 BPF_EXIT_INSN(),
5143 },
5144 .fixup_map2 = { 3 },
5145 .result = ACCEPT,
5146 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5147 },
5148 {
5149 "helper access to adjusted map (via variable): partial range",
5150 .insns = {
5151 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5152 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5153 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5154 BPF_LD_MAP_FD(BPF_REG_1, 0),
5155 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5156 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5157 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5158 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5159 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5160 offsetof(struct test_val, foo), 4),
5161 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5162 BPF_MOV64_IMM(BPF_REG_2, 8),
5163 BPF_MOV64_IMM(BPF_REG_3, 0),
5164 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5165 BPF_EXIT_INSN(),
5166 },
5167 .fixup_map2 = { 3 },
5168 .result = ACCEPT,
5169 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5170 },
5171 {
5172 "helper access to adjusted map (via variable): empty range",
5173 .insns = {
5174 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5176 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5177 BPF_LD_MAP_FD(BPF_REG_1, 0),
5178 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005179 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
Gianluca Borello57225692017-01-09 10:19:47 -08005180 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5181 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5182 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005183 offsetof(struct test_val, foo), 3),
Gianluca Borello57225692017-01-09 10:19:47 -08005184 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005185 BPF_MOV64_IMM(BPF_REG_2, 0),
5186 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005187 BPF_EXIT_INSN(),
5188 },
5189 .fixup_map2 = { 3 },
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005190 .errstr = "R1 min value is outside of the array range",
Gianluca Borello57225692017-01-09 10:19:47 -08005191 .result = REJECT,
5192 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5193 },
5194 {
5195 "helper access to adjusted map (via variable): no max check",
5196 .insns = {
5197 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5198 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5199 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5200 BPF_LD_MAP_FD(BPF_REG_1, 0),
5201 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5202 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5203 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5204 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5205 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Edward Creef65b1842017-08-07 15:27:12 +01005206 BPF_MOV64_IMM(BPF_REG_2, 1),
Gianluca Borello57225692017-01-09 10:19:47 -08005207 BPF_MOV64_IMM(BPF_REG_3, 0),
5208 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5209 BPF_EXIT_INSN(),
5210 },
5211 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005212 .errstr = "R1 unbounded memory access",
Gianluca Borello57225692017-01-09 10:19:47 -08005213 .result = REJECT,
5214 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5215 },
5216 {
5217 "helper access to adjusted map (via variable): wrong max check",
5218 .insns = {
5219 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5221 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5222 BPF_LD_MAP_FD(BPF_REG_1, 0),
5223 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5224 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5225 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5226 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5227 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5228 offsetof(struct test_val, foo), 4),
5229 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5230 BPF_MOV64_IMM(BPF_REG_2,
5231 sizeof(struct test_val) -
5232 offsetof(struct test_val, foo) + 1),
5233 BPF_MOV64_IMM(BPF_REG_3, 0),
5234 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5235 BPF_EXIT_INSN(),
5236 },
5237 .fixup_map2 = { 3 },
5238 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
5239 .result = REJECT,
5240 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5241 },
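	/*
	 * The BPF_JLT/JLE/JSLT/JSLE opcodes arrived after the JGT/JGE family;
	 * these entries verify that bounds learned from the "lower than"
	 * comparisons also flow into the map value access checks.
	 */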
Gianluca Borellof0318d02017-01-09 10:19:48 -08005242 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02005243 "helper access to map: bounds check using <, good access",
5244 .insns = {
5245 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5247 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5248 BPF_LD_MAP_FD(BPF_REG_1, 0),
5249 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5250 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5251 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5252 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5253 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
5254 BPF_MOV64_IMM(BPF_REG_0, 0),
5255 BPF_EXIT_INSN(),
5256 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5257 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5258 BPF_MOV64_IMM(BPF_REG_0, 0),
5259 BPF_EXIT_INSN(),
5260 },
5261 .fixup_map2 = { 3 },
5262 .result = ACCEPT,
5263 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5264 },
5265 {
5266 "helper access to map: bounds check using <, bad access",
5267 .insns = {
5268 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5270 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5271 BPF_LD_MAP_FD(BPF_REG_1, 0),
5272 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5273 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5274 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5275 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5276 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
5277 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5278 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5279 BPF_MOV64_IMM(BPF_REG_0, 0),
5280 BPF_EXIT_INSN(),
5281 BPF_MOV64_IMM(BPF_REG_0, 0),
5282 BPF_EXIT_INSN(),
5283 },
5284 .fixup_map2 = { 3 },
5285 .result = REJECT,
5286 .errstr = "R1 unbounded memory access",
5287 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5288 },
5289 {
5290 "helper access to map: bounds check using <=, good access",
5291 .insns = {
5292 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5294 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5295 BPF_LD_MAP_FD(BPF_REG_1, 0),
5296 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5297 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5298 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5299 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5300 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
5301 BPF_MOV64_IMM(BPF_REG_0, 0),
5302 BPF_EXIT_INSN(),
5303 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5304 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5305 BPF_MOV64_IMM(BPF_REG_0, 0),
5306 BPF_EXIT_INSN(),
5307 },
5308 .fixup_map2 = { 3 },
5309 .result = ACCEPT,
5310 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5311 },
5312 {
5313 "helper access to map: bounds check using <=, bad access",
5314 .insns = {
5315 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5317 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5318 BPF_LD_MAP_FD(BPF_REG_1, 0),
5319 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5320 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5321 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5322 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5323 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
5324 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5325 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5326 BPF_MOV64_IMM(BPF_REG_0, 0),
5327 BPF_EXIT_INSN(),
5328 BPF_MOV64_IMM(BPF_REG_0, 0),
5329 BPF_EXIT_INSN(),
5330 },
5331 .fixup_map2 = { 3 },
5332 .result = REJECT,
5333 .errstr = "R1 unbounded memory access",
5334 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5335 },
5336 {
5337 "helper access to map: bounds check using s<, good access",
5338 .insns = {
5339 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5341 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5342 BPF_LD_MAP_FD(BPF_REG_1, 0),
5343 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5344 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5345 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5346 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5347 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5348 BPF_MOV64_IMM(BPF_REG_0, 0),
5349 BPF_EXIT_INSN(),
5350 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
5351 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5352 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5353 BPF_MOV64_IMM(BPF_REG_0, 0),
5354 BPF_EXIT_INSN(),
5355 },
5356 .fixup_map2 = { 3 },
5357 .result = ACCEPT,
5358 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5359 },
5360 {
5361 "helper access to map: bounds check using s<, good access 2",
5362 .insns = {
5363 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5364 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5365 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5366 BPF_LD_MAP_FD(BPF_REG_1, 0),
5367 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5368 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5369 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5370 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5371 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5372 BPF_MOV64_IMM(BPF_REG_0, 0),
5373 BPF_EXIT_INSN(),
5374 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5375 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5376 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5377 BPF_MOV64_IMM(BPF_REG_0, 0),
5378 BPF_EXIT_INSN(),
5379 },
5380 .fixup_map2 = { 3 },
5381 .result = ACCEPT,
5382 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5383 },
5384 {
5385 "helper access to map: bounds check using s<, bad access",
5386 .insns = {
5387 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5389 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5390 BPF_LD_MAP_FD(BPF_REG_1, 0),
5391 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5392 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5393 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5394 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5395 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5396 BPF_MOV64_IMM(BPF_REG_0, 0),
5397 BPF_EXIT_INSN(),
5398 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5399 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5400 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5401 BPF_MOV64_IMM(BPF_REG_0, 0),
5402 BPF_EXIT_INSN(),
5403 },
5404 .fixup_map2 = { 3 },
5405 .result = REJECT,
5406 .errstr = "R1 min value is negative",
5407 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5408 },
5409 {
5410 "helper access to map: bounds check using s<=, good access",
5411 .insns = {
5412 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5414 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5415 BPF_LD_MAP_FD(BPF_REG_1, 0),
5416 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5417 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5418 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5419 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5420 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5421 BPF_MOV64_IMM(BPF_REG_0, 0),
5422 BPF_EXIT_INSN(),
5423 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5424 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5425 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5426 BPF_MOV64_IMM(BPF_REG_0, 0),
5427 BPF_EXIT_INSN(),
5428 },
5429 .fixup_map2 = { 3 },
5430 .result = ACCEPT,
5431 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5432 },
5433 {
5434 "helper access to map: bounds check using s<=, good access 2",
5435 .insns = {
5436 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5437 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5438 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5439 BPF_LD_MAP_FD(BPF_REG_1, 0),
5440 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5441 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5442 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5443 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5444 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5445 BPF_MOV64_IMM(BPF_REG_0, 0),
5446 BPF_EXIT_INSN(),
5447 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5448 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5449 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5450 BPF_MOV64_IMM(BPF_REG_0, 0),
5451 BPF_EXIT_INSN(),
5452 },
5453 .fixup_map2 = { 3 },
5454 .result = ACCEPT,
5455 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5456 },
5457 {
5458 "helper access to map: bounds check using s<=, bad access",
5459 .insns = {
5460 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5462 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5463 BPF_LD_MAP_FD(BPF_REG_1, 0),
5464 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5465 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5466 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5467 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5468 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5469 BPF_MOV64_IMM(BPF_REG_0, 0),
5470 BPF_EXIT_INSN(),
5471 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5472 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5473 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5474 BPF_MOV64_IMM(BPF_REG_0, 0),
5475 BPF_EXIT_INSN(),
5476 },
5477 .fixup_map2 = { 3 },
5478 .result = REJECT,
5479 .errstr = "R1 min value is negative",
5480 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5481 },
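	/*
	 * Spill/fill of map value pointers: the pointer type (or its
	 * map_value_or_null state) must survive a round trip through the
	 * stack, unaligned access through it depends on
	 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, and ALU ops other than plain
	 * additive pointer arithmetic (AND, 32-bit ALU, DIV, byte swap, or
	 * XADD on the spilled slot) are either rejected outright or leave a
	 * value that can no longer be dereferenced.
	 */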
5482 {
Gianluca Borellof0318d02017-01-09 10:19:48 -08005483 "map element value is preserved across register spilling",
5484 .insns = {
5485 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5486 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5487 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5488 BPF_LD_MAP_FD(BPF_REG_1, 0),
5489 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5490 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5491 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5492 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5494 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5495 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5496 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5497 BPF_EXIT_INSN(),
5498 },
5499 .fixup_map2 = { 3 },
5500 .errstr_unpriv = "R0 leaks addr",
5501 .result = ACCEPT,
5502 .result_unpriv = REJECT,
5503 },
5504 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005505 "map element value or null is marked on register spilling",
5506 .insns = {
5507 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5509 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5510 BPF_LD_MAP_FD(BPF_REG_1, 0),
5511 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5512 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5514 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5515 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5516 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5517 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5518 BPF_EXIT_INSN(),
5519 },
5520 .fixup_map2 = { 3 },
5521 .errstr_unpriv = "R0 leaks addr",
5522 .result = ACCEPT,
5523 .result_unpriv = REJECT,
5524 },
5525 {
5526 "map element value store of cleared call register",
5527 .insns = {
5528 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5530 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5531 BPF_LD_MAP_FD(BPF_REG_1, 0),
5532 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5533 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5534 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5535 BPF_EXIT_INSN(),
5536 },
5537 .fixup_map2 = { 3 },
5538 .errstr_unpriv = "R1 !read_ok",
5539 .errstr = "R1 !read_ok",
5540 .result = REJECT,
5541 .result_unpriv = REJECT,
5542 },
5543 {
5544 "map element value with unaligned store",
5545 .insns = {
5546 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5548 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5549 BPF_LD_MAP_FD(BPF_REG_1, 0),
5550 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5551 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5553 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5554 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5555 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5556 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5557 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5558 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5559 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5561 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5562 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5563 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5564 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5565 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5566 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5567 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5568 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5569 BPF_EXIT_INSN(),
5570 },
5571 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005572 .errstr_unpriv = "R0 leaks addr",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005573 .result = ACCEPT,
5574 .result_unpriv = REJECT,
5575 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5576 },
5577 {
5578 "map element value with unaligned load",
5579 .insns = {
5580 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5581 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5582 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5583 BPF_LD_MAP_FD(BPF_REG_1, 0),
5584 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5585 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5586 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5587 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5589 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5590 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5591 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5592 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5593 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5595 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5596 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5597 BPF_EXIT_INSN(),
5598 },
5599 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005600 .errstr_unpriv = "R0 leaks addr",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005601 .result = ACCEPT,
5602 .result_unpriv = REJECT,
5603 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5604 },
5605 {
5606 "map element value illegal alu op, 1",
5607 .insns = {
5608 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5610 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5611 BPF_LD_MAP_FD(BPF_REG_1, 0),
5612 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5613 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5614 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5615 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5616 BPF_EXIT_INSN(),
5617 },
5618 .fixup_map2 = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08005619 .errstr = "R0 bitwise operator &= on pointer",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005620 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005621 },
5622 {
5623 "map element value illegal alu op, 2",
5624 .insns = {
5625 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5627 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5628 BPF_LD_MAP_FD(BPF_REG_1, 0),
5629 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5630 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5631 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5632 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5633 BPF_EXIT_INSN(),
5634 },
5635 .fixup_map2 = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08005636 .errstr = "R0 32-bit pointer arithmetic prohibited",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005637 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005638 },
5639 {
5640 "map element value illegal alu op, 3",
5641 .insns = {
5642 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5644 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5645 BPF_LD_MAP_FD(BPF_REG_1, 0),
5646 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5647 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5648 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5649 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5650 BPF_EXIT_INSN(),
5651 },
5652 .fixup_map2 = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08005653 .errstr = "R0 pointer arithmetic with /= operator",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005654 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005655 },
5656 {
5657 "map element value illegal alu op, 4",
5658 .insns = {
5659 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5661 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5662 BPF_LD_MAP_FD(BPF_REG_1, 0),
5663 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5664 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5665 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5666 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5667 BPF_EXIT_INSN(),
5668 },
5669 .fixup_map2 = { 3 },
5670 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5671 .errstr = "invalid mem access 'inv'",
5672 .result = REJECT,
5673 .result_unpriv = REJECT,
5674 },
5675 {
5676 "map element value illegal alu op, 5",
5677 .insns = {
5678 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5680 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5681 BPF_LD_MAP_FD(BPF_REG_1, 0),
5682 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5683 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5684 BPF_MOV64_IMM(BPF_REG_3, 4096),
5685 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5687 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5688 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5689 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5690 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5691 BPF_EXIT_INSN(),
5692 },
5693 .fixup_map2 = { 3 },
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005694 .errstr = "R0 invalid mem access 'inv'",
5695 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005696 },
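	/* Same spill/fill round trip as above, but with the map value pointer
	 * already advanced to &test_val.foo before being spilled.
	 */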
5697 {
5698 "map element value is preserved across register spilling",
Gianluca Borellof0318d02017-01-09 10:19:48 -08005699 .insns = {
5700 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5702 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5703 BPF_LD_MAP_FD(BPF_REG_1, 0),
5704 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5705 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5707 offsetof(struct test_val, foo)),
5708 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5709 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5711 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5712 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5713 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5714 BPF_EXIT_INSN(),
5715 },
5716 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005717 .errstr_unpriv = "R0 leaks addr",
Gianluca Borellof0318d02017-01-09 10:19:48 -08005718 .result = ACCEPT,
5719 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005720 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Gianluca Borellof0318d02017-01-09 10:19:48 -08005721 },
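	/*
	 * Variable-size helper arguments: before a pointer/size pair is
	 * passed to bpf_probe_read(), the size register needs a provable
	 * minimum and maximum (via a bitwise AND or signed/unsigned
	 * conditional jumps), and every byte the range can cover must be
	 * valid for the stack or map value it points into.
	 */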
Gianluca Borello06c1c042017-01-09 10:19:49 -08005722 {
5723 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5724 .insns = {
5725 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5727 BPF_MOV64_IMM(BPF_REG_0, 0),
5728 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5729 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5730 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5731 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5732 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5733 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5734 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5735 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5736 BPF_MOV64_IMM(BPF_REG_2, 16),
5737 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5738 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5739 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5740 BPF_MOV64_IMM(BPF_REG_4, 0),
5741 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5742 BPF_MOV64_IMM(BPF_REG_3, 0),
5743 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5744 BPF_MOV64_IMM(BPF_REG_0, 0),
5745 BPF_EXIT_INSN(),
5746 },
5747 .result = ACCEPT,
5748 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5749 },
5750 {
5751 "helper access to variable memory: stack, bitwise AND, zero included",
5752 .insns = {
5753 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5755 BPF_MOV64_IMM(BPF_REG_2, 16),
5756 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5757 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5758 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5759 BPF_MOV64_IMM(BPF_REG_3, 0),
5760 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5761 BPF_EXIT_INSN(),
5762 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08005763 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005764 .result = REJECT,
5765 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5766 },
5767 {
5768 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5769 .insns = {
5770 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5772 BPF_MOV64_IMM(BPF_REG_2, 16),
5773 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5774 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5775 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5776 BPF_MOV64_IMM(BPF_REG_4, 0),
5777 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5778 BPF_MOV64_IMM(BPF_REG_3, 0),
5779 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5780 BPF_MOV64_IMM(BPF_REG_0, 0),
5781 BPF_EXIT_INSN(),
5782 },
5783 .errstr = "invalid stack type R1 off=-64 access_size=65",
5784 .result = REJECT,
5785 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5786 },
5787 {
5788 "helper access to variable memory: stack, JMP, correct bounds",
5789 .insns = {
5790 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5792 BPF_MOV64_IMM(BPF_REG_0, 0),
5793 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5794 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5795 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5796 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5797 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5798 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5799 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5800 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5801 BPF_MOV64_IMM(BPF_REG_2, 16),
5802 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5803 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5804 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5805 BPF_MOV64_IMM(BPF_REG_4, 0),
5806 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5807 BPF_MOV64_IMM(BPF_REG_3, 0),
5808 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5809 BPF_MOV64_IMM(BPF_REG_0, 0),
5810 BPF_EXIT_INSN(),
5811 },
5812 .result = ACCEPT,
5813 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5814 },
5815 {
5816 "helper access to variable memory: stack, JMP (signed), correct bounds",
5817 .insns = {
5818 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5820 BPF_MOV64_IMM(BPF_REG_0, 0),
5821 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5822 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5823 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5824 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5825 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5826 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5827 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5828 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5829 BPF_MOV64_IMM(BPF_REG_2, 16),
5830 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5831 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5832 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5833 BPF_MOV64_IMM(BPF_REG_4, 0),
5834 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5835 BPF_MOV64_IMM(BPF_REG_3, 0),
5836 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5837 BPF_MOV64_IMM(BPF_REG_0, 0),
5838 BPF_EXIT_INSN(),
5839 },
5840 .result = ACCEPT,
5841 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5842 },
5843 {
5844 "helper access to variable memory: stack, JMP, bounds + offset",
5845 .insns = {
5846 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5847 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5848 BPF_MOV64_IMM(BPF_REG_2, 16),
5849 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5850 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5851 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5852 BPF_MOV64_IMM(BPF_REG_4, 0),
5853 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5854 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5855 BPF_MOV64_IMM(BPF_REG_3, 0),
5856 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5857 BPF_MOV64_IMM(BPF_REG_0, 0),
5858 BPF_EXIT_INSN(),
5859 },
5860 .errstr = "invalid stack type R1 off=-64 access_size=65",
5861 .result = REJECT,
5862 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5863 },
5864 {
5865 "helper access to variable memory: stack, JMP, wrong max",
5866 .insns = {
5867 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5869 BPF_MOV64_IMM(BPF_REG_2, 16),
5870 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5871 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5872 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5873 BPF_MOV64_IMM(BPF_REG_4, 0),
5874 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5875 BPF_MOV64_IMM(BPF_REG_3, 0),
5876 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5877 BPF_MOV64_IMM(BPF_REG_0, 0),
5878 BPF_EXIT_INSN(),
5879 },
5880 .errstr = "invalid stack type R1 off=-64 access_size=65",
5881 .result = REJECT,
5882 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5883 },
5884 {
5885 "helper access to variable memory: stack, JMP, no max check",
5886 .insns = {
5887 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5889 BPF_MOV64_IMM(BPF_REG_2, 16),
5890 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5891 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5892 BPF_MOV64_IMM(BPF_REG_4, 0),
5893 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5894 BPF_MOV64_IMM(BPF_REG_3, 0),
5895 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5896 BPF_MOV64_IMM(BPF_REG_0, 0),
5897 BPF_EXIT_INSN(),
5898 },
Edward Creef65b1842017-08-07 15:27:12 +01005899 /* because max wasn't checked, signed min is negative */
5900 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005901 .result = REJECT,
5902 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5903 },
5904 {
5905 "helper access to variable memory: stack, JMP, no min check",
5906 .insns = {
5907 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5909 BPF_MOV64_IMM(BPF_REG_2, 16),
5910 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5911 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5912 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5913 BPF_MOV64_IMM(BPF_REG_3, 0),
5914 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5915 BPF_MOV64_IMM(BPF_REG_0, 0),
5916 BPF_EXIT_INSN(),
5917 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08005918 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005919 .result = REJECT,
5920 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5921 },
5922 {
5923 "helper access to variable memory: stack, JMP (signed), no min check",
5924 .insns = {
5925 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5927 BPF_MOV64_IMM(BPF_REG_2, 16),
5928 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5929 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5930 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5931 BPF_MOV64_IMM(BPF_REG_3, 0),
5932 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5933 BPF_MOV64_IMM(BPF_REG_0, 0),
5934 BPF_EXIT_INSN(),
5935 },
5936 .errstr = "R2 min value is negative",
5937 .result = REJECT,
5938 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5939 },
5940 {
5941 "helper access to variable memory: map, JMP, correct bounds",
5942 .insns = {
5943 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5945 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5946 BPF_LD_MAP_FD(BPF_REG_1, 0),
5947 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5948 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5949 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5950 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5951 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5952 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5953 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5954 sizeof(struct test_val), 4),
5955 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02005956 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005957 BPF_MOV64_IMM(BPF_REG_3, 0),
5958 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5959 BPF_MOV64_IMM(BPF_REG_0, 0),
5960 BPF_EXIT_INSN(),
5961 },
5962 .fixup_map2 = { 3 },
5963 .result = ACCEPT,
5964 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5965 },
5966 {
5967 "helper access to variable memory: map, JMP, wrong max",
5968 .insns = {
5969 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5971 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5972 BPF_LD_MAP_FD(BPF_REG_1, 0),
5973 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5974 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5975 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5976 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5977 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5978 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5979 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5980 sizeof(struct test_val) + 1, 4),
5981 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02005982 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005983 BPF_MOV64_IMM(BPF_REG_3, 0),
5984 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5985 BPF_MOV64_IMM(BPF_REG_0, 0),
5986 BPF_EXIT_INSN(),
5987 },
5988 .fixup_map2 = { 3 },
5989 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5990 .result = REJECT,
5991 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5992 },
5993 {
5994 "helper access to variable memory: map adjusted, JMP, correct bounds",
5995 .insns = {
5996 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5998 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5999 BPF_LD_MAP_FD(BPF_REG_1, 0),
6000 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6001 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6002 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6004 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6005 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6006 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6007 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6008 sizeof(struct test_val) - 20, 4),
6009 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006010 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006011 BPF_MOV64_IMM(BPF_REG_3, 0),
6012 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6013 BPF_MOV64_IMM(BPF_REG_0, 0),
6014 BPF_EXIT_INSN(),
6015 },
6016 .fixup_map2 = { 3 },
6017 .result = ACCEPT,
6018 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6019 },
6020 {
6021 "helper access to variable memory: map adjusted, JMP, wrong max",
6022 .insns = {
6023 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6025 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6026 BPF_LD_MAP_FD(BPF_REG_1, 0),
6027 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6028 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6029 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6031 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6032 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6033 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6034 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6035 sizeof(struct test_val) - 19, 4),
6036 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006037 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006038 BPF_MOV64_IMM(BPF_REG_3, 0),
6039 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6040 BPF_MOV64_IMM(BPF_REG_0, 0),
6041 BPF_EXIT_INSN(),
6042 },
6043 .fixup_map2 = { 3 },
6044 .errstr = "R1 min value is outside of the array range",
6045 .result = REJECT,
6046 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6047 },
6048 {
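	/* The bpf_csum_diff() tests in this group exercise
	 * ARG_PTR_TO_MEM_OR_NULL: a NULL pointer is only acceptable together
	 * with a size that is provably 0.  NULL and non-NULL stack, map value
	 * and packet pointers are combined with zero and possibly-zero sizes.
	 */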
6048 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006049 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Edward Creef65b1842017-08-07 15:27:12 +01006050 .insns = {
6051 BPF_MOV64_IMM(BPF_REG_1, 0),
6052 BPF_MOV64_IMM(BPF_REG_2, 0),
6053 BPF_MOV64_IMM(BPF_REG_3, 0),
6054 BPF_MOV64_IMM(BPF_REG_4, 0),
6055 BPF_MOV64_IMM(BPF_REG_5, 0),
6056 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6057 BPF_EXIT_INSN(),
6058 },
6059 .result = ACCEPT,
6060 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6061 },
6062 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006063 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006064 .insns = {
6065 BPF_MOV64_IMM(BPF_REG_1, 0),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08006066 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01006067 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6068 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006069 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6070 BPF_MOV64_IMM(BPF_REG_3, 0),
6071 BPF_MOV64_IMM(BPF_REG_4, 0),
6072 BPF_MOV64_IMM(BPF_REG_5, 0),
6073 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6074 BPF_EXIT_INSN(),
6075 },
Edward Creef65b1842017-08-07 15:27:12 +01006076 .errstr = "R1 type=inv expected=fp",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006077 .result = REJECT,
6078 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6079 },
6080 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006081 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006082 .insns = {
6083 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6084 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6085 BPF_MOV64_IMM(BPF_REG_2, 0),
6086 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6087 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
6088 BPF_MOV64_IMM(BPF_REG_3, 0),
6089 BPF_MOV64_IMM(BPF_REG_4, 0),
6090 BPF_MOV64_IMM(BPF_REG_5, 0),
6091 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6092 BPF_EXIT_INSN(),
6093 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08006094 .result = ACCEPT,
6095 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6096 },
6097 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006098 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006099 .insns = {
6100 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6101 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6103 BPF_LD_MAP_FD(BPF_REG_1, 0),
6104 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6105 BPF_FUNC_map_lookup_elem),
6106 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6107 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6108 BPF_MOV64_IMM(BPF_REG_2, 0),
6109 BPF_MOV64_IMM(BPF_REG_3, 0),
6110 BPF_MOV64_IMM(BPF_REG_4, 0),
6111 BPF_MOV64_IMM(BPF_REG_5, 0),
6112 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6113 BPF_EXIT_INSN(),
6114 },
6115 .fixup_map1 = { 3 },
6116 .result = ACCEPT,
6117 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6118 },
6119 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006120 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006121 .insns = {
6122 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6123 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6125 BPF_LD_MAP_FD(BPF_REG_1, 0),
6126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6127 BPF_FUNC_map_lookup_elem),
6128 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6129 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6130 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
6131 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6133 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6134 BPF_MOV64_IMM(BPF_REG_3, 0),
6135 BPF_MOV64_IMM(BPF_REG_4, 0),
6136 BPF_MOV64_IMM(BPF_REG_5, 0),
6137 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6138 BPF_EXIT_INSN(),
6139 },
6140 .fixup_map1 = { 3 },
6141 .result = ACCEPT,
6142 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6143 },
6144 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006145 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006146 .insns = {
6147 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6148 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6149 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6150 BPF_LD_MAP_FD(BPF_REG_1, 0),
6151 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6152 BPF_FUNC_map_lookup_elem),
6153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6154 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6155 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6156 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6157 BPF_MOV64_IMM(BPF_REG_3, 0),
6158 BPF_MOV64_IMM(BPF_REG_4, 0),
6159 BPF_MOV64_IMM(BPF_REG_5, 0),
6160 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6161 BPF_EXIT_INSN(),
6162 },
6163 .fixup_map1 = { 3 },
6164 .result = ACCEPT,
6165 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6166 },
6167 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006168 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006169 .insns = {
6170 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6171 offsetof(struct __sk_buff, data)),
6172 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6173 offsetof(struct __sk_buff, data_end)),
6174 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
6175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6176 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
6177 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
6178 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
6179 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6180 BPF_MOV64_IMM(BPF_REG_3, 0),
6181 BPF_MOV64_IMM(BPF_REG_4, 0),
6182 BPF_MOV64_IMM(BPF_REG_5, 0),
6183 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6184 BPF_EXIT_INSN(),
6185 },
6186 .result = ACCEPT,
Gianluca Borello06c1c042017-01-09 10:19:49 -08006187 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006188 .retval = 0 /* csum_diff of 64-byte packet */,
Gianluca Borello06c1c042017-01-09 10:19:49 -08006189 },
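	/* The destination buffer of bpf_probe_read() is not
	 * ARG_PTR_TO_MEM_OR_NULL, so passing NULL in r1 is rejected even with
	 * size 0, while a valid stack or map pointer with a zero (or possibly
	 * zero) size is still accepted.
	 */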
6190 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006191 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6192 .insns = {
6193 BPF_MOV64_IMM(BPF_REG_1, 0),
6194 BPF_MOV64_IMM(BPF_REG_2, 0),
6195 BPF_MOV64_IMM(BPF_REG_3, 0),
6196 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6197 BPF_EXIT_INSN(),
6198 },
6199 .errstr = "R1 type=inv expected=fp",
6200 .result = REJECT,
6201 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6202 },
6203 {
6204 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6205 .insns = {
6206 BPF_MOV64_IMM(BPF_REG_1, 0),
6207 BPF_MOV64_IMM(BPF_REG_2, 1),
6208 BPF_MOV64_IMM(BPF_REG_3, 0),
6209 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6210 BPF_EXIT_INSN(),
6211 },
6212 .errstr = "R1 type=inv expected=fp",
6213 .result = REJECT,
6214 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6215 },
6216 {
6217 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6218 .insns = {
6219 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6221 BPF_MOV64_IMM(BPF_REG_2, 0),
6222 BPF_MOV64_IMM(BPF_REG_3, 0),
6223 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6224 BPF_EXIT_INSN(),
6225 },
6226 .result = ACCEPT,
6227 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6228 },
6229 {
6230 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6231 .insns = {
6232 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6233 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6235 BPF_LD_MAP_FD(BPF_REG_1, 0),
6236 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6237 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6238 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6239 BPF_MOV64_IMM(BPF_REG_2, 0),
6240 BPF_MOV64_IMM(BPF_REG_3, 0),
6241 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6242 BPF_EXIT_INSN(),
6243 },
6244 .fixup_map1 = { 3 },
6245 .result = ACCEPT,
6246 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6247 },
6248 {
6249 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6250 .insns = {
6251 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6252 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6253 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6254 BPF_LD_MAP_FD(BPF_REG_1, 0),
6255 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6256 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6257 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6258 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6259 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6261 BPF_MOV64_IMM(BPF_REG_3, 0),
6262 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6263 BPF_EXIT_INSN(),
6264 },
6265 .fixup_map1 = { 3 },
6266 .result = ACCEPT,
6267 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6268 },
6269 {
6270 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6271 .insns = {
6272 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6273 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6275 BPF_LD_MAP_FD(BPF_REG_1, 0),
6276 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6277 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6278 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6279 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6280 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
6281 BPF_MOV64_IMM(BPF_REG_3, 0),
6282 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6283 BPF_EXIT_INSN(),
6284 },
6285 .fixup_map1 = { 3 },
6286 .result = ACCEPT,
6287 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6288 },
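	/* The stack buffer handed to bpf_probe_read() must be initialized for
	 * the largest size the verifier cannot rule out.  The first test below
	 * leaves the 8 bytes at fp-32 untouched and is rejected; the second
	 * zeroes the whole 64-byte window first and is accepted.
	 */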
6289 {
Gianluca Borello06c1c042017-01-09 10:19:49 -08006290 "helper access to variable memory: 8 bytes leak",
6291 .insns = {
6292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6294 BPF_MOV64_IMM(BPF_REG_0, 0),
6295 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6296 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6297 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6298 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6299 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6300 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6301 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08006302 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01006303 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6304 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006305 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
6306 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6307 BPF_MOV64_IMM(BPF_REG_3, 0),
6308 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6309 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6310 BPF_EXIT_INSN(),
6311 },
6312 .errstr = "invalid indirect read from stack off -64+32 size 64",
6313 .result = REJECT,
6314 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6315 },
6316 {
6317 "helper access to variable memory: 8 bytes no leak (init memory)",
6318 .insns = {
6319 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6320 BPF_MOV64_IMM(BPF_REG_0, 0),
6321 BPF_MOV64_IMM(BPF_REG_0, 0),
6322 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6323 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6324 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6325 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6326 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6327 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6328 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6329 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6331 BPF_MOV64_IMM(BPF_REG_2, 0),
6332 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
6333 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
6334 BPF_MOV64_IMM(BPF_REG_3, 0),
6335 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6336 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6337 BPF_EXIT_INSN(),
6338 },
6339 .result = ACCEPT,
6340 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6341 },
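	/* Two examples of insufficient bounding of a map value index: the AND
	 * with a negative immediate and the arithmetic in "invalid range
	 * check" leave the register with a max value larger than the array,
	 * so the resulting store is rejected.
	 */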
Josef Bacik29200c12017-02-03 16:25:23 -05006342 {
6343 "invalid and of negative number",
6344 .insns = {
6345 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6346 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6348 BPF_LD_MAP_FD(BPF_REG_1, 0),
6349 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6350 BPF_FUNC_map_lookup_elem),
6351 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Edward Creef65b1842017-08-07 15:27:12 +01006352 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik29200c12017-02-03 16:25:23 -05006353 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
6354 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
6355 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6356 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
6357 offsetof(struct test_val, foo)),
6358 BPF_EXIT_INSN(),
6359 },
6360 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006361 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05006362 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006363 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik29200c12017-02-03 16:25:23 -05006364 },
6365 {
6366 "invalid range check",
6367 .insns = {
6368 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6369 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6370 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6371 BPF_LD_MAP_FD(BPF_REG_1, 0),
6372 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6373 BPF_FUNC_map_lookup_elem),
6374 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
6375 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6376 BPF_MOV64_IMM(BPF_REG_9, 1),
6377 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
6378 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
6379 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
6380 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
6381 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
6382 BPF_MOV32_IMM(BPF_REG_3, 1),
6383 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
6384 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
6385 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
6386 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
6387 BPF_MOV64_REG(BPF_REG_0, 0),
6388 BPF_EXIT_INSN(),
6389 },
6390 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006391 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05006392 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006393 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006394 },
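	/* Map-in-map tests: the inner map pointer returned by the first
	 * lookup may only be passed to bpf_map_lookup_elem() as-is and after
	 * a NULL check.  Pointer arithmetic on it, or skipping the NULL
	 * check, is rejected.
	 */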
6395 {
6396 "map in map access",
6397 .insns = {
6398 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6399 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6401 BPF_LD_MAP_FD(BPF_REG_1, 0),
6402 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6403 BPF_FUNC_map_lookup_elem),
6404 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6405 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6406 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6408 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6409 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6410 BPF_FUNC_map_lookup_elem),
6411 BPF_MOV64_REG(BPF_REG_0, 0),
6412 BPF_EXIT_INSN(),
6413 },
6414 .fixup_map_in_map = { 3 },
6415 .result = ACCEPT,
6416 },
6417 {
6418 "invalid inner map pointer",
6419 .insns = {
6420 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6421 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6422 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6423 BPF_LD_MAP_FD(BPF_REG_1, 0),
6424 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6425 BPF_FUNC_map_lookup_elem),
6426 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6427 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6428 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6429 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6430 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6431 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6432 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6433 BPF_FUNC_map_lookup_elem),
6434 BPF_MOV64_REG(BPF_REG_0, 0),
6435 BPF_EXIT_INSN(),
6436 },
6437 .fixup_map_in_map = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08006438 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006439 .result = REJECT,
6440 },
6441 {
6442 "forgot null checking on the inner map pointer",
6443 .insns = {
6444 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6445 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6446 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6447 BPF_LD_MAP_FD(BPF_REG_1, 0),
6448 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6449 BPF_FUNC_map_lookup_elem),
6450 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6451 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6453 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6455 BPF_FUNC_map_lookup_elem),
6456 BPF_MOV64_REG(BPF_REG_0, 0),
6457 BPF_EXIT_INSN(),
6458 },
6459 .fixup_map_in_map = { 3 },
6460 .errstr = "R1 type=map_value_or_null expected=map_ptr",
6461 .result = REJECT,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006462 },
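	/* BPF_LD_ABS uses an implicit calling convention: it expects the skb
	 * in r6 and clobbers the caller-saved registers r1-r5.  Reading any
	 * of r1-r5 after the instruction must therefore fail with "!read_ok",
	 * while r7 is preserved.
	 */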
6463 {
6464 "ld_abs: check calling conv, r1",
6465 .insns = {
6466 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6467 BPF_MOV64_IMM(BPF_REG_1, 0),
6468 BPF_LD_ABS(BPF_W, -0x200000),
6469 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6470 BPF_EXIT_INSN(),
6471 },
6472 .errstr = "R1 !read_ok",
6473 .result = REJECT,
6474 },
6475 {
6476 "ld_abs: check calling conv, r2",
6477 .insns = {
6478 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6479 BPF_MOV64_IMM(BPF_REG_2, 0),
6480 BPF_LD_ABS(BPF_W, -0x200000),
6481 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6482 BPF_EXIT_INSN(),
6483 },
6484 .errstr = "R2 !read_ok",
6485 .result = REJECT,
6486 },
6487 {
6488 "ld_abs: check calling conv, r3",
6489 .insns = {
6490 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6491 BPF_MOV64_IMM(BPF_REG_3, 0),
6492 BPF_LD_ABS(BPF_W, -0x200000),
6493 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6494 BPF_EXIT_INSN(),
6495 },
6496 .errstr = "R3 !read_ok",
6497 .result = REJECT,
6498 },
6499 {
6500 "ld_abs: check calling conv, r4",
6501 .insns = {
6502 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6503 BPF_MOV64_IMM(BPF_REG_4, 0),
6504 BPF_LD_ABS(BPF_W, -0x200000),
6505 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6506 BPF_EXIT_INSN(),
6507 },
6508 .errstr = "R4 !read_ok",
6509 .result = REJECT,
6510 },
6511 {
6512 "ld_abs: check calling conv, r5",
6513 .insns = {
6514 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6515 BPF_MOV64_IMM(BPF_REG_5, 0),
6516 BPF_LD_ABS(BPF_W, -0x200000),
6517 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6518 BPF_EXIT_INSN(),
6519 },
6520 .errstr = "R5 !read_ok",
6521 .result = REJECT,
6522 },
6523 {
6524 "ld_abs: check calling conv, r7",
6525 .insns = {
6526 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6527 BPF_MOV64_IMM(BPF_REG_7, 0),
6528 BPF_LD_ABS(BPF_W, -0x200000),
6529 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6530 BPF_EXIT_INSN(),
6531 },
6532 .result = ACCEPT,
6533 },
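	/* After a helper that may change packet data (bpf_skb_vlan_push()
	 * here), the program reloads the skb pointer into r6 before issuing
	 * further LD_ABS instructions; the verifier accepts this sequence.
	 */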
6534 {
Daniel Borkmann87ab8192017-12-14 21:07:27 +01006535 "ld_abs: tests on r6 and skb data reload helper",
6536 .insns = {
6537 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6538 BPF_LD_ABS(BPF_B, 0),
6539 BPF_LD_ABS(BPF_H, 0),
6540 BPF_LD_ABS(BPF_W, 0),
6541 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
6542 BPF_MOV64_IMM(BPF_REG_6, 0),
6543 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
6544 BPF_MOV64_IMM(BPF_REG_2, 1),
6545 BPF_MOV64_IMM(BPF_REG_3, 2),
6546 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6547 BPF_FUNC_skb_vlan_push),
6548 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
6549 BPF_LD_ABS(BPF_B, 0),
6550 BPF_LD_ABS(BPF_H, 0),
6551 BPF_LD_ABS(BPF_W, 0),
6552 BPF_MOV64_IMM(BPF_REG_0, 42),
6553 BPF_EXIT_INSN(),
6554 },
6555 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6556 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006557 .retval = 42 /* ultimate return value */,
Daniel Borkmann87ab8192017-12-14 21:07:27 +01006558 },
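	/* BPF_LD_IND follows the same calling convention as BPF_LD_ABS; the
	 * same r1-r5 checks are repeated below with an indexed load.
	 */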
6559 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006560 "ld_ind: check calling conv, r1",
6561 .insns = {
6562 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6563 BPF_MOV64_IMM(BPF_REG_1, 1),
6564 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
6565 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6566 BPF_EXIT_INSN(),
6567 },
6568 .errstr = "R1 !read_ok",
6569 .result = REJECT,
6570 },
6571 {
6572 "ld_ind: check calling conv, r2",
6573 .insns = {
6574 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6575 BPF_MOV64_IMM(BPF_REG_2, 1),
6576 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
6577 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6578 BPF_EXIT_INSN(),
6579 },
6580 .errstr = "R2 !read_ok",
6581 .result = REJECT,
6582 },
6583 {
6584 "ld_ind: check calling conv, r3",
6585 .insns = {
6586 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6587 BPF_MOV64_IMM(BPF_REG_3, 1),
6588 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
6589 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6590 BPF_EXIT_INSN(),
6591 },
6592 .errstr = "R3 !read_ok",
6593 .result = REJECT,
6594 },
6595 {
6596 "ld_ind: check calling conv, r4",
6597 .insns = {
6598 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6599 BPF_MOV64_IMM(BPF_REG_4, 1),
6600 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
6601 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6602 BPF_EXIT_INSN(),
6603 },
6604 .errstr = "R4 !read_ok",
6605 .result = REJECT,
6606 },
6607 {
6608 "ld_ind: check calling conv, r5",
6609 .insns = {
6610 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6611 BPF_MOV64_IMM(BPF_REG_5, 1),
6612 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
6613 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6614 BPF_EXIT_INSN(),
6615 },
6616 .errstr = "R5 !read_ok",
6617 .result = REJECT,
6618 },
6619 {
6620 "ld_ind: check calling conv, r7",
6621 .insns = {
6622 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6623 BPF_MOV64_IMM(BPF_REG_7, 1),
6624 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
6625 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6626 BPF_EXIT_INSN(),
6627 },
6628 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006629 .retval = 1,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006630 },
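	/* Context access checks: bpf_perf_event_data->sample_period may be
	 * loaded with byte, half, word or dword size (the #if picks the
	 * offset of the low-order bytes on big-endian), while narrow loads of
	 * skb->data and skb->tc_classid are rejected as invalid bpf_context
	 * access.
	 */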
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006631 {
6632 "check bpf_perf_event_data->sample_period byte load permitted",
6633 .insns = {
6634 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006635#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006636 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6637 offsetof(struct bpf_perf_event_data, sample_period)),
6638#else
6639 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6640 offsetof(struct bpf_perf_event_data, sample_period) + 7),
6641#endif
6642 BPF_EXIT_INSN(),
6643 },
6644 .result = ACCEPT,
6645 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6646 },
6647 {
6648 "check bpf_perf_event_data->sample_period half load permitted",
6649 .insns = {
6650 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006651#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006652 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6653 offsetof(struct bpf_perf_event_data, sample_period)),
6654#else
6655 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6656 offsetof(struct bpf_perf_event_data, sample_period) + 6),
6657#endif
6658 BPF_EXIT_INSN(),
6659 },
6660 .result = ACCEPT,
6661 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6662 },
6663 {
6664 "check bpf_perf_event_data->sample_period word load permitted",
6665 .insns = {
6666 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006667#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006668 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6669 offsetof(struct bpf_perf_event_data, sample_period)),
6670#else
6671 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6672 offsetof(struct bpf_perf_event_data, sample_period) + 4),
6673#endif
6674 BPF_EXIT_INSN(),
6675 },
6676 .result = ACCEPT,
6677 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6678 },
6679 {
6680 "check bpf_perf_event_data->sample_period dword load permitted",
6681 .insns = {
6682 BPF_MOV64_IMM(BPF_REG_0, 0),
6683 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6684 offsetof(struct bpf_perf_event_data, sample_period)),
6685 BPF_EXIT_INSN(),
6686 },
6687 .result = ACCEPT,
6688 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6689 },
6690 {
6691 "check skb->data half load not permitted",
6692 .insns = {
6693 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006694#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006695 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6696 offsetof(struct __sk_buff, data)),
6697#else
6698 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6699 offsetof(struct __sk_buff, data) + 2),
6700#endif
6701 BPF_EXIT_INSN(),
6702 },
6703 .result = REJECT,
6704 .errstr = "invalid bpf_context access",
6705 },
6706 {
6707 "check skb->tc_classid half load not permitted for lwt prog",
6708 .insns = {
6709 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006710#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006711 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6712 offsetof(struct __sk_buff, tc_classid)),
6713#else
6714 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6715 offsetof(struct __sk_buff, tc_classid) + 2),
6716#endif
6717 BPF_EXIT_INSN(),
6718 },
6719 .result = REJECT,
6720 .errstr = "invalid bpf_context access",
6721 .prog_type = BPF_PROG_TYPE_LWT_IN,
6722 },
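	/* The "mixing signed and unsigned" series checks that a signed
	 * comparison alone cannot bound a value whose unsigned range is
	 * unknown (and vice versa): most variants must be rejected with
	 * "unbounded min value", while the ones that also establish a usable
	 * unsigned bound are accepted.
	 */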
Edward Creeb7122962017-07-21 00:00:24 +02006723 {
6724 "bounds checks mixing signed and unsigned, positive bounds",
6725 .insns = {
6726 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6727 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6729 BPF_LD_MAP_FD(BPF_REG_1, 0),
6730 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6731 BPF_FUNC_map_lookup_elem),
6732 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6733 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6734 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6735 BPF_MOV64_IMM(BPF_REG_2, 2),
6736 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6737 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6738 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6739 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6740 BPF_MOV64_IMM(BPF_REG_0, 0),
6741 BPF_EXIT_INSN(),
6742 },
6743 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006744 .errstr = "unbounded min value",
Edward Creeb7122962017-07-21 00:00:24 +02006745 .result = REJECT,
Edward Creeb7122962017-07-21 00:00:24 +02006746 },
6747 {
6748 "bounds checks mixing signed and unsigned",
6749 .insns = {
6750 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6751 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6753 BPF_LD_MAP_FD(BPF_REG_1, 0),
6754 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6755 BPF_FUNC_map_lookup_elem),
6756 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6757 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6758 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6759 BPF_MOV64_IMM(BPF_REG_2, -1),
6760 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6761 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6762 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6763 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6764 BPF_MOV64_IMM(BPF_REG_0, 0),
6765 BPF_EXIT_INSN(),
6766 },
6767 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006768 .errstr = "unbounded min value",
Edward Creeb7122962017-07-21 00:00:24 +02006769 .result = REJECT,
Edward Creeb7122962017-07-21 00:00:24 +02006770 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006771 {
6772 "bounds checks mixing signed and unsigned, variant 2",
6773 .insns = {
6774 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6775 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6777 BPF_LD_MAP_FD(BPF_REG_1, 0),
6778 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6779 BPF_FUNC_map_lookup_elem),
6780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6781 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6782 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6783 BPF_MOV64_IMM(BPF_REG_2, -1),
6784 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6785 BPF_MOV64_IMM(BPF_REG_8, 0),
6786 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6787 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6788 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6789 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6790 BPF_MOV64_IMM(BPF_REG_0, 0),
6791 BPF_EXIT_INSN(),
6792 },
6793 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006794 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006795 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006796 },
6797 {
6798 "bounds checks mixing signed and unsigned, variant 3",
6799 .insns = {
6800 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6801 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6803 BPF_LD_MAP_FD(BPF_REG_1, 0),
6804 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6805 BPF_FUNC_map_lookup_elem),
6806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6807 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6808 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6809 BPF_MOV64_IMM(BPF_REG_2, -1),
6810 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6811 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6812 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6813 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6814 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6815 BPF_MOV64_IMM(BPF_REG_0, 0),
6816 BPF_EXIT_INSN(),
6817 },
6818 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006819 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006820 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006821 },
6822 {
6823 "bounds checks mixing signed and unsigned, variant 4",
6824 .insns = {
6825 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6826 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6828 BPF_LD_MAP_FD(BPF_REG_1, 0),
6829 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6830 BPF_FUNC_map_lookup_elem),
6831 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6832 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6833 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6834 BPF_MOV64_IMM(BPF_REG_2, 1),
6835 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6836 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6837 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6838 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6839 BPF_MOV64_IMM(BPF_REG_0, 0),
6840 BPF_EXIT_INSN(),
6841 },
6842 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006843 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006844 },
6845 {
6846 "bounds checks mixing signed and unsigned, variant 5",
6847 .insns = {
6848 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6849 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6850 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6851 BPF_LD_MAP_FD(BPF_REG_1, 0),
6852 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6853 BPF_FUNC_map_lookup_elem),
6854 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6855 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6856 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6857 BPF_MOV64_IMM(BPF_REG_2, -1),
6858 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6859 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6861 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6862 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6863 BPF_MOV64_IMM(BPF_REG_0, 0),
6864 BPF_EXIT_INSN(),
6865 },
6866 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006867 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006868 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006869 },
6870 {
6871 "bounds checks mixing signed and unsigned, variant 6",
6872 .insns = {
6873 BPF_MOV64_IMM(BPF_REG_2, 0),
6874 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6876 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6877 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6878 BPF_MOV64_IMM(BPF_REG_6, -1),
6879 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6880 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6881 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6882 BPF_MOV64_IMM(BPF_REG_5, 0),
6883 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6884 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6885 BPF_FUNC_skb_load_bytes),
6886 BPF_MOV64_IMM(BPF_REG_0, 0),
6887 BPF_EXIT_INSN(),
6888 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006889 .errstr = "R4 min value is negative, either use unsigned",
6890 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006891 },
6892 {
6893 "bounds checks mixing signed and unsigned, variant 7",
6894 .insns = {
6895 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6896 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6898 BPF_LD_MAP_FD(BPF_REG_1, 0),
6899 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6900 BPF_FUNC_map_lookup_elem),
6901 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6902 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6903 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6904 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6905 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6906 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6907 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6908 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6909 BPF_MOV64_IMM(BPF_REG_0, 0),
6910 BPF_EXIT_INSN(),
6911 },
6912 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006913 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006914 },
6915 {
6916 "bounds checks mixing signed and unsigned, variant 8",
6917 .insns = {
6918 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6919 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6920 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6921 BPF_LD_MAP_FD(BPF_REG_1, 0),
6922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6923 BPF_FUNC_map_lookup_elem),
Daniel Borkmann86412502017-07-21 00:00:25 +02006924 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6925 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6926 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6927 BPF_MOV64_IMM(BPF_REG_2, -1),
6928 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6929 BPF_MOV64_IMM(BPF_REG_0, 0),
6930 BPF_EXIT_INSN(),
6931 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6932 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6933 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6934 BPF_MOV64_IMM(BPF_REG_0, 0),
6935 BPF_EXIT_INSN(),
6936 },
6937 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006938 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006939 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006940 },
6941 {
Edward Creef65b1842017-08-07 15:27:12 +01006942 "bounds checks mixing signed and unsigned, variant 9",
Daniel Borkmann86412502017-07-21 00:00:25 +02006943 .insns = {
6944 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6945 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6947 BPF_LD_MAP_FD(BPF_REG_1, 0),
6948 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6949 BPF_FUNC_map_lookup_elem),
6950 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6951 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6952 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6953 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6954 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6955 BPF_MOV64_IMM(BPF_REG_0, 0),
6956 BPF_EXIT_INSN(),
6957 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6958 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6959 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6960 BPF_MOV64_IMM(BPF_REG_0, 0),
6961 BPF_EXIT_INSN(),
6962 },
6963 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006964 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006965 },
6966 {
Edward Creef65b1842017-08-07 15:27:12 +01006967 "bounds checks mixing signed and unsigned, variant 10",
Daniel Borkmann86412502017-07-21 00:00:25 +02006968 .insns = {
6969 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6970 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6972 BPF_LD_MAP_FD(BPF_REG_1, 0),
6973 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6974 BPF_FUNC_map_lookup_elem),
6975 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6976 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6977 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6978 BPF_MOV64_IMM(BPF_REG_2, 0),
6979 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6980 BPF_MOV64_IMM(BPF_REG_0, 0),
6981 BPF_EXIT_INSN(),
6982 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6983 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6984 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6985 BPF_MOV64_IMM(BPF_REG_0, 0),
6986 BPF_EXIT_INSN(),
6987 },
6988 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006989 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006990 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006991 },
6992 {
Edward Creef65b1842017-08-07 15:27:12 +01006993 "bounds checks mixing signed and unsigned, variant 11",
Daniel Borkmann86412502017-07-21 00:00:25 +02006994 .insns = {
6995 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6996 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6998 BPF_LD_MAP_FD(BPF_REG_1, 0),
6999 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7000 BPF_FUNC_map_lookup_elem),
7001 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7002 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7003 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7004 BPF_MOV64_IMM(BPF_REG_2, -1),
7005 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7006 /* Dead branch. */
7007 BPF_MOV64_IMM(BPF_REG_0, 0),
7008 BPF_EXIT_INSN(),
7009 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7010 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7011 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7012 BPF_MOV64_IMM(BPF_REG_0, 0),
7013 BPF_EXIT_INSN(),
7014 },
7015 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007016 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007017 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007018 },
7019 {
Edward Creef65b1842017-08-07 15:27:12 +01007020 "bounds checks mixing signed and unsigned, variant 12",
Daniel Borkmann86412502017-07-21 00:00:25 +02007021 .insns = {
7022 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7023 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7025 BPF_LD_MAP_FD(BPF_REG_1, 0),
7026 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7027 BPF_FUNC_map_lookup_elem),
7028 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7029 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7030 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7031 BPF_MOV64_IMM(BPF_REG_2, -6),
7032 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7033 BPF_MOV64_IMM(BPF_REG_0, 0),
7034 BPF_EXIT_INSN(),
7035 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7036 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7037 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7038 BPF_MOV64_IMM(BPF_REG_0, 0),
7039 BPF_EXIT_INSN(),
7040 },
7041 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007042 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007043 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007044 },
7045 {
Edward Creef65b1842017-08-07 15:27:12 +01007046 "bounds checks mixing signed and unsigned, variant 13",
Daniel Borkmann86412502017-07-21 00:00:25 +02007047 .insns = {
7048 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7049 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7050 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7051 BPF_LD_MAP_FD(BPF_REG_1, 0),
7052 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7053 BPF_FUNC_map_lookup_elem),
7054 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7055 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7056 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7057 BPF_MOV64_IMM(BPF_REG_2, 2),
7058 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7059 BPF_MOV64_IMM(BPF_REG_7, 1),
7060 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
7061 BPF_MOV64_IMM(BPF_REG_0, 0),
7062 BPF_EXIT_INSN(),
7063 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
7064 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
7065 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
7066 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7067 BPF_MOV64_IMM(BPF_REG_0, 0),
7068 BPF_EXIT_INSN(),
7069 },
7070 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007071 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007072 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007073 },
7074 {
Edward Creef65b1842017-08-07 15:27:12 +01007075 "bounds checks mixing signed and unsigned, variant 14",
Daniel Borkmann86412502017-07-21 00:00:25 +02007076 .insns = {
7077 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
7078 offsetof(struct __sk_buff, mark)),
7079 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7080 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7082 BPF_LD_MAP_FD(BPF_REG_1, 0),
7083 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7084 BPF_FUNC_map_lookup_elem),
7085 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7086 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7087 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7088 BPF_MOV64_IMM(BPF_REG_2, -1),
7089 BPF_MOV64_IMM(BPF_REG_8, 2),
7090 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
7091 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
7092 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7093 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7094 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7095 BPF_MOV64_IMM(BPF_REG_0, 0),
7096 BPF_EXIT_INSN(),
7097 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
7098 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
7099 },
7100 .fixup_map1 = { 4 },
Daniel Borkmann6f161012018-01-18 01:15:21 +01007101 .errstr = "R0 invalid mem access 'inv'",
Daniel Borkmann86412502017-07-21 00:00:25 +02007102 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007103 },
7104 {
Edward Creef65b1842017-08-07 15:27:12 +01007105 "bounds checks mixing signed and unsigned, variant 15",
Daniel Borkmann86412502017-07-21 00:00:25 +02007106 .insns = {
7107 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7108 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7110 BPF_LD_MAP_FD(BPF_REG_1, 0),
7111 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7112 BPF_FUNC_map_lookup_elem),
7113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7114 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7115 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7116 BPF_MOV64_IMM(BPF_REG_2, -6),
7117 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7118 BPF_MOV64_IMM(BPF_REG_0, 0),
7119 BPF_EXIT_INSN(),
7120 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7121 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
7122 BPF_MOV64_IMM(BPF_REG_0, 0),
7123 BPF_EXIT_INSN(),
7124 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7125 BPF_MOV64_IMM(BPF_REG_0, 0),
7126 BPF_EXIT_INSN(),
7127 },
7128 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007129 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007130 .result = REJECT,
7131 .result_unpriv = REJECT,
7132 },
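	/* Subtracting two byte-sized map values yields a possibly negative
	 * result: used directly it makes the pointer's min value negative,
	 * and shifted right it makes the max value huge.  Both variants must
	 * be rejected.
	 */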
Edward Cree545722c2017-07-21 14:36:57 +01007133 {
Edward Creef65b1842017-08-07 15:27:12 +01007134 "subtraction bounds (map value) variant 1",
Edward Cree545722c2017-07-21 14:36:57 +01007135 .insns = {
7136 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7137 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7139 BPF_LD_MAP_FD(BPF_REG_1, 0),
7140 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7141 BPF_FUNC_map_lookup_elem),
7142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7143 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7144 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
7145 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7146 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
7147 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7148 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
7149 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7150 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7151 BPF_EXIT_INSN(),
7152 BPF_MOV64_IMM(BPF_REG_0, 0),
7153 BPF_EXIT_INSN(),
7154 },
7155 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007156 .errstr = "R0 max value is outside of the array range",
7157 .result = REJECT,
7158 },
7159 {
7160 "subtraction bounds (map value) variant 2",
7161 .insns = {
7162 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7163 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7164 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7165 BPF_LD_MAP_FD(BPF_REG_1, 0),
7166 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7167 BPF_FUNC_map_lookup_elem),
7168 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7169 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7170 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
7171 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7172 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
7173 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7174 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7175 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7176 BPF_EXIT_INSN(),
7177 BPF_MOV64_IMM(BPF_REG_0, 0),
7178 BPF_EXIT_INSN(),
7179 },
7180 .fixup_map1 = { 3 },
Edward Cree545722c2017-07-21 14:36:57 +01007181 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
7182 .result = REJECT,
Edward Cree545722c2017-07-21 14:36:57 +01007183 },
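	/* The MOV tests below check that a 32-bit move is modelled as zero
	 * extension while a 32-bit immediate in a 64-bit MOV is sign
	 * extended: the zero-extended value shifts down to exactly 0 and the
	 * access is accepted, the sign-extended ones leave an out-of-bounds
	 * offset and are rejected.
	 */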
Edward Cree69c4e8a2017-08-07 15:29:51 +01007184 {
Jann Horn2255f8d2017-12-18 20:12:01 -08007185 "bounds check based on zero-extended MOV",
7186 .insns = {
7187 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7188 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7190 BPF_LD_MAP_FD(BPF_REG_1, 0),
7191 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7192 BPF_FUNC_map_lookup_elem),
7193 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7194 /* r2 = 0x0000'0000'ffff'ffff */
7195 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
7196 /* r2 = 0 */
7197 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7198 /* no-op */
7199 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7200 /* access at offset 0 */
7201 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7202 /* exit */
7203 BPF_MOV64_IMM(BPF_REG_0, 0),
7204 BPF_EXIT_INSN(),
7205 },
7206 .fixup_map1 = { 3 },
7207 .result = ACCEPT
7208 },
7209 {
7210 "bounds check based on sign-extended MOV. test1",
7211 .insns = {
7212 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7213 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7215 BPF_LD_MAP_FD(BPF_REG_1, 0),
7216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7217 BPF_FUNC_map_lookup_elem),
7218 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7219 /* r2 = 0xffff'ffff'ffff'ffff */
7220 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7221 /* r2 = 0xffff'ffff */
7222 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7223 /* r0 = <oob pointer> */
7224 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7225 /* access to OOB pointer */
7226 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7227 /* exit */
7228 BPF_MOV64_IMM(BPF_REG_0, 0),
7229 BPF_EXIT_INSN(),
7230 },
7231 .fixup_map1 = { 3 },
7232 .errstr = "map_value pointer and 4294967295",
7233 .result = REJECT
7234 },
7235 {
7236 "bounds check based on sign-extended MOV. test2",
7237 .insns = {
7238 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7239 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7241 BPF_LD_MAP_FD(BPF_REG_1, 0),
7242 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7243 BPF_FUNC_map_lookup_elem),
7244 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7245 /* r2 = 0xffff'ffff'ffff'ffff */
7246 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7247 /* r2 = 0xfff'ffff */
7248 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
7249 /* r0 = <oob pointer> */
7250 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7251 /* access to OOB pointer */
7252 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7253 /* exit */
7254 BPF_MOV64_IMM(BPF_REG_0, 0),
7255 BPF_EXIT_INSN(),
7256 },
7257 .fixup_map1 = { 3 },
7258 .errstr = "R0 min value is outside of the array range",
7259 .result = REJECT
7260 },
7261 {
7262 "bounds check based on reg_off + var_off + insn_off. test1",
7263 .insns = {
7264 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7265 offsetof(struct __sk_buff, mark)),
7266 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7267 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7268 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7269 BPF_LD_MAP_FD(BPF_REG_1, 0),
7270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7271 BPF_FUNC_map_lookup_elem),
7272 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7273 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
7275 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7277 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7278 BPF_MOV64_IMM(BPF_REG_0, 0),
7279 BPF_EXIT_INSN(),
7280 },
7281 .fixup_map1 = { 4 },
7282 .errstr = "value_size=8 off=1073741825",
7283 .result = REJECT,
7284 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7285 },
7286 {
7287 "bounds check based on reg_off + var_off + insn_off. test2",
7288 .insns = {
7289 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7290 offsetof(struct __sk_buff, mark)),
7291 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7292 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7294 BPF_LD_MAP_FD(BPF_REG_1, 0),
7295 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7296 BPF_FUNC_map_lookup_elem),
7297 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7298 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
7300 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7301 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7302 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7303 BPF_MOV64_IMM(BPF_REG_0, 0),
7304 BPF_EXIT_INSN(),
7305 },
7306 .fixup_map1 = { 4 },
7307 .errstr = "value 1073741823",
7308 .result = REJECT,
7309 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7310 },
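	/* The truncation tests verify that 32-bit ALU and MOV32 operations
	 * narrow the tracked range correctly: when the 64-bit range does not
	 * cross a 32-bit boundary the bounds survive and the access is
	 * accepted; when it does, the result is treated as (almost) unbounded
	 * and rejected.
	 */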
7311 {
7312 "bounds check after truncation of non-boundary-crossing range",
7313 .insns = {
7314 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7315 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7317 BPF_LD_MAP_FD(BPF_REG_1, 0),
7318 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7319 BPF_FUNC_map_lookup_elem),
7320 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7321 /* r1 = [0x00, 0xff] */
7322 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7323 BPF_MOV64_IMM(BPF_REG_2, 1),
7324 /* r2 = 0x10'0000'0000 */
7325 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
7326 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
7327 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7328 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
7329 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7330 /* r1 = [0x00, 0xff] */
7331 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
7332 /* r1 = 0 */
7333 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7334 /* no-op */
7335 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7336 /* access at offset 0 */
7337 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7338 /* exit */
7339 BPF_MOV64_IMM(BPF_REG_0, 0),
7340 BPF_EXIT_INSN(),
7341 },
7342 .fixup_map1 = { 3 },
7343 .result = ACCEPT
7344 },
7345 {
7346 "bounds check after truncation of boundary-crossing range (1)",
7347 .insns = {
7348 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7349 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7351 BPF_LD_MAP_FD(BPF_REG_1, 0),
7352 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7353 BPF_FUNC_map_lookup_elem),
7354 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7355 /* r1 = [0x00, 0xff] */
7356 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7358 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7360 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7361 * [0x0000'0000, 0x0000'007f]
7362 */
7363 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
7364 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7365 /* r1 = [0x00, 0xff] or
7366 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7367 */
7368 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7369 /* r1 = 0 or
7370 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7371 */
7372 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7373 /* no-op or OOB pointer computation */
7374 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7375 /* potentially OOB access */
7376 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7377 /* exit */
7378 BPF_MOV64_IMM(BPF_REG_0, 0),
7379 BPF_EXIT_INSN(),
7380 },
7381 .fixup_map1 = { 3 },
7382 /* not actually fully unbounded, but the bound is very high */
7383 .errstr = "R0 unbounded memory access",
7384 .result = REJECT
7385 },
7386 {
7387 "bounds check after truncation of boundary-crossing range (2)",
7388 .insns = {
7389 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7390 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7391 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7392 BPF_LD_MAP_FD(BPF_REG_1, 0),
7393 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7394 BPF_FUNC_map_lookup_elem),
7395 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7396 /* r1 = [0x00, 0xff] */
7397 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7398 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7399 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7401 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7402 * [0x0000'0000, 0x0000'007f]
7403 * difference to previous test: truncation via MOV32
7404 * instead of ALU32.
7405 */
7406 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
7407 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7408 /* r1 = [0x00, 0xff] or
7409 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7410 */
7411 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7412 /* r1 = 0 or
7413 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7414 */
7415 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7416 /* no-op or OOB pointer computation */
7417 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7418 /* potentially OOB access */
7419 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7420 /* exit */
7421 BPF_MOV64_IMM(BPF_REG_0, 0),
7422 BPF_EXIT_INSN(),
7423 },
7424 .fixup_map1 = { 3 },
7425 /* not actually fully unbounded, but the bound is very high */
7426 .errstr = "R0 unbounded memory access",
7427 .result = REJECT
7428 },
7429 {
7430 "bounds check after wrapping 32-bit addition",
7431 .insns = {
7432 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7433 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7435 BPF_LD_MAP_FD(BPF_REG_1, 0),
7436 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7437 BPF_FUNC_map_lookup_elem),
7438 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7439 /* r1 = 0x7fff'ffff */
7440 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
7441 /* r1 = 0xffff'fffe */
7442 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7443 /* r1 = 0 */
7444 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
7445 /* no-op */
7446 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7447 /* access at offset 0 */
7448 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7449 /* exit */
7450 BPF_MOV64_IMM(BPF_REG_0, 0),
7451 BPF_EXIT_INSN(),
7452 },
7453 .fixup_map1 = { 3 },
7454 .result = ACCEPT
7455 },
7456 {
7457 "bounds check after shift with oversized count operand",
7458 .insns = {
7459 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7460 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7462 BPF_LD_MAP_FD(BPF_REG_1, 0),
7463 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7464 BPF_FUNC_map_lookup_elem),
7465 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7466 BPF_MOV64_IMM(BPF_REG_2, 32),
7467 BPF_MOV64_IMM(BPF_REG_1, 1),
7468 /* r1 = (u32)1 << (u32)32 = ? */
7469 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
7470 /* r1 = [0x0000, 0xffff] */
7471 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
7472 /* computes unknown pointer, potentially OOB */
7473 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7474 /* potentially OOB access */
7475 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7476 /* exit */
7477 BPF_MOV64_IMM(BPF_REG_0, 0),
7478 BPF_EXIT_INSN(),
7479 },
7480 .fixup_map1 = { 3 },
7481 .errstr = "R0 max value is outside of the array range",
7482 .result = REJECT
7483 },
7484 {
7485 "bounds check after right shift of maybe-negative number",
7486 .insns = {
7487 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7488 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7489 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7490 BPF_LD_MAP_FD(BPF_REG_1, 0),
7491 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7492 BPF_FUNC_map_lookup_elem),
7493 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7494 /* r1 = [0x00, 0xff] */
7495 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7496 /* r1 = [-0x01, 0xfe] */
7497 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
7498 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7499 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7500 /* r1 = 0 or 0xffff'ffff'ffff */
7501 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7502 /* computes unknown pointer, potentially OOB */
7503 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7504 /* potentially OOB access */
7505 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7506 /* exit */
7507 BPF_MOV64_IMM(BPF_REG_0, 0),
7508 BPF_EXIT_INSN(),
7509 },
7510 .fixup_map1 = { 3 },
7511 .errstr = "R0 unbounded memory access",
7512 .result = REJECT
7513 },
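	/* The next four tests feed a map value pointer known-constant offsets
	 * large enough that off + access size would overflow a signed 32-bit
	 * value; the verifier is expected to reject the pointer arithmetic
	 * itself, as the errstr patterns below show.
	 */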
7514 {
7515 "bounds check map access with off+size signed 32bit overflow. test1",
7516 .insns = {
7517 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7518 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7519 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7520 BPF_LD_MAP_FD(BPF_REG_1, 0),
7521 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7522 BPF_FUNC_map_lookup_elem),
7523 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7524 BPF_EXIT_INSN(),
7525 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7526 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7527 BPF_JMP_A(0),
7528 BPF_EXIT_INSN(),
7529 },
7530 .fixup_map1 = { 3 },
7531 .errstr = "map_value pointer and 2147483646",
7532 .result = REJECT
7533 },
7534 {
7535 "bounds check map access with off+size signed 32bit overflow. test2",
7536 .insns = {
7537 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7538 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7540 BPF_LD_MAP_FD(BPF_REG_1, 0),
7541 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7542 BPF_FUNC_map_lookup_elem),
7543 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7544 BPF_EXIT_INSN(),
7545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7546 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7548 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7549 BPF_JMP_A(0),
7550 BPF_EXIT_INSN(),
7551 },
7552 .fixup_map1 = { 3 },
7553 .errstr = "pointer offset 1073741822",
7554 .result = REJECT
7555 },
7556 {
7557 "bounds check map access with off+size signed 32bit overflow. test3",
7558 .insns = {
7559 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7560 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7561 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7562 BPF_LD_MAP_FD(BPF_REG_1, 0),
7563 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7564 BPF_FUNC_map_lookup_elem),
7565 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7566 BPF_EXIT_INSN(),
7567 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7568 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7569 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7570 BPF_JMP_A(0),
7571 BPF_EXIT_INSN(),
7572 },
7573 .fixup_map1 = { 3 },
7574 .errstr = "pointer offset -1073741822",
7575 .result = REJECT
7576 },
7577 {
7578 "bounds check map access with off+size signed 32bit overflow. test4",
7579 .insns = {
7580 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7581 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7582 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7583 BPF_LD_MAP_FD(BPF_REG_1, 0),
7584 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7585 BPF_FUNC_map_lookup_elem),
7586 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7587 BPF_EXIT_INSN(),
7588 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7589 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7590 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7591 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7592 BPF_JMP_A(0),
7593 BPF_EXIT_INSN(),
7594 },
7595 .fixup_map1 = { 3 },
7596 .errstr = "map_value pointer and 1000000000000",
7597 .result = REJECT
7598 },
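	/* In the two tests below, R0 holds a scalar loaded from the map value
	 * on one path and the frame pointer on the other; state pruning must
	 * not treat those states as equivalent, and returning the pointer is
	 * only allowed for privileged users.
	 */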
7599 {
7600 "pointer/scalar confusion in state equality check (way 1)",
7601 .insns = {
7602 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7603 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7604 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7605 BPF_LD_MAP_FD(BPF_REG_1, 0),
7606 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7607 BPF_FUNC_map_lookup_elem),
7608 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7609 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7610 BPF_JMP_A(1),
7611 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7612 BPF_JMP_A(0),
7613 BPF_EXIT_INSN(),
7614 },
7615 .fixup_map1 = { 3 },
7616 .result = ACCEPT,
7617 .retval = POINTER_VALUE,
7618 .result_unpriv = REJECT,

7619 .errstr_unpriv = "R0 leaks addr as return value"
7620 },
7621 {
7622 "pointer/scalar confusion in state equality check (way 2)",
7623 .insns = {
7624 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7625 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7627 BPF_LD_MAP_FD(BPF_REG_1, 0),
7628 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7629 BPF_FUNC_map_lookup_elem),
7630 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7631 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7632 BPF_JMP_A(1),
7633 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7634 BPF_EXIT_INSN(),
7635 },
7636 .fixup_map1 = { 3 },
7637 .result = ACCEPT,
7638 .retval = POINTER_VALUE,
7639 .result_unpriv = REJECT,
7640 .errstr_unpriv = "R0 leaks addr as return value"
7641 },
7642 {
7643 "variable-offset ctx access",
7644 .insns = {
7645 /* Get an unknown value */
7646 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7647 /* Make it small and 4-byte aligned */
7648 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7649 /* add it to skb. We now have either &skb->len or
7650 * &skb->pkt_type, but we don't know which
7651 */
7652 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7653 /* dereference it */
7654 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7655 BPF_EXIT_INSN(),
7656 },
7657 .errstr = "variable ctx access var_off=(0x0; 0x4)",
7658 .result = REJECT,
7659 .prog_type = BPF_PROG_TYPE_LWT_IN,
7660 },
7661 {
7662 "variable-offset stack access",
7663 .insns = {
7664 /* Fill the top 8 bytes of the stack */
7665 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7666 /* Get an unknown value */
7667 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7668 /* Make it small and 4-byte aligned */
7669 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7670 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7671 /* add it to fp. We now have either fp-4 or fp-8, but
7672 * we don't know which
7673 */
7674 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7675 /* dereference it */
7676 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
7677 BPF_EXIT_INSN(),
7678 },
7679 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7680 .result = REJECT,
7681 .prog_type = BPF_PROG_TYPE_LWT_IN,
7682 },
7683 {
7684 "indirect variable-offset stack access",
7685 .insns = {
7686 /* Fill the top 8 bytes of the stack */
7687 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7688 /* Get an unknown value */
7689 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7690 /* Make it small and 4-byte aligned */
7691 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7692 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7693 /* add it to fp. We now have either fp-4 or fp-8, but
7694 * we don't know which
7695 */
7696 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7697 /* dereference it indirectly */
7698 BPF_LD_MAP_FD(BPF_REG_1, 0),
7699 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7700 BPF_FUNC_map_lookup_elem),
7701 BPF_MOV64_IMM(BPF_REG_0, 0),
7702 BPF_EXIT_INSN(),
7703 },
7704 .fixup_map1 = { 5 },
7705 .errstr = "variable stack read R2",
7706 .result = REJECT,
7707 .prog_type = BPF_PROG_TYPE_LWT_IN,
7708 },
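	/* Direct frame-pointer arithmetic with large constants: the verifier
	 * tracks the accumulated offset and must reject sums that grow past
	 * the supported pointer offset range instead of letting them wrap
	 * around 32 bits.
	 */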
7709 {
7710 "direct stack access with 32-bit wraparound. test1",
7711 .insns = {
7712 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7715 BPF_MOV32_IMM(BPF_REG_0, 0),
7716 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7717 BPF_EXIT_INSN()
7718 },
7719 .errstr = "fp pointer and 2147483647",
7720 .result = REJECT
7721 },
7722 {
7723 "direct stack access with 32-bit wraparound. test2",
7724 .insns = {
7725 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7728 BPF_MOV32_IMM(BPF_REG_0, 0),
7729 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7730 BPF_EXIT_INSN()
7731 },
7732 .errstr = "fp pointer and 1073741823",
7733 .result = REJECT
7734 },
7735 {
7736 "direct stack access with 32-bit wraparound. test3",
7737 .insns = {
7738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7741 BPF_MOV32_IMM(BPF_REG_0, 0),
7742 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7743 BPF_EXIT_INSN()
7744 },
7745 .errstr = "fp pointer offset 1073741822",
7746 .result = REJECT
7747 },
7748 {
7749 "liveness pruning and write screening",
7750 .insns = {
7751 /* Get an unknown value */
7752 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7753 /* branch conditions teach us nothing about R2 */
7754 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7755 BPF_MOV64_IMM(BPF_REG_0, 0),
7756 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7757 BPF_MOV64_IMM(BPF_REG_0, 0),
7758 BPF_EXIT_INSN(),
7759 },
7760 .errstr = "R0 !read_ok",
7761 .result = REJECT,
7762 .prog_type = BPF_PROG_TYPE_LWT_IN,
7763 },
7764 {
7765 "varlen_map_value_access pruning",
7766 .insns = {
7767 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7768 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7770 BPF_LD_MAP_FD(BPF_REG_1, 0),
7771 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7772 BPF_FUNC_map_lookup_elem),
7773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7774 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7775 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
7776 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
7777 BPF_MOV32_IMM(BPF_REG_1, 0),
7778 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
7779 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7780 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
7781 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7782 offsetof(struct test_val, foo)),
7783 BPF_EXIT_INSN(),
7784 },
7785 .fixup_map2 = { 3 },
7786 .errstr_unpriv = "R0 leaks addr",
7787 .errstr = "R0 unbounded memory access",
7788 .result_unpriv = REJECT,
7789 .result = REJECT,
7790 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7791 },
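	/* BPF_END (byte swap) is only defined for the 32-bit BPF_ALU class;
	 * encoding it with BPF_ALU64 yields opcode 0xd7, which the verifier
	 * must reject as unknown.
	 */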
7792 {
7793 "invalid 64-bit BPF_END",
7794 .insns = {
7795 BPF_MOV32_IMM(BPF_REG_0, 0),
7796 {
7797 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
7798 .dst_reg = BPF_REG_0,
7799 .src_reg = 0,
7800 .off = 0,
7801 .imm = 32,
7802 },
7803 BPF_EXIT_INSN(),
7804 },
7805 .errstr = "unknown opcode d7",
7806 .result = REJECT,
7807 },
7808 {
7809 "XDP, using ifindex from netdev",
7810 .insns = {
7811 BPF_MOV64_IMM(BPF_REG_0, 0),
7812 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7813 offsetof(struct xdp_md, ingress_ifindex)),
7814 BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
7815 BPF_MOV64_IMM(BPF_REG_0, 1),
7816 BPF_EXIT_INSN(),
7817 },
7818 .result = ACCEPT,
7819 .prog_type = BPF_PROG_TYPE_XDP,
7820 .retval = 1,
7821 },
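	/* "meta access" tests: xdp_md->data_meta points at the metadata area
	 * in front of the packet, so loads through it must be bounds-checked
	 * against data (not data_end) before they are allowed, mirroring the
	 * data vs. data_end checks.
	 */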
7822 {
7823 "meta access, test1",
7824 .insns = {
7825 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7826 offsetof(struct xdp_md, data_meta)),
7827 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7828 offsetof(struct xdp_md, data)),
7829 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7831 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7832 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7833 BPF_MOV64_IMM(BPF_REG_0, 0),
7834 BPF_EXIT_INSN(),
7835 },
7836 .result = ACCEPT,
7837 .prog_type = BPF_PROG_TYPE_XDP,
7838 },
7839 {
7840 "meta access, test2",
7841 .insns = {
7842 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7843 offsetof(struct xdp_md, data_meta)),
7844 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7845 offsetof(struct xdp_md, data)),
7846 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7847 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
7848 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7850 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7851 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7852 BPF_MOV64_IMM(BPF_REG_0, 0),
7853 BPF_EXIT_INSN(),
7854 },
7855 .result = REJECT,
7856 .errstr = "invalid access to packet, off=-8",
7857 .prog_type = BPF_PROG_TYPE_XDP,
7858 },
7859 {
7860 "meta access, test3",
7861 .insns = {
7862 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7863 offsetof(struct xdp_md, data_meta)),
7864 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7865 offsetof(struct xdp_md, data_end)),
7866 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7867 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7868 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7869 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7870 BPF_MOV64_IMM(BPF_REG_0, 0),
7871 BPF_EXIT_INSN(),
7872 },
7873 .result = REJECT,
7874 .errstr = "invalid access to packet",
7875 .prog_type = BPF_PROG_TYPE_XDP,
7876 },
7877 {
7878 "meta access, test4",
7879 .insns = {
7880 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7881 offsetof(struct xdp_md, data_meta)),
7882 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7883 offsetof(struct xdp_md, data_end)),
7884 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7885 offsetof(struct xdp_md, data)),
7886 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7888 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7889 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7890 BPF_MOV64_IMM(BPF_REG_0, 0),
7891 BPF_EXIT_INSN(),
7892 },
7893 .result = REJECT,
7894 .errstr = "invalid access to packet",
7895 .prog_type = BPF_PROG_TYPE_XDP,
7896 },
7897 {
7898 "meta access, test5",
7899 .insns = {
7900 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7901 offsetof(struct xdp_md, data_meta)),
7902 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7903 offsetof(struct xdp_md, data)),
7904 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7905 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7906 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
7907 BPF_MOV64_IMM(BPF_REG_2, -8),
7908 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7909 BPF_FUNC_xdp_adjust_meta),
7910 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
7911 BPF_MOV64_IMM(BPF_REG_0, 0),
7912 BPF_EXIT_INSN(),
7913 },
7914 .result = REJECT,
7915 .errstr = "R3 !read_ok",
7916 .prog_type = BPF_PROG_TYPE_XDP,
7917 },
7918 {
7919 "meta access, test6",
7920 .insns = {
7921 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7922 offsetof(struct xdp_md, data_meta)),
7923 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7924 offsetof(struct xdp_md, data)),
7925 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7927 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7929 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
7930 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7931 BPF_MOV64_IMM(BPF_REG_0, 0),
7932 BPF_EXIT_INSN(),
7933 },
7934 .result = REJECT,
7935 .errstr = "invalid access to packet",
7936 .prog_type = BPF_PROG_TYPE_XDP,
7937 },
7938 {
7939 "meta access, test7",
7940 .insns = {
7941 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7942 offsetof(struct xdp_md, data_meta)),
7943 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7944 offsetof(struct xdp_md, data)),
7945 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7947 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7949 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7950 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7951 BPF_MOV64_IMM(BPF_REG_0, 0),
7952 BPF_EXIT_INSN(),
7953 },
7954 .result = ACCEPT,
7955 .prog_type = BPF_PROG_TYPE_XDP,
7956 },
7957 {
7958 "meta access, test8",
7959 .insns = {
7960 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7961 offsetof(struct xdp_md, data_meta)),
7962 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7963 offsetof(struct xdp_md, data)),
7964 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7966 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7967 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7968 BPF_MOV64_IMM(BPF_REG_0, 0),
7969 BPF_EXIT_INSN(),
7970 },
7971 .result = ACCEPT,
7972 .prog_type = BPF_PROG_TYPE_XDP,
7973 },
7974 {
7975 "meta access, test9",
7976 .insns = {
7977 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7978 offsetof(struct xdp_md, data_meta)),
7979 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7980 offsetof(struct xdp_md, data)),
7981 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7983 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7984 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7985 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7986 BPF_MOV64_IMM(BPF_REG_0, 0),
7987 BPF_EXIT_INSN(),
7988 },
7989 .result = REJECT,
7990 .errstr = "invalid access to packet",
7991 .prog_type = BPF_PROG_TYPE_XDP,
7992 },
7993 {
7994 "meta access, test10",
7995 .insns = {
7996 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7997 offsetof(struct xdp_md, data_meta)),
7998 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7999 offsetof(struct xdp_md, data)),
8000 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8001 offsetof(struct xdp_md, data_end)),
8002 BPF_MOV64_IMM(BPF_REG_5, 42),
8003 BPF_MOV64_IMM(BPF_REG_6, 24),
8004 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8005 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8006 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8007 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8008 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
8009 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8010 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8012 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
8013 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
8014 BPF_MOV64_IMM(BPF_REG_0, 0),
8015 BPF_EXIT_INSN(),
8016 },
8017 .result = REJECT,
8018 .errstr = "invalid access to packet",
8019 .prog_type = BPF_PROG_TYPE_XDP,
8020 },
8021 {
8022 "meta access, test11",
8023 .insns = {
8024 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8025 offsetof(struct xdp_md, data_meta)),
8026 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8027 offsetof(struct xdp_md, data)),
8028 BPF_MOV64_IMM(BPF_REG_5, 42),
8029 BPF_MOV64_IMM(BPF_REG_6, 24),
8030 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8031 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8032 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8033 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8034 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
8035 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8036 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8037 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8038 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
8039 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
8040 BPF_MOV64_IMM(BPF_REG_0, 0),
8041 BPF_EXIT_INSN(),
8042 },
8043 .result = ACCEPT,
8044 .prog_type = BPF_PROG_TYPE_XDP,
8045 },
8046 {
8047 "meta access, test12",
8048 .insns = {
8049 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8050 offsetof(struct xdp_md, data_meta)),
8051 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8052 offsetof(struct xdp_md, data)),
8053 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8054 offsetof(struct xdp_md, data_end)),
8055 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8056 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8057 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
8058 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8059 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8061 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
8062 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8063 BPF_MOV64_IMM(BPF_REG_0, 0),
8064 BPF_EXIT_INSN(),
8065 },
8066 .result = ACCEPT,
8067 .prog_type = BPF_PROG_TYPE_XDP,
8068 },
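	/* Modifying the ctx pointer with ALU and then dereferencing it with a
	 * further offset is rejected: as the errstr below states, ctx+const
	 * is allowed but ctx+const+const is not.
	 */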
8069 {
8070 "arithmetic ops make PTR_TO_CTX unusable",
8071 .insns = {
8072 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
8073 offsetof(struct __sk_buff, data) -
8074 offsetof(struct __sk_buff, mark)),
8075 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8076 offsetof(struct __sk_buff, mark)),
8077 BPF_EXIT_INSN(),
8078 },
8079 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
8080 .result = REJECT,
8081 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8082 },
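	/* Subtracting two packet pointers yields a plain scalar (here the
	 * packet length), which may be returned; the expected retval is
	 * TEST_DATA_LEN.
	 */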
8083 {
8084 "pkt_end - pkt_start is allowed",
8085 .insns = {
8086 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8087 offsetof(struct __sk_buff, data_end)),
8088 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8089 offsetof(struct __sk_buff, data)),
8090 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
8091 BPF_EXIT_INSN(),
8092 },
8093 .result = ACCEPT,
8094 .retval = TEST_DATA_LEN,
8095 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8096 },
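	/* pkt_end itself must never be modified: any ALU on the
	 * PTR_TO_PACKET_END register is rejected, regardless of the bounds
	 * check that follows.
	 */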
8097 {
8098 "XDP pkt read, pkt_end mangling, bad access 1",
8099 .insns = {
8100 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8101 offsetof(struct xdp_md, data)),
8102 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8103 offsetof(struct xdp_md, data_end)),
8104 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
8107 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8108 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8109 BPF_MOV64_IMM(BPF_REG_0, 0),
8110 BPF_EXIT_INSN(),
8111 },
8112 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8113 .result = REJECT,
8114 .prog_type = BPF_PROG_TYPE_XDP,
8115 },
8116 {
8117 "XDP pkt read, pkt_end mangling, bad access 2",
8118 .insns = {
8119 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8120 offsetof(struct xdp_md, data)),
8121 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8122 offsetof(struct xdp_md, data_end)),
8123 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8125 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
8126 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8127 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8128 BPF_MOV64_IMM(BPF_REG_0, 0),
8129 BPF_EXIT_INSN(),
8130 },
8131 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8132 .result = REJECT,
8133 .prog_type = BPF_PROG_TYPE_XDP,
8134 },
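	/* The following block walks through the JGT/JGE/JLT/JLE comparisons
	 * between a derived packet pointer (pkt_data + 8) and pkt_end, in
	 * both operand orders and on both branch outcomes; the "bad access"
	 * variants read outside the range that the taken branch actually
	 * proves.
	 */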
8135 {
8136 "XDP pkt read, pkt_data' > pkt_end, good access",
8137 .insns = {
8138 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8139 offsetof(struct xdp_md, data)),
8140 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8141 offsetof(struct xdp_md, data_end)),
8142 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8143 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8144 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8145 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8146 BPF_MOV64_IMM(BPF_REG_0, 0),
8147 BPF_EXIT_INSN(),
8148 },
8149 .result = ACCEPT,
8150 .prog_type = BPF_PROG_TYPE_XDP,
8151 },
8152 {
8153 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
8154 .insns = {
8155 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8156 offsetof(struct xdp_md, data)),
8157 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8158 offsetof(struct xdp_md, data_end)),
8159 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8160 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8161 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8162 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8163 BPF_MOV64_IMM(BPF_REG_0, 0),
8164 BPF_EXIT_INSN(),
8165 },
8166 .errstr = "R1 offset is outside of the packet",
8167 .result = REJECT,
8168 .prog_type = BPF_PROG_TYPE_XDP,
8169 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8170 },
8171 {
8172 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
8173 .insns = {
8174 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8175 offsetof(struct xdp_md, data)),
8176 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8177 offsetof(struct xdp_md, data_end)),
8178 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8180 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8181 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8182 BPF_MOV64_IMM(BPF_REG_0, 0),
8183 BPF_EXIT_INSN(),
8184 },
8185 .errstr = "R1 offset is outside of the packet",
8186 .result = REJECT,
8187 .prog_type = BPF_PROG_TYPE_XDP,
8188 },
8189 {
8190 "XDP pkt read, pkt_end > pkt_data', good access",
8191 .insns = {
8192 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8193 offsetof(struct xdp_md, data)),
8194 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8195 offsetof(struct xdp_md, data_end)),
8196 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8197 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8198 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8199 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8200 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8201 BPF_MOV64_IMM(BPF_REG_0, 0),
8202 BPF_EXIT_INSN(),
8203 },
8204 .result = ACCEPT,
8205 .prog_type = BPF_PROG_TYPE_XDP,
8206 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8207 },
8208 {
8209 "XDP pkt read, pkt_end > pkt_data', bad access 1",
8210 .insns = {
8211 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8212 offsetof(struct xdp_md, data)),
8213 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8214 offsetof(struct xdp_md, data_end)),
8215 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8217 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8218 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8219 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8220 BPF_MOV64_IMM(BPF_REG_0, 0),
8221 BPF_EXIT_INSN(),
8222 },
8223 .errstr = "R1 offset is outside of the packet",
8224 .result = REJECT,
8225 .prog_type = BPF_PROG_TYPE_XDP,
8226 },
8227 {
8228 "XDP pkt read, pkt_end > pkt_data', bad access 2",
8229 .insns = {
8230 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8231 offsetof(struct xdp_md, data)),
8232 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8233 offsetof(struct xdp_md, data_end)),
8234 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8235 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8236 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8237 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8238 BPF_MOV64_IMM(BPF_REG_0, 0),
8239 BPF_EXIT_INSN(),
8240 },
8241 .errstr = "R1 offset is outside of the packet",
8242 .result = REJECT,
8243 .prog_type = BPF_PROG_TYPE_XDP,
8244 },
8245 {
8246 "XDP pkt read, pkt_data' < pkt_end, good access",
8247 .insns = {
8248 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8249 offsetof(struct xdp_md, data)),
8250 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8251 offsetof(struct xdp_md, data_end)),
8252 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8253 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8254 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8255 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8256 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8257 BPF_MOV64_IMM(BPF_REG_0, 0),
8258 BPF_EXIT_INSN(),
8259 },
8260 .result = ACCEPT,
8261 .prog_type = BPF_PROG_TYPE_XDP,
8262 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8263 },
8264 {
8265 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
8266 .insns = {
8267 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8268 offsetof(struct xdp_md, data)),
8269 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8270 offsetof(struct xdp_md, data_end)),
8271 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8273 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8274 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8275 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8276 BPF_MOV64_IMM(BPF_REG_0, 0),
8277 BPF_EXIT_INSN(),
8278 },
8279 .errstr = "R1 offset is outside of the packet",
8280 .result = REJECT,
8281 .prog_type = BPF_PROG_TYPE_XDP,
8282 },
8283 {
8284 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
8285 .insns = {
8286 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8287 offsetof(struct xdp_md, data)),
8288 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8289 offsetof(struct xdp_md, data_end)),
8290 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8292 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8293 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8294 BPF_MOV64_IMM(BPF_REG_0, 0),
8295 BPF_EXIT_INSN(),
8296 },
8297 .errstr = "R1 offset is outside of the packet",
8298 .result = REJECT,
8299 .prog_type = BPF_PROG_TYPE_XDP,
8300 },
8301 {
8302 "XDP pkt read, pkt_end < pkt_data', good access",
8303 .insns = {
8304 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8305 offsetof(struct xdp_md, data)),
8306 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8307 offsetof(struct xdp_md, data_end)),
8308 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8309 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8310 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8311 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8312 BPF_MOV64_IMM(BPF_REG_0, 0),
8313 BPF_EXIT_INSN(),
8314 },
8315 .result = ACCEPT,
8316 .prog_type = BPF_PROG_TYPE_XDP,
8317 },
8318 {
8319 "XDP pkt read, pkt_end < pkt_data', bad access 1",
8320 .insns = {
8321 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8322 offsetof(struct xdp_md, data)),
8323 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8324 offsetof(struct xdp_md, data_end)),
8325 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8326 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8327 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8328 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8329 BPF_MOV64_IMM(BPF_REG_0, 0),
8330 BPF_EXIT_INSN(),
8331 },
8332 .errstr = "R1 offset is outside of the packet",
8333 .result = REJECT,
8334 .prog_type = BPF_PROG_TYPE_XDP,
8335 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8336 },
8337 {
8338 "XDP pkt read, pkt_end < pkt_data', bad access 2",
8339 .insns = {
8340 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8341 offsetof(struct xdp_md, data)),
8342 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8343 offsetof(struct xdp_md, data_end)),
8344 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8345 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8346 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8347 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8348 BPF_MOV64_IMM(BPF_REG_0, 0),
8349 BPF_EXIT_INSN(),
8350 },
8351 .errstr = "R1 offset is outside of the packet",
8352 .result = REJECT,
8353 .prog_type = BPF_PROG_TYPE_XDP,
8354 },
8355 {
8356 "XDP pkt read, pkt_data' >= pkt_end, good access",
8357 .insns = {
8358 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8359 offsetof(struct xdp_md, data)),
8360 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8361 offsetof(struct xdp_md, data_end)),
8362 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8364 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8365 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8366 BPF_MOV64_IMM(BPF_REG_0, 0),
8367 BPF_EXIT_INSN(),
8368 },
8369 .result = ACCEPT,
8370 .prog_type = BPF_PROG_TYPE_XDP,
8371 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8372 },
8373 {
8374 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
8375 .insns = {
8376 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8377 offsetof(struct xdp_md, data)),
8378 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8379 offsetof(struct xdp_md, data_end)),
8380 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8381 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8382 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8383 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8384 BPF_MOV64_IMM(BPF_REG_0, 0),
8385 BPF_EXIT_INSN(),
8386 },
8387 .errstr = "R1 offset is outside of the packet",
8388 .result = REJECT,
8389 .prog_type = BPF_PROG_TYPE_XDP,
8390 },
8391 {
8392 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
8393 .insns = {
8394 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8395 offsetof(struct xdp_md, data)),
8396 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8397 offsetof(struct xdp_md, data_end)),
8398 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8400 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8401 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8402 BPF_MOV64_IMM(BPF_REG_0, 0),
8403 BPF_EXIT_INSN(),
8404 },
8405 .errstr = "R1 offset is outside of the packet",
8406 .result = REJECT,
8407 .prog_type = BPF_PROG_TYPE_XDP,
8408 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8409 },
8410 {
8411 "XDP pkt read, pkt_end >= pkt_data', good access",
8412 .insns = {
8413 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8414 offsetof(struct xdp_md, data)),
8415 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8416 offsetof(struct xdp_md, data_end)),
8417 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8418 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8419 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8420 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8421 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8422 BPF_MOV64_IMM(BPF_REG_0, 0),
8423 BPF_EXIT_INSN(),
8424 },
8425 .result = ACCEPT,
8426 .prog_type = BPF_PROG_TYPE_XDP,
8427 },
8428 {
8429 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
8430 .insns = {
8431 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8432 offsetof(struct xdp_md, data)),
8433 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8434 offsetof(struct xdp_md, data_end)),
8435 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8437 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8438 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8439 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8440 BPF_MOV64_IMM(BPF_REG_0, 0),
8441 BPF_EXIT_INSN(),
8442 },
8443 .errstr = "R1 offset is outside of the packet",
8444 .result = REJECT,
8445 .prog_type = BPF_PROG_TYPE_XDP,
8446 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8447 },
8448 {
8449 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
8450 .insns = {
8451 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8452 offsetof(struct xdp_md, data)),
8453 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8454 offsetof(struct xdp_md, data_end)),
8455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8457 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8458 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8459 BPF_MOV64_IMM(BPF_REG_0, 0),
8460 BPF_EXIT_INSN(),
8461 },
8462 .errstr = "R1 offset is outside of the packet",
8463 .result = REJECT,
8464 .prog_type = BPF_PROG_TYPE_XDP,
8465 },
8466 {
8467 "XDP pkt read, pkt_data' <= pkt_end, good access",
8468 .insns = {
8469 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8470 offsetof(struct xdp_md, data)),
8471 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8472 offsetof(struct xdp_md, data_end)),
8473 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8475 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8476 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8477 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8478 BPF_MOV64_IMM(BPF_REG_0, 0),
8479 BPF_EXIT_INSN(),
8480 },
8481 .result = ACCEPT,
8482 .prog_type = BPF_PROG_TYPE_XDP,
8483 },
8484 {
8485 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
8486 .insns = {
8487 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8488 offsetof(struct xdp_md, data)),
8489 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8490 offsetof(struct xdp_md, data_end)),
8491 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8492 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8493 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8494 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8495 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8496 BPF_MOV64_IMM(BPF_REG_0, 0),
8497 BPF_EXIT_INSN(),
8498 },
8499 .errstr = "R1 offset is outside of the packet",
8500 .result = REJECT,
8501 .prog_type = BPF_PROG_TYPE_XDP,
8502 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8503 },
8504 {
8505 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
8506 .insns = {
8507 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8508 offsetof(struct xdp_md, data)),
8509 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8510 offsetof(struct xdp_md, data_end)),
8511 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8512 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8513 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8514 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8515 BPF_MOV64_IMM(BPF_REG_0, 0),
8516 BPF_EXIT_INSN(),
8517 },
8518 .errstr = "R1 offset is outside of the packet",
8519 .result = REJECT,
8520 .prog_type = BPF_PROG_TYPE_XDP,
8521 },
8522 {
8523 "XDP pkt read, pkt_end <= pkt_data', good access",
8524 .insns = {
8525 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8526 offsetof(struct xdp_md, data)),
8527 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8528 offsetof(struct xdp_md, data_end)),
8529 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8531 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8532 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8533 BPF_MOV64_IMM(BPF_REG_0, 0),
8534 BPF_EXIT_INSN(),
8535 },
8536 .result = ACCEPT,
8537 .prog_type = BPF_PROG_TYPE_XDP,
8538 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8539 },
8540 {
8541 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
8542 .insns = {
8543 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8544 offsetof(struct xdp_md, data)),
8545 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8546 offsetof(struct xdp_md, data_end)),
8547 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8549 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8550 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8551 BPF_MOV64_IMM(BPF_REG_0, 0),
8552 BPF_EXIT_INSN(),
8553 },
8554 .errstr = "R1 offset is outside of the packet",
8555 .result = REJECT,
8556 .prog_type = BPF_PROG_TYPE_XDP,
8557 },
8558 {
8559 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
8560 .insns = {
8561 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8562 offsetof(struct xdp_md, data)),
8563 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8564 offsetof(struct xdp_md, data_end)),
8565 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8567 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8568 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8569 BPF_MOV64_IMM(BPF_REG_0, 0),
8570 BPF_EXIT_INSN(),
8571 },
8572 .errstr = "R1 offset is outside of the packet",
8573 .result = REJECT,
8574 .prog_type = BPF_PROG_TYPE_XDP,
8575 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8576 },
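	/* Same comparison matrix as above, but bounding a derived pkt_meta
	 * pointer against pkt_data instead of pkt_data against pkt_end.
	 */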
8577 {
8578 "XDP pkt read, pkt_meta' > pkt_data, good access",
8579 .insns = {
8580 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8581 offsetof(struct xdp_md, data_meta)),
8582 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8583 offsetof(struct xdp_md, data)),
8584 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8586 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8587 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8588 BPF_MOV64_IMM(BPF_REG_0, 0),
8589 BPF_EXIT_INSN(),
8590 },
8591 .result = ACCEPT,
8592 .prog_type = BPF_PROG_TYPE_XDP,
8593 },
8594 {
8595 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
8596 .insns = {
8597 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8598 offsetof(struct xdp_md, data_meta)),
8599 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8600 offsetof(struct xdp_md, data)),
8601 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8602 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8603 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8604 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8605 BPF_MOV64_IMM(BPF_REG_0, 0),
8606 BPF_EXIT_INSN(),
8607 },
8608 .errstr = "R1 offset is outside of the packet",
8609 .result = REJECT,
8610 .prog_type = BPF_PROG_TYPE_XDP,
8611 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8612 },
8613 {
8614 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
8615 .insns = {
8616 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8617 offsetof(struct xdp_md, data_meta)),
8618 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8619 offsetof(struct xdp_md, data)),
8620 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8621 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8622 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8623 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8624 BPF_MOV64_IMM(BPF_REG_0, 0),
8625 BPF_EXIT_INSN(),
8626 },
8627 .errstr = "R1 offset is outside of the packet",
8628 .result = REJECT,
8629 .prog_type = BPF_PROG_TYPE_XDP,
8630 },
8631 {
8632 "XDP pkt read, pkt_data > pkt_meta', good access",
8633 .insns = {
8634 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8635 offsetof(struct xdp_md, data_meta)),
8636 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8637 offsetof(struct xdp_md, data)),
8638 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8640 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8641 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8642 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8643 BPF_MOV64_IMM(BPF_REG_0, 0),
8644 BPF_EXIT_INSN(),
8645 },
8646 .result = ACCEPT,
8647 .prog_type = BPF_PROG_TYPE_XDP,
8648 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8649 },
8650 {
8651 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
8652 .insns = {
8653 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8654 offsetof(struct xdp_md, data_meta)),
8655 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8656 offsetof(struct xdp_md, data)),
8657 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8658 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8659 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8660 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8661 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8662 BPF_MOV64_IMM(BPF_REG_0, 0),
8663 BPF_EXIT_INSN(),
8664 },
8665 .errstr = "R1 offset is outside of the packet",
8666 .result = REJECT,
8667 .prog_type = BPF_PROG_TYPE_XDP,
8668 },
8669 {
8670 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
8671 .insns = {
8672 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8673 offsetof(struct xdp_md, data_meta)),
8674 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8675 offsetof(struct xdp_md, data)),
8676 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8677 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8678 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8679 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8680 BPF_MOV64_IMM(BPF_REG_0, 0),
8681 BPF_EXIT_INSN(),
8682 },
8683 .errstr = "R1 offset is outside of the packet",
8684 .result = REJECT,
8685 .prog_type = BPF_PROG_TYPE_XDP,
8686 },
8687 {
8688 "XDP pkt read, pkt_meta' < pkt_data, good access",
8689 .insns = {
8690 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8691 offsetof(struct xdp_md, data_meta)),
8692 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8693 offsetof(struct xdp_md, data)),
8694 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8695 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8696 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8697 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8698 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8699 BPF_MOV64_IMM(BPF_REG_0, 0),
8700 BPF_EXIT_INSN(),
8701 },
8702 .result = ACCEPT,
8703 .prog_type = BPF_PROG_TYPE_XDP,
8704 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8705 },
8706 {
8707 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
8708 .insns = {
8709 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8710 offsetof(struct xdp_md, data_meta)),
8711 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8712 offsetof(struct xdp_md, data)),
8713 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8715 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8716 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8717 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8718 BPF_MOV64_IMM(BPF_REG_0, 0),
8719 BPF_EXIT_INSN(),
8720 },
8721 .errstr = "R1 offset is outside of the packet",
8722 .result = REJECT,
8723 .prog_type = BPF_PROG_TYPE_XDP,
8724 },
8725 {
8726 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
8727 .insns = {
8728 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8729 offsetof(struct xdp_md, data_meta)),
8730 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8731 offsetof(struct xdp_md, data)),
8732 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8734 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8735 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8736 BPF_MOV64_IMM(BPF_REG_0, 0),
8737 BPF_EXIT_INSN(),
8738 },
8739 .errstr = "R1 offset is outside of the packet",
8740 .result = REJECT,
8741 .prog_type = BPF_PROG_TYPE_XDP,
8742 },
8743 {
8744 "XDP pkt read, pkt_data < pkt_meta', good access",
8745 .insns = {
8746 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8747 offsetof(struct xdp_md, data_meta)),
8748 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8749 offsetof(struct xdp_md, data)),
8750 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8751 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8752 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8753 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8754 BPF_MOV64_IMM(BPF_REG_0, 0),
8755 BPF_EXIT_INSN(),
8756 },
8757 .result = ACCEPT,
8758 .prog_type = BPF_PROG_TYPE_XDP,
8759 },
8760 {
8761 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
8762 .insns = {
8763 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8764 offsetof(struct xdp_md, data_meta)),
8765 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8766 offsetof(struct xdp_md, data)),
8767 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8768 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8769 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8770 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8771 BPF_MOV64_IMM(BPF_REG_0, 0),
8772 BPF_EXIT_INSN(),
8773 },
8774 .errstr = "R1 offset is outside of the packet",
8775 .result = REJECT,
8776 .prog_type = BPF_PROG_TYPE_XDP,
8777 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8778 },
8779 {
8780 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
8781 .insns = {
8782 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8783 offsetof(struct xdp_md, data_meta)),
8784 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8785 offsetof(struct xdp_md, data)),
8786 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8788 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8789 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8790 BPF_MOV64_IMM(BPF_REG_0, 0),
8791 BPF_EXIT_INSN(),
8792 },
8793 .errstr = "R1 offset is outside of the packet",
8794 .result = REJECT,
8795 .prog_type = BPF_PROG_TYPE_XDP,
8796 },
8797 {
8798 "XDP pkt read, pkt_meta' >= pkt_data, good access",
8799 .insns = {
8800 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8801 offsetof(struct xdp_md, data_meta)),
8802 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8803 offsetof(struct xdp_md, data)),
8804 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8805 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8806 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8807 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8808 BPF_MOV64_IMM(BPF_REG_0, 0),
8809 BPF_EXIT_INSN(),
8810 },
8811 .result = ACCEPT,
8812 .prog_type = BPF_PROG_TYPE_XDP,
8813 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8814 },
8815 {
8816 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
8817 .insns = {
8818 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8819 offsetof(struct xdp_md, data_meta)),
8820 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8821 offsetof(struct xdp_md, data)),
8822 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8823 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8824 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8825 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8826 BPF_MOV64_IMM(BPF_REG_0, 0),
8827 BPF_EXIT_INSN(),
8828 },
8829 .errstr = "R1 offset is outside of the packet",
8830 .result = REJECT,
8831 .prog_type = BPF_PROG_TYPE_XDP,
8832 },
8833 {
8834 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
8835 .insns = {
8836 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8837 offsetof(struct xdp_md, data_meta)),
8838 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8839 offsetof(struct xdp_md, data)),
8840 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8842 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8843 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8844 BPF_MOV64_IMM(BPF_REG_0, 0),
8845 BPF_EXIT_INSN(),
8846 },
8847 .errstr = "R1 offset is outside of the packet",
8848 .result = REJECT,
8849 .prog_type = BPF_PROG_TYPE_XDP,
8850 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8851 },
8852 {
8853 "XDP pkt read, pkt_data >= pkt_meta', good access",
8854 .insns = {
8855 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8856 offsetof(struct xdp_md, data_meta)),
8857 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8858 offsetof(struct xdp_md, data)),
8859 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8861 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8862 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8863 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8864 BPF_MOV64_IMM(BPF_REG_0, 0),
8865 BPF_EXIT_INSN(),
8866 },
8867 .result = ACCEPT,
8868 .prog_type = BPF_PROG_TYPE_XDP,
8869 },
8870 {
8871 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
8872 .insns = {
8873 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8874 offsetof(struct xdp_md, data_meta)),
8875 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8876 offsetof(struct xdp_md, data)),
8877 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8878 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8879 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8880 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8881 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8882 BPF_MOV64_IMM(BPF_REG_0, 0),
8883 BPF_EXIT_INSN(),
8884 },
8885 .errstr = "R1 offset is outside of the packet",
8886 .result = REJECT,
8887 .prog_type = BPF_PROG_TYPE_XDP,
8888 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8889 },
8890 {
8891 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
8892 .insns = {
8893 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8894 offsetof(struct xdp_md, data_meta)),
8895 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8896 offsetof(struct xdp_md, data)),
8897 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8899 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8900 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8901 BPF_MOV64_IMM(BPF_REG_0, 0),
8902 BPF_EXIT_INSN(),
8903 },
8904 .errstr = "R1 offset is outside of the packet",
8905 .result = REJECT,
8906 .prog_type = BPF_PROG_TYPE_XDP,
8907 },
8908 {
8909 "XDP pkt read, pkt_meta' <= pkt_data, good access",
8910 .insns = {
8911 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8912 offsetof(struct xdp_md, data_meta)),
8913 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8914 offsetof(struct xdp_md, data)),
8915 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8917 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8918 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8919 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8920 BPF_MOV64_IMM(BPF_REG_0, 0),
8921 BPF_EXIT_INSN(),
8922 },
8923 .result = ACCEPT,
8924 .prog_type = BPF_PROG_TYPE_XDP,
8925 },
8926 {
8927 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
8928 .insns = {
8929 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8930 offsetof(struct xdp_md, data_meta)),
8931 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8932 offsetof(struct xdp_md, data)),
8933 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8934 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8935 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8936 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8937 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8938 BPF_MOV64_IMM(BPF_REG_0, 0),
8939 BPF_EXIT_INSN(),
8940 },
8941 .errstr = "R1 offset is outside of the packet",
8942 .result = REJECT,
8943 .prog_type = BPF_PROG_TYPE_XDP,
8944 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8945 },
8946 {
8947 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
8948 .insns = {
8949 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8950 offsetof(struct xdp_md, data_meta)),
8951 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8952 offsetof(struct xdp_md, data)),
8953 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8954 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8955 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8956 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8957 BPF_MOV64_IMM(BPF_REG_0, 0),
8958 BPF_EXIT_INSN(),
8959 },
8960 .errstr = "R1 offset is outside of the packet",
8961 .result = REJECT,
8962 .prog_type = BPF_PROG_TYPE_XDP,
8963 },
8964 {
8965 "XDP pkt read, pkt_data <= pkt_meta', good access",
8966 .insns = {
8967 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8968 offsetof(struct xdp_md, data_meta)),
8969 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8970 offsetof(struct xdp_md, data)),
8971 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8972 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8973 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8974 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8975 BPF_MOV64_IMM(BPF_REG_0, 0),
8976 BPF_EXIT_INSN(),
8977 },
8978 .result = ACCEPT,
8979 .prog_type = BPF_PROG_TYPE_XDP,
8980 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8981 },
8982 {
8983 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
8984 .insns = {
8985 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8986 offsetof(struct xdp_md, data_meta)),
8987 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8988 offsetof(struct xdp_md, data)),
8989 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8991 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8992 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8993 BPF_MOV64_IMM(BPF_REG_0, 0),
8994 BPF_EXIT_INSN(),
8995 },
8996 .errstr = "R1 offset is outside of the packet",
8997 .result = REJECT,
8998 .prog_type = BPF_PROG_TYPE_XDP,
8999 },
9000 {
9001 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
9002 .insns = {
9003 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9004 offsetof(struct xdp_md, data_meta)),
9005 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9006 offsetof(struct xdp_md, data)),
9007 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9008 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9009 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9010 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9011 BPF_MOV64_IMM(BPF_REG_0, 0),
9012 BPF_EXIT_INSN(),
9013 },
9014 .errstr = "R1 offset is outside of the packet",
9015 .result = REJECT,
9016 .prog_type = BPF_PROG_TYPE_XDP,
9017 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9018 },
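	/* "check deducing bounds from const": bounds learned from signed
	 * comparisons against immediates must be applied before pointer
	 * arithmetic; subtracting the ctx pointer from a scalar, or mixing
	 * ctx with a possibly-unbounded value, is rejected.
	 */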
9019 {
9020 "check deducing bounds from const, 1",
9021 .insns = {
9022 BPF_MOV64_IMM(BPF_REG_0, 1),
9023 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
9024 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9025 BPF_EXIT_INSN(),
9026 },
9027 .result = REJECT,
9028 .errstr = "R0 tried to subtract pointer from scalar",
9029 },
9030 {
9031 "check deducing bounds from const, 2",
9032 .insns = {
9033 BPF_MOV64_IMM(BPF_REG_0, 1),
9034 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
9035 BPF_EXIT_INSN(),
9036 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
9037 BPF_EXIT_INSN(),
9038 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9039 BPF_EXIT_INSN(),
9040 },
9041 .result = ACCEPT,
9042 .retval = 1,
9043 },
9044 {
9045 "check deducing bounds from const, 3",
9046 .insns = {
9047 BPF_MOV64_IMM(BPF_REG_0, 0),
9048 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9049 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9050 BPF_EXIT_INSN(),
9051 },
9052 .result = REJECT,
9053 .errstr = "R0 tried to subtract pointer from scalar",
9054 },
9055 {
9056 "check deducing bounds from const, 4",
9057 .insns = {
9058 BPF_MOV64_IMM(BPF_REG_0, 0),
9059 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
9060 BPF_EXIT_INSN(),
9061 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9062 BPF_EXIT_INSN(),
9063 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9064 BPF_EXIT_INSN(),
9065 },
9066 .result = ACCEPT,
9067 },
9068 {
9069 "check deducing bounds from const, 5",
9070 .insns = {
9071 BPF_MOV64_IMM(BPF_REG_0, 0),
9072 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9073 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9074 BPF_EXIT_INSN(),
9075 },
9076 .result = REJECT,
9077 .errstr = "R0 tried to subtract pointer from scalar",
9078 },
9079 {
9080 "check deducing bounds from const, 6",
9081 .insns = {
9082 BPF_MOV64_IMM(BPF_REG_0, 0),
9083 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9084 BPF_EXIT_INSN(),
9085 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9086 BPF_EXIT_INSN(),
9087 },
9088 .result = REJECT,
9089 .errstr = "R0 tried to subtract pointer from scalar",
9090 },
9091 {
9092 "check deducing bounds from const, 7",
9093 .insns = {
9094 BPF_MOV64_IMM(BPF_REG_0, ~0),
9095 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9096 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9097 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9098 offsetof(struct __sk_buff, mark)),
9099 BPF_EXIT_INSN(),
9100 },
9101 .result = REJECT,
9102 .errstr = "dereference of modified ctx ptr",
9103 },
9104 {
9105 "check deducing bounds from const, 8",
9106 .insns = {
9107 BPF_MOV64_IMM(BPF_REG_0, ~0),
9108 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9109 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
9110 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9111 offsetof(struct __sk_buff, mark)),
9112 BPF_EXIT_INSN(),
9113 },
9114 .result = REJECT,
9115 .errstr = "dereference of modified ctx ptr",
9116 },
9117 {
9118 "check deducing bounds from const, 9",
9119 .insns = {
9120 BPF_MOV64_IMM(BPF_REG_0, 0),
9121 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9122 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9123 BPF_EXIT_INSN(),
9124 },
9125 .result = REJECT,
9126 .errstr = "R0 tried to subtract pointer from scalar",
9127 },
9128 {
9129 "check deducing bounds from const, 10",
9130 .insns = {
9131 BPF_MOV64_IMM(BPF_REG_0, 0),
9132 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9133 /* Marks reg as unknown. */
9134 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
9135 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9136 BPF_EXIT_INSN(),
9137 },
9138 .result = REJECT,
9139 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
9140 },
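	/* The "bpf_exit with invalid return code" tests below rely on the
	 * verifier tracking the possible range of R0 at program exit:
	 * BPF_PROG_TYPE_CGROUP_SOCK programs are expected to return 0 or 1,
	 * so any exit where R0 may fall outside that range (see the value
	 * ranges quoted in .errstr) is rejected.
	 */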
9141 {
9142		"bpf_exit with invalid return code. test1",
9143 .insns = {
9144 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9145 BPF_EXIT_INSN(),
9146 },
9147 .errstr = "R0 has value (0x0; 0xffffffff)",
9148 .result = REJECT,
9149 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9150 },
9151 {
9152 "bpf_exit with invalid return code. test2",
9153 .insns = {
9154 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9155 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
9156 BPF_EXIT_INSN(),
9157 },
9158 .result = ACCEPT,
9159 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9160 },
9161 {
9162 "bpf_exit with invalid return code. test3",
9163 .insns = {
9164 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9165 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
9166 BPF_EXIT_INSN(),
9167 },
9168 .errstr = "R0 has value (0x0; 0x3)",
9169 .result = REJECT,
9170 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9171 },
9172 {
9173 "bpf_exit with invalid return code. test4",
9174 .insns = {
9175 BPF_MOV64_IMM(BPF_REG_0, 1),
9176 BPF_EXIT_INSN(),
9177 },
9178 .result = ACCEPT,
9179 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9180 },
9181 {
9182 "bpf_exit with invalid return code. test5",
9183 .insns = {
9184 BPF_MOV64_IMM(BPF_REG_0, 2),
9185 BPF_EXIT_INSN(),
9186 },
9187 .errstr = "R0 has value (0x2; 0x0)",
9188 .result = REJECT,
9189 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9190 },
9191 {
9192 "bpf_exit with invalid return code. test6",
9193 .insns = {
9194 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9195 BPF_EXIT_INSN(),
9196 },
9197 .errstr = "R0 is not a known value (ctx)",
9198 .result = REJECT,
9199 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9200 },
9201 {
9202 "bpf_exit with invalid return code. test7",
9203 .insns = {
9204 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9205 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
9206 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
9207 BPF_EXIT_INSN(),
9208 },
9209 .errstr = "R0 has unknown scalar value",
9210 .result = REJECT,
9211 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9212 },
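	/* The "calls:" tests exercise bpf-to-bpf function calls: a BPF_CALL
	 * insn with src_reg = 1 (BPF_PSEUDO_CALL) and imm used as an
	 * instruction offset to the callee, relative to the insn following
	 * the call.  Roughly equivalent C for the first test (names are
	 * illustrative only):
	 *
	 *	static int subprog(void) { return 2; }
	 *	int main_prog(void) { subprog(); return 1; }
	 */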
9213	{
9214 "calls: basic sanity",
9215 .insns = {
9216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9217 BPF_MOV64_IMM(BPF_REG_0, 1),
9218 BPF_EXIT_INSN(),
9219 BPF_MOV64_IMM(BPF_REG_0, 2),
9220 BPF_EXIT_INSN(),
9221 },
9222 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9223 .result = ACCEPT,
9224 },
9225 {
9226		"calls: not on unprivileged",
9227 .insns = {
9228 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9229 BPF_MOV64_IMM(BPF_REG_0, 1),
9230 BPF_EXIT_INSN(),
9231 BPF_MOV64_IMM(BPF_REG_0, 2),
9232 BPF_EXIT_INSN(),
9233 },
9234 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
9235 .result_unpriv = REJECT,
9236 .result = ACCEPT,
9237		.retval = 1,
9238	},
9239 {
9240		"calls: div by 0 in subprog",
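		/* The subprog below does a 32-bit division by zero on purpose.
		 * A div/mod by zero is not a verification error; the runtime
		 * defines the result instead of trapping (the quotient register
		 * ends up as 0), so the program is accepted and, per .retval,
		 * returns 1 from the main path.
		 */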
9241 .insns = {
9242 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9243 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9244 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9245 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9246 offsetof(struct __sk_buff, data_end)),
9247 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9249 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9250 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9251 BPF_MOV64_IMM(BPF_REG_0, 1),
9252 BPF_EXIT_INSN(),
9253 BPF_MOV32_IMM(BPF_REG_2, 0),
9254 BPF_MOV32_IMM(BPF_REG_3, 1),
9255 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
9256 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9257 offsetof(struct __sk_buff, data)),
9258 BPF_EXIT_INSN(),
9259 },
9260 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9261 .result = ACCEPT,
9262 .retval = 1,
9263 },
9264 {
9265 "calls: multiple ret types in subprog 1",
9266 .insns = {
9267 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9268 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9269 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9270 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9271 offsetof(struct __sk_buff, data_end)),
9272 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9273 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9274 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9275 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9276 BPF_MOV64_IMM(BPF_REG_0, 1),
9277 BPF_EXIT_INSN(),
9278 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9279 offsetof(struct __sk_buff, data)),
9280 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9281 BPF_MOV32_IMM(BPF_REG_0, 42),
9282 BPF_EXIT_INSN(),
9283 },
9284 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9285 .result = REJECT,
9286 .errstr = "R0 invalid mem access 'inv'",
9287 },
9288 {
9289 "calls: multiple ret types in subprog 2",
9290 .insns = {
9291 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9292 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9293 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9294 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9295 offsetof(struct __sk_buff, data_end)),
9296 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9298 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9299 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9300 BPF_MOV64_IMM(BPF_REG_0, 1),
9301 BPF_EXIT_INSN(),
9302 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9303 offsetof(struct __sk_buff, data)),
9304 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9305 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
9306 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9307 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9309 BPF_LD_MAP_FD(BPF_REG_1, 0),
9310 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9311 BPF_FUNC_map_lookup_elem),
9312 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9313 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
9314 offsetof(struct __sk_buff, data)),
9315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
9316 BPF_EXIT_INSN(),
9317 },
9318 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9319 .fixup_map1 = { 16 },
9320 .result = REJECT,
9321 .errstr = "R0 min value is outside of the array range",
9322 },
9323 {
9324		"calls: overlapping caller/callee",
9325 .insns = {
9326 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
9327 BPF_MOV64_IMM(BPF_REG_0, 1),
9328 BPF_EXIT_INSN(),
9329 },
9330 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9331 .errstr = "last insn is not an exit or jmp",
9332 .result = REJECT,
9333 },
9334 {
9335 "calls: wrong recursive calls",
9336 .insns = {
9337 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9338 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9339 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9340 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9341 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9342 BPF_MOV64_IMM(BPF_REG_0, 1),
9343 BPF_EXIT_INSN(),
9344 },
9345 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9346 .errstr = "jump out of range",
9347 .result = REJECT,
9348 },
9349 {
9350 "calls: wrong src reg",
9351 .insns = {
9352 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
9353 BPF_MOV64_IMM(BPF_REG_0, 1),
9354 BPF_EXIT_INSN(),
9355 },
9356 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9357 .errstr = "BPF_CALL uses reserved fields",
9358 .result = REJECT,
9359 },
9360 {
9361 "calls: wrong off value",
9362 .insns = {
9363 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
9364 BPF_MOV64_IMM(BPF_REG_0, 1),
9365 BPF_EXIT_INSN(),
9366 BPF_MOV64_IMM(BPF_REG_0, 2),
9367 BPF_EXIT_INSN(),
9368 },
9369 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9370 .errstr = "BPF_CALL uses reserved fields",
9371 .result = REJECT,
9372 },
9373 {
9374 "calls: jump back loop",
9375 .insns = {
9376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9377 BPF_MOV64_IMM(BPF_REG_0, 1),
9378 BPF_EXIT_INSN(),
9379 },
9380 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9381 .errstr = "back-edge from insn 0 to 0",
9382 .result = REJECT,
9383 },
9384 {
9385 "calls: conditional call",
9386 .insns = {
9387 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9388 offsetof(struct __sk_buff, mark)),
9389 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9390 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9391 BPF_MOV64_IMM(BPF_REG_0, 1),
9392 BPF_EXIT_INSN(),
9393 BPF_MOV64_IMM(BPF_REG_0, 2),
9394 BPF_EXIT_INSN(),
9395 },
9396 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9397 .errstr = "jump out of range",
9398 .result = REJECT,
9399 },
9400 {
9401 "calls: conditional call 2",
9402 .insns = {
9403 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9404 offsetof(struct __sk_buff, mark)),
9405 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9407 BPF_MOV64_IMM(BPF_REG_0, 1),
9408 BPF_EXIT_INSN(),
9409 BPF_MOV64_IMM(BPF_REG_0, 2),
9410 BPF_EXIT_INSN(),
9411 BPF_MOV64_IMM(BPF_REG_0, 3),
9412 BPF_EXIT_INSN(),
9413 },
9414 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9415 .result = ACCEPT,
9416 },
9417 {
9418 "calls: conditional call 3",
9419 .insns = {
9420 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9421 offsetof(struct __sk_buff, mark)),
9422 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9423 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9424 BPF_MOV64_IMM(BPF_REG_0, 1),
9425 BPF_EXIT_INSN(),
9426 BPF_MOV64_IMM(BPF_REG_0, 1),
9427 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9428 BPF_MOV64_IMM(BPF_REG_0, 3),
9429 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9430 },
9431 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9432 .errstr = "back-edge from insn",
9433 .result = REJECT,
9434 },
9435 {
9436 "calls: conditional call 4",
9437 .insns = {
9438 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9439 offsetof(struct __sk_buff, mark)),
9440 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9441 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9442 BPF_MOV64_IMM(BPF_REG_0, 1),
9443 BPF_EXIT_INSN(),
9444 BPF_MOV64_IMM(BPF_REG_0, 1),
9445 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
9446 BPF_MOV64_IMM(BPF_REG_0, 3),
9447 BPF_EXIT_INSN(),
9448 },
9449 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9450 .result = ACCEPT,
9451 },
9452 {
9453 "calls: conditional call 5",
9454 .insns = {
9455 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9456 offsetof(struct __sk_buff, mark)),
9457 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9459 BPF_MOV64_IMM(BPF_REG_0, 1),
9460 BPF_EXIT_INSN(),
9461 BPF_MOV64_IMM(BPF_REG_0, 1),
9462 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9463 BPF_MOV64_IMM(BPF_REG_0, 3),
9464 BPF_EXIT_INSN(),
9465 },
9466 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9467 .errstr = "back-edge from insn",
9468 .result = REJECT,
9469 },
9470 {
9471 "calls: conditional call 6",
9472 .insns = {
9473 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9474 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
9475 BPF_EXIT_INSN(),
9476 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9477 offsetof(struct __sk_buff, mark)),
9478 BPF_EXIT_INSN(),
9479 },
9480 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9481 .errstr = "back-edge from insn",
9482 .result = REJECT,
9483 },
9484 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009485 "calls: using r0 returned by callee",
9486 .insns = {
9487 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9488 BPF_EXIT_INSN(),
9489 BPF_MOV64_IMM(BPF_REG_0, 2),
9490 BPF_EXIT_INSN(),
9491 },
9492 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9493 .result = ACCEPT,
9494 },
9495 {
9496		"calls: using uninit r0 from callee",
9497 .insns = {
9498 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9499 BPF_EXIT_INSN(),
9500 BPF_EXIT_INSN(),
9501 },
9502 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9503 .errstr = "!read_ok",
9504 .result = REJECT,
9505 },
9506 {
9507		"calls: callee is using r1",
9508 .insns = {
9509 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9510 BPF_EXIT_INSN(),
9511 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9512 offsetof(struct __sk_buff, len)),
9513 BPF_EXIT_INSN(),
9514 },
9515 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
9516 .result = ACCEPT,
9517		.retval = TEST_DATA_LEN,
9518	},
9519 {
9520 "calls: callee using args1",
9521 .insns = {
9522 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9523 BPF_EXIT_INSN(),
9524 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9525 BPF_EXIT_INSN(),
9526 },
9527 .errstr_unpriv = "allowed for root only",
9528 .result_unpriv = REJECT,
9529 .result = ACCEPT,
9530		.retval = POINTER_VALUE,
9531	},
9532 {
9533 "calls: callee using wrong args2",
9534 .insns = {
9535 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9536 BPF_EXIT_INSN(),
9537 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9538 BPF_EXIT_INSN(),
9539 },
9540 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9541 .errstr = "R2 !read_ok",
9542 .result = REJECT,
9543 },
9544 {
9545 "calls: callee using two args",
9546 .insns = {
9547 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9548 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
9549 offsetof(struct __sk_buff, len)),
9550 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
9551 offsetof(struct __sk_buff, len)),
9552 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9553 BPF_EXIT_INSN(),
9554 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9555 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9556 BPF_EXIT_INSN(),
9557 },
9558 .errstr_unpriv = "allowed for root only",
9559 .result_unpriv = REJECT,
9560 .result = ACCEPT,
9561		.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
9562	},
9563 {
9564 "calls: callee changing pkt pointers",
9565 .insns = {
9566 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
9567 offsetof(struct xdp_md, data)),
9568 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
9569 offsetof(struct xdp_md, data_end)),
9570 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
9571 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
9572 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
9573 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9574 /* clear_all_pkt_pointers() has to walk all frames
9575 * to make sure that pkt pointers in the caller
9576 * are cleared when callee is calling a helper that
9577 * adjusts packet size
9578 */
9579 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
9580 BPF_MOV32_IMM(BPF_REG_0, 0),
9581 BPF_EXIT_INSN(),
9582 BPF_MOV64_IMM(BPF_REG_2, 0),
9583 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9584 BPF_FUNC_xdp_adjust_head),
9585 BPF_EXIT_INSN(),
9586 },
9587 .result = REJECT,
9588 .errstr = "R6 invalid mem access 'inv'",
9589 .prog_type = BPF_PROG_TYPE_XDP,
9590 },
9591 {
9592 "calls: two calls with args",
9593 .insns = {
9594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9595 BPF_EXIT_INSN(),
9596 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9597 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9598 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9599 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9600 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9601 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9602 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9603 BPF_EXIT_INSN(),
9604 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9605 offsetof(struct __sk_buff, len)),
9606 BPF_EXIT_INSN(),
9607 },
9608 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9609 .result = ACCEPT,
9610		.retval = TEST_DATA_LEN + TEST_DATA_LEN,
9611	},
9612 {
9613		"calls: calls with stack arith",
9614 .insns = {
9615 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9617 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9618 BPF_EXIT_INSN(),
9619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9621 BPF_EXIT_INSN(),
9622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9623 BPF_MOV64_IMM(BPF_REG_0, 42),
9624 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9625 BPF_EXIT_INSN(),
9626 },
9627 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9628 .result = ACCEPT,
9629		.retval = 42,
9630	},
9631 {
9632 "calls: calls with misaligned stack access",
9633 .insns = {
9634 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9635 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9636 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9637 BPF_EXIT_INSN(),
9638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
9639 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9640 BPF_EXIT_INSN(),
9641 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9642 BPF_MOV64_IMM(BPF_REG_0, 42),
9643 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9644 BPF_EXIT_INSN(),
9645 },
9646 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9647 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
9648 .errstr = "misaligned stack access",
9649 .result = REJECT,
9650 },
9651 {
9652 "calls: calls control flow, jump test",
9653 .insns = {
9654 BPF_MOV64_IMM(BPF_REG_0, 42),
9655 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9656 BPF_MOV64_IMM(BPF_REG_0, 43),
9657 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9658 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
9659 BPF_EXIT_INSN(),
9660 },
9661 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9662 .result = ACCEPT,
9663		.retval = 43,
9664	},
9665 {
9666 "calls: calls control flow, jump test 2",
9667 .insns = {
9668 BPF_MOV64_IMM(BPF_REG_0, 42),
9669 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9670 BPF_MOV64_IMM(BPF_REG_0, 43),
9671 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9672 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9673 BPF_EXIT_INSN(),
9674 },
9675 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9676 .errstr = "jump out of range from insn 1 to 4",
9677 .result = REJECT,
9678 },
9679 {
9680		"calls: two calls with bad jump",
9681 .insns = {
9682 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9683 BPF_EXIT_INSN(),
9684 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9685 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9686 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9687 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9688 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9689 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9690 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9691 BPF_EXIT_INSN(),
9692 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9693 offsetof(struct __sk_buff, len)),
9694 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
9695 BPF_EXIT_INSN(),
9696 },
9697 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9698 .errstr = "jump out of range from insn 11 to 9",
9699 .result = REJECT,
9700 },
9701 {
9702 "calls: recursive call. test1",
9703 .insns = {
9704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9705 BPF_EXIT_INSN(),
9706 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9707 BPF_EXIT_INSN(),
9708 },
9709 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9710 .errstr = "back-edge",
9711 .result = REJECT,
9712 },
9713 {
9714 "calls: recursive call. test2",
9715 .insns = {
9716 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9717 BPF_EXIT_INSN(),
9718 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9719 BPF_EXIT_INSN(),
9720 },
9721 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9722 .errstr = "back-edge",
9723 .result = REJECT,
9724 },
9725 {
9726 "calls: unreachable code",
9727 .insns = {
9728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9729 BPF_EXIT_INSN(),
9730 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9731 BPF_EXIT_INSN(),
9732 BPF_MOV64_IMM(BPF_REG_0, 0),
9733 BPF_EXIT_INSN(),
9734 BPF_MOV64_IMM(BPF_REG_0, 0),
9735 BPF_EXIT_INSN(),
9736 },
9737 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9738 .errstr = "unreachable insn 6",
9739 .result = REJECT,
9740 },
9741 {
9742 "calls: invalid call",
9743 .insns = {
9744 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9745 BPF_EXIT_INSN(),
9746 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
9747 BPF_EXIT_INSN(),
9748 },
9749 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9750 .errstr = "invalid destination",
9751 .result = REJECT,
9752 },
9753 {
9754		"calls: invalid call 2",
9755 .insns = {
9756 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9757 BPF_EXIT_INSN(),
9758 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
9759 BPF_EXIT_INSN(),
9760 },
9761 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9762 .errstr = "invalid destination",
9763 .result = REJECT,
9764 },
9765 {
9766		"calls: jumping across function bodies. test1",
9767 .insns = {
9768 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9769 BPF_MOV64_IMM(BPF_REG_0, 0),
9770 BPF_EXIT_INSN(),
9771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
9772 BPF_EXIT_INSN(),
9773 },
9774 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9775 .errstr = "jump out of range",
9776 .result = REJECT,
9777 },
9778 {
9779 "calls: jumping across function bodies. test2",
9780 .insns = {
9781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
9782 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9783 BPF_MOV64_IMM(BPF_REG_0, 0),
9784 BPF_EXIT_INSN(),
9785 BPF_EXIT_INSN(),
9786 },
9787 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9788 .errstr = "jump out of range",
9789 .result = REJECT,
9790 },
9791 {
9792 "calls: call without exit",
9793 .insns = {
9794 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9795 BPF_EXIT_INSN(),
9796 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9797 BPF_EXIT_INSN(),
9798 BPF_MOV64_IMM(BPF_REG_0, 0),
9799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
9800 },
9801 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9802 .errstr = "not an exit",
9803 .result = REJECT,
9804 },
9805 {
9806 "calls: call into middle of ld_imm64",
9807 .insns = {
9808 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9809 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9810 BPF_MOV64_IMM(BPF_REG_0, 0),
9811 BPF_EXIT_INSN(),
9812 BPF_LD_IMM64(BPF_REG_0, 0),
9813 BPF_EXIT_INSN(),
9814 },
9815 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9816 .errstr = "last insn",
9817 .result = REJECT,
9818 },
9819 {
9820 "calls: call into middle of other call",
9821 .insns = {
9822 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9823 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9824 BPF_MOV64_IMM(BPF_REG_0, 0),
9825 BPF_EXIT_INSN(),
9826 BPF_MOV64_IMM(BPF_REG_0, 0),
9827 BPF_MOV64_IMM(BPF_REG_0, 0),
9828 BPF_EXIT_INSN(),
9829 },
9830 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9831 .errstr = "last insn",
9832 .result = REJECT,
9833 },
9834 {
9835		"calls: ld_abs with changing ctx data in callee",
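		/* BPF_LD_ABS/IND implicitly operate on the skb saved in the
		 * ctx, so they may not be combined with a helper that can
		 * change packet data (bpf_skb_vlan_push here), even when that
		 * helper is called from a different function; see .errstr.
		 */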
9836 .insns = {
9837 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9838 BPF_LD_ABS(BPF_B, 0),
9839 BPF_LD_ABS(BPF_H, 0),
9840 BPF_LD_ABS(BPF_W, 0),
9841 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
9842 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
9843 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
9844 BPF_LD_ABS(BPF_B, 0),
9845 BPF_LD_ABS(BPF_H, 0),
9846 BPF_LD_ABS(BPF_W, 0),
9847 BPF_EXIT_INSN(),
9848 BPF_MOV64_IMM(BPF_REG_2, 1),
9849 BPF_MOV64_IMM(BPF_REG_3, 2),
9850 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9851 BPF_FUNC_skb_vlan_push),
9852 BPF_EXIT_INSN(),
9853 },
9854 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9855 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
9856 .result = REJECT,
9857 },
9858 {
9859		"calls: two calls with bad fallthrough",
9860 .insns = {
9861 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9862 BPF_EXIT_INSN(),
9863 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9864 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9865 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9866 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9867 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9868 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9869 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9870 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
9871 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9872 offsetof(struct __sk_buff, len)),
9873 BPF_EXIT_INSN(),
9874 },
9875 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9876 .errstr = "not an exit",
9877 .result = REJECT,
9878 },
9879 {
9880 "calls: two calls with stack read",
9881 .insns = {
9882 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9883 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9885 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9886 BPF_EXIT_INSN(),
9887 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9888 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9889 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9890 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9891 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9892 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9893 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9894 BPF_EXIT_INSN(),
9895 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9896 BPF_EXIT_INSN(),
9897 },
9898 .prog_type = BPF_PROG_TYPE_XDP,
9899 .result = ACCEPT,
9900 },
9901 {
9902 "calls: two calls with stack write",
9903 .insns = {
9904 /* main prog */
9905 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9906 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9908 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9909 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9910 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9911 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
9912 BPF_EXIT_INSN(),
9913
9914 /* subprog 1 */
9915 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9916 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9917 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
9918 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
9919 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9920 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9921 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
9922 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
9923 /* write into stack frame of main prog */
9924 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9925 BPF_EXIT_INSN(),
9926
9927 /* subprog 2 */
9928 /* read from stack frame of main prog */
9929 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9930 BPF_EXIT_INSN(),
9931 },
9932 .prog_type = BPF_PROG_TYPE_XDP,
9933 .result = ACCEPT,
9934 },
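	/* The stack depth tests below check that the verifier sums the stack
	 * usage of all frames along a call chain: the combined size is capped
	 * (512 bytes, per the inline size comments), and the deeply nested
	 * chain in test5 trips the call-depth limit instead ("call stack").
	 */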
9935 {
9936		"calls: stack overflow using two frames (pre-call access)",
9937 .insns = {
9938 /* prog 1 */
9939 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9940 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
9941 BPF_EXIT_INSN(),
9942
9943 /* prog 2 */
9944 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9945 BPF_MOV64_IMM(BPF_REG_0, 0),
9946 BPF_EXIT_INSN(),
9947 },
9948 .prog_type = BPF_PROG_TYPE_XDP,
9949 .errstr = "combined stack size",
9950 .result = REJECT,
9951 },
9952 {
9953 "calls: stack overflow using two frames (post-call access)",
9954 .insns = {
9955 /* prog 1 */
9956 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
9957 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9958 BPF_EXIT_INSN(),
9959
9960 /* prog 2 */
9961 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9962 BPF_MOV64_IMM(BPF_REG_0, 0),
9963 BPF_EXIT_INSN(),
9964 },
9965 .prog_type = BPF_PROG_TYPE_XDP,
9966 .errstr = "combined stack size",
9967 .result = REJECT,
9968 },
9969 {
9970		"calls: stack depth check using three frames. test1",
9971 .insns = {
9972 /* main */
9973 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
9974 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
9975 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
9976 BPF_MOV64_IMM(BPF_REG_0, 0),
9977 BPF_EXIT_INSN(),
9978 /* A */
9979 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
9980 BPF_EXIT_INSN(),
9981 /* B */
9982 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
9983 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
9984 BPF_EXIT_INSN(),
9985 },
9986 .prog_type = BPF_PROG_TYPE_XDP,
9987 /* stack_main=32, stack_A=256, stack_B=64
9988 * and max(main+A, main+A+B) < 512
9989 */
9990 .result = ACCEPT,
9991 },
9992 {
9993 "calls: stack depth check using three frames. test2",
9994 .insns = {
9995 /* main */
9996 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
9997 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
9998 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
9999 BPF_MOV64_IMM(BPF_REG_0, 0),
10000 BPF_EXIT_INSN(),
10001 /* A */
10002 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10003 BPF_EXIT_INSN(),
10004 /* B */
10005 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10006 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10007 BPF_EXIT_INSN(),
10008 },
10009 .prog_type = BPF_PROG_TYPE_XDP,
10010 /* stack_main=32, stack_A=64, stack_B=256
10011 * and max(main+A, main+A+B) < 512
10012 */
10013 .result = ACCEPT,
10014 },
10015 {
10016 "calls: stack depth check using three frames. test3",
10017 .insns = {
10018 /* main */
10019 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10020 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10021 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10022 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
10023 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
10024 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10025 BPF_MOV64_IMM(BPF_REG_0, 0),
10026 BPF_EXIT_INSN(),
10027 /* A */
10028 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
10029 BPF_EXIT_INSN(),
10030 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
10031 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10032 /* B */
10033 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
10034 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
10035 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10036 BPF_EXIT_INSN(),
10037 },
10038 .prog_type = BPF_PROG_TYPE_XDP,
10039 /* stack_main=64, stack_A=224, stack_B=256
10040 * and max(main+A, main+A+B) > 512
10041 */
10042 .errstr = "combined stack",
10043 .result = REJECT,
10044 },
10045 {
10046 "calls: stack depth check using three frames. test4",
10047 /* void main(void) {
10048 * func1(0);
10049 * func1(1);
10050 * func2(1);
10051 * }
10052 * void func1(int alloc_or_recurse) {
10053 * if (alloc_or_recurse) {
10054 * frame_pointer[-300] = 1;
10055 * } else {
10056 * func2(alloc_or_recurse);
10057 * }
10058 * }
10059 * void func2(int alloc_or_recurse) {
10060 * if (alloc_or_recurse) {
10061 * frame_pointer[-300] = 1;
10062 * }
10063 * }
10064 */
10065 .insns = {
10066 /* main */
10067 BPF_MOV64_IMM(BPF_REG_1, 0),
10068 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10069 BPF_MOV64_IMM(BPF_REG_1, 1),
10070 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10071 BPF_MOV64_IMM(BPF_REG_1, 1),
10072 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
10073 BPF_MOV64_IMM(BPF_REG_0, 0),
10074 BPF_EXIT_INSN(),
10075 /* A */
10076 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
10077 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10078 BPF_EXIT_INSN(),
10079 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10080 BPF_EXIT_INSN(),
10081 /* B */
10082 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10083 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10084 BPF_EXIT_INSN(),
10085 },
10086 .prog_type = BPF_PROG_TYPE_XDP,
10087 .result = REJECT,
10088 .errstr = "combined stack",
10089 },
10090 {
10091		"calls: stack depth check using three frames. test5",
10092 .insns = {
10093 /* main */
10094 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
10095 BPF_EXIT_INSN(),
10096 /* A */
10097 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10098 BPF_EXIT_INSN(),
10099 /* B */
10100 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
10101 BPF_EXIT_INSN(),
10102 /* C */
10103 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
10104 BPF_EXIT_INSN(),
10105 /* D */
10106 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
10107 BPF_EXIT_INSN(),
10108 /* E */
10109 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
10110 BPF_EXIT_INSN(),
10111 /* F */
10112 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
10113 BPF_EXIT_INSN(),
10114 /* G */
10115 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
10116 BPF_EXIT_INSN(),
10117 /* H */
10118 BPF_MOV64_IMM(BPF_REG_0, 0),
10119 BPF_EXIT_INSN(),
10120 },
10121 .prog_type = BPF_PROG_TYPE_XDP,
10122 .errstr = "call stack",
10123 .result = REJECT,
10124 },
10125 {
10126		"calls: spill into caller stack frame",
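		/* The callee tries to store the passed-in stack pointer itself
		 * into the caller's stack slot, which the verifier refuses
		 * ("cannot spill"), while the next test shows that writing a
		 * plain scalar (42) through the same kind of pointer is fine.
		 */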
10127 .insns = {
10128 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10129 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10131 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10132 BPF_EXIT_INSN(),
10133 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
10134 BPF_MOV64_IMM(BPF_REG_0, 0),
10135 BPF_EXIT_INSN(),
10136 },
10137 .prog_type = BPF_PROG_TYPE_XDP,
10138 .errstr = "cannot spill",
10139 .result = REJECT,
10140 },
10141 {
10142		"calls: write into caller stack frame",
10143 .insns = {
10144 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10146 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10147 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10148 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10149 BPF_EXIT_INSN(),
10150 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
10151 BPF_MOV64_IMM(BPF_REG_0, 0),
10152 BPF_EXIT_INSN(),
10153 },
10154 .prog_type = BPF_PROG_TYPE_XDP,
10155 .result = ACCEPT,
10156		.retval = 42,
10157	},
10158 {
10159 "calls: write into callee stack frame",
10160 .insns = {
10161 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10162 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
10163 BPF_EXIT_INSN(),
10164 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
10165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
10166 BPF_EXIT_INSN(),
10167 },
10168 .prog_type = BPF_PROG_TYPE_XDP,
10169 .errstr = "cannot return stack pointer",
10170 .result = REJECT,
10171 },
10172 {
10173		"calls: two calls with stack write and void return",
10174 .insns = {
10175 /* main prog */
10176 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10177 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10179 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10181 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10182 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10183 BPF_EXIT_INSN(),
10184
10185 /* subprog 1 */
10186 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10187 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10188 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10189 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10191 BPF_EXIT_INSN(),
10192
10193 /* subprog 2 */
10194 /* write into stack frame of main prog */
10195 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
10196 BPF_EXIT_INSN(), /* void return */
10197 },
10198 .prog_type = BPF_PROG_TYPE_XDP,
10199 .result = ACCEPT,
10200 },
10201 {
10202 "calls: ambiguous return value",
10203 .insns = {
10204 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10205 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10206 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10207 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10208 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10209 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10210 BPF_EXIT_INSN(),
10211 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10212 BPF_MOV64_IMM(BPF_REG_0, 0),
10213 BPF_EXIT_INSN(),
10214 },
10215 .errstr_unpriv = "allowed for root only",
10216 .result_unpriv = REJECT,
10217 .errstr = "R0 !read_ok",
10218 .result = REJECT,
10219 },
10220 {
10221 "calls: two calls that return map_value",
10222 .insns = {
10223 /* main prog */
10224 /* pass fp-16, fp-8 into a function */
10225 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10226 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10227 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10228 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10229 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10230
10231 /* fetch map_value_ptr from the stack of this function */
10232 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
10233 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10234 /* write into map value */
10235 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10236			/* fetch second map_value_ptr from the stack */
10237 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10238 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10239 /* write into map value */
10240 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10241 BPF_MOV64_IMM(BPF_REG_0, 0),
10242 BPF_EXIT_INSN(),
10243
10244 /* subprog 1 */
10245 /* call 3rd function twice */
10246 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10247 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10248 /* first time with fp-8 */
10249 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10250 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10251 /* second time with fp-16 */
10252 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10253 BPF_EXIT_INSN(),
10254
10255 /* subprog 2 */
10256 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10257 /* lookup from map */
10258 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10259 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10261 BPF_LD_MAP_FD(BPF_REG_1, 0),
10262 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10263 BPF_FUNC_map_lookup_elem),
10264 /* write map_value_ptr into stack frame of main prog */
10265 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10266 BPF_MOV64_IMM(BPF_REG_0, 0),
10267 BPF_EXIT_INSN(), /* return 0 */
10268 },
10269 .prog_type = BPF_PROG_TYPE_XDP,
10270 .fixup_map1 = { 23 },
10271 .result = ACCEPT,
10272 },
10273 {
10274 "calls: two calls that return map_value with bool condition",
10275 .insns = {
10276 /* main prog */
10277 /* pass fp-16, fp-8 into a function */
10278 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10279 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10280 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10282 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10283 BPF_MOV64_IMM(BPF_REG_0, 0),
10284 BPF_EXIT_INSN(),
10285
10286 /* subprog 1 */
10287 /* call 3rd function twice */
10288 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10289 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10290 /* first time with fp-8 */
10291 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10292 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10293 /* fetch map_value_ptr from the stack of this function */
10294 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10295 /* write into map value */
10296 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10297 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10298 /* second time with fp-16 */
10299 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10300 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10301			/* fetch second map_value_ptr from the stack */
10302 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10303 /* write into map value */
10304 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10305 BPF_EXIT_INSN(),
10306
10307 /* subprog 2 */
10308 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10309 /* lookup from map */
10310 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10313 BPF_LD_MAP_FD(BPF_REG_1, 0),
10314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10315 BPF_FUNC_map_lookup_elem),
10316 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10317 BPF_MOV64_IMM(BPF_REG_0, 0),
10318 BPF_EXIT_INSN(), /* return 0 */
10319 /* write map_value_ptr into stack frame of main prog */
10320 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10321 BPF_MOV64_IMM(BPF_REG_0, 1),
10322 BPF_EXIT_INSN(), /* return 1 */
10323 },
10324 .prog_type = BPF_PROG_TYPE_XDP,
10325 .fixup_map1 = { 23 },
10326 .result = ACCEPT,
10327 },
10328 {
10329 "calls: two calls that return map_value with incorrect bool check",
10330 .insns = {
10331 /* main prog */
10332 /* pass fp-16, fp-8 into a function */
10333 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10335 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10338 BPF_MOV64_IMM(BPF_REG_0, 0),
10339 BPF_EXIT_INSN(),
10340
10341 /* subprog 1 */
10342 /* call 3rd function twice */
10343 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10344 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10345 /* first time with fp-8 */
10346 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10347 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10348 /* fetch map_value_ptr from the stack of this function */
10349 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10350 /* write into map value */
10351 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10352 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10353 /* second time with fp-16 */
10354 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10355 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10356			/* fetch second map_value_ptr from the stack */
10357 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10358 /* write into map value */
10359 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10360 BPF_EXIT_INSN(),
10361
10362 /* subprog 2 */
10363 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10364 /* lookup from map */
10365 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10366 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10367 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10368 BPF_LD_MAP_FD(BPF_REG_1, 0),
10369 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10370 BPF_FUNC_map_lookup_elem),
10371 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10372 BPF_MOV64_IMM(BPF_REG_0, 0),
10373 BPF_EXIT_INSN(), /* return 0 */
10374 /* write map_value_ptr into stack frame of main prog */
10375 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10376 BPF_MOV64_IMM(BPF_REG_0, 1),
10377 BPF_EXIT_INSN(), /* return 1 */
10378 },
10379 .prog_type = BPF_PROG_TYPE_XDP,
10380 .fixup_map1 = { 23 },
10381 .result = REJECT,
10382 .errstr = "invalid read from stack off -16+0 size 8",
10383 },
10384 {
10385 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
10386 .insns = {
10387 /* main prog */
10388 /* pass fp-16, fp-8 into a function */
10389 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10391 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10392 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10393 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10394 BPF_MOV64_IMM(BPF_REG_0, 0),
10395 BPF_EXIT_INSN(),
10396
10397 /* subprog 1 */
10398 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10399 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10400 /* 1st lookup from map */
10401 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10402 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10404 BPF_LD_MAP_FD(BPF_REG_1, 0),
10405 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10406 BPF_FUNC_map_lookup_elem),
10407 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10408 BPF_MOV64_IMM(BPF_REG_8, 0),
10409 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10410 /* write map_value_ptr into stack frame of main prog at fp-8 */
10411 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10412 BPF_MOV64_IMM(BPF_REG_8, 1),
10413
10414 /* 2nd lookup from map */
10415 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10417 BPF_LD_MAP_FD(BPF_REG_1, 0),
10418 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10419 BPF_FUNC_map_lookup_elem),
10420 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10421 BPF_MOV64_IMM(BPF_REG_9, 0),
10422 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10423 /* write map_value_ptr into stack frame of main prog at fp-16 */
10424 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10425 BPF_MOV64_IMM(BPF_REG_9, 1),
10426
10427 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10428 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10429 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10430 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10431 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10432 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10433 BPF_EXIT_INSN(),
10434
10435 /* subprog 2 */
10436 /* if arg2 == 1 do *arg1 = 0 */
10437 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10438 /* fetch map_value_ptr from the stack of this function */
10439 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10440 /* write into map value */
10441 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10442
10443 /* if arg4 == 1 do *arg3 = 0 */
10444 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10445 /* fetch map_value_ptr from the stack of this function */
10446 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10447 /* write into map value */
10448 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10449 BPF_EXIT_INSN(),
10450 },
10451 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10452 .fixup_map1 = { 12, 22 },
10453 .result = REJECT,
10454 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10455 },
10456 {
10457 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
10458 .insns = {
10459 /* main prog */
10460 /* pass fp-16, fp-8 into a function */
10461 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10463 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10465 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10466 BPF_MOV64_IMM(BPF_REG_0, 0),
10467 BPF_EXIT_INSN(),
10468
10469 /* subprog 1 */
10470 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10471 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10472 /* 1st lookup from map */
10473 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10474 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10476 BPF_LD_MAP_FD(BPF_REG_1, 0),
10477 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10478 BPF_FUNC_map_lookup_elem),
10479 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10480 BPF_MOV64_IMM(BPF_REG_8, 0),
10481 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10482 /* write map_value_ptr into stack frame of main prog at fp-8 */
10483 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10484 BPF_MOV64_IMM(BPF_REG_8, 1),
10485
10486 /* 2nd lookup from map */
10487 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10489 BPF_LD_MAP_FD(BPF_REG_1, 0),
10490 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10491 BPF_FUNC_map_lookup_elem),
10492 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10493 BPF_MOV64_IMM(BPF_REG_9, 0),
10494 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10495 /* write map_value_ptr into stack frame of main prog at fp-16 */
10496 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10497 BPF_MOV64_IMM(BPF_REG_9, 1),
10498
10499 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10500 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10501 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10502 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10503 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10504 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10505 BPF_EXIT_INSN(),
10506
10507 /* subprog 2 */
10508 /* if arg2 == 1 do *arg1 = 0 */
10509 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10510 /* fetch map_value_ptr from the stack of this function */
10511 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10512 /* write into map value */
10513 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10514
10515 /* if arg4 == 1 do *arg3 = 0 */
10516 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10517 /* fetch map_value_ptr from the stack of this function */
10518 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10519 /* write into map value */
10520 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10521 BPF_EXIT_INSN(),
10522 },
10523 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10524 .fixup_map1 = { 12, 22 },
10525 .result = ACCEPT,
10526 },
10527 {
10528 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
10529 .insns = {
10530 /* main prog */
10531 /* pass fp-16, fp-8 into a function */
10532 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10534 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10536 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
10537 BPF_MOV64_IMM(BPF_REG_0, 0),
10538 BPF_EXIT_INSN(),
10539
10540 /* subprog 1 */
10541 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10542 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10543 /* 1st lookup from map */
10544 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
10545 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10546 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10547 BPF_LD_MAP_FD(BPF_REG_1, 0),
10548 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10549 BPF_FUNC_map_lookup_elem),
10550 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10551 BPF_MOV64_IMM(BPF_REG_8, 0),
10552 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10553 /* write map_value_ptr into stack frame of main prog at fp-8 */
10554 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10555 BPF_MOV64_IMM(BPF_REG_8, 1),
10556
10557 /* 2nd lookup from map */
10558 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10559 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10560 BPF_LD_MAP_FD(BPF_REG_1, 0),
10561 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10562 BPF_FUNC_map_lookup_elem),
10563 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10564 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
10565 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10566 /* write map_value_ptr into stack frame of main prog at fp-16 */
10567 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10568 BPF_MOV64_IMM(BPF_REG_9, 1),
10569
10570 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10571 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
10572 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10573 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10574 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10575 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
10576 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
10577
10578 /* subprog 2 */
10579 /* if arg2 == 1 do *arg1 = 0 */
10580 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10581 /* fetch map_value_ptr from the stack of this function */
10582 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10583 /* write into map value */
10584 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10585
10586 /* if arg4 == 1 do *arg3 = 0 */
10587 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10588 /* fetch map_value_ptr from the stack of this function */
10589 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10590 /* write into map value */
10591 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10592 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
10593 },
10594 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10595 .fixup_map1 = { 12, 22 },
10596 .result = REJECT,
10597 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10598 },
10599 {
10600 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
10601 .insns = {
10602 /* main prog */
10603 /* pass fp-16, fp-8 into a function */
10604 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10605 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10606 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10607 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10608 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10609 BPF_MOV64_IMM(BPF_REG_0, 0),
10610 BPF_EXIT_INSN(),
10611
10612 /* subprog 1 */
10613 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10614 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10615 /* 1st lookup from map */
10616 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10617 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10619 BPF_LD_MAP_FD(BPF_REG_1, 0),
10620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10621 BPF_FUNC_map_lookup_elem),
10622 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10623 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10624 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10625 BPF_MOV64_IMM(BPF_REG_8, 0),
10626 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10627 BPF_MOV64_IMM(BPF_REG_8, 1),
10628
10629 /* 2nd lookup from map */
10630 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10632 BPF_LD_MAP_FD(BPF_REG_1, 0),
10633 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10634 BPF_FUNC_map_lookup_elem),
10635 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10636 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10637 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10638 BPF_MOV64_IMM(BPF_REG_9, 0),
10639 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10640 BPF_MOV64_IMM(BPF_REG_9, 1),
10641
10642 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10643 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10644 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10645 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10646 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10647 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10648 BPF_EXIT_INSN(),
10649
10650 /* subprog 2 */
10651 /* if arg2 == 1 do *arg1 = 0 */
10652 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10653 /* fetch map_value_ptr from the stack of this function */
10654 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10655 /* write into map value */
10656 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10657
10658 /* if arg4 == 1 do *arg3 = 0 */
10659 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10660 /* fetch map_value_ptr from the stack of this function */
10661 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10662 /* write into map value */
10663 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10664 BPF_EXIT_INSN(),
10665 },
10666 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10667 .fixup_map1 = { 12, 22 },
10668 .result = ACCEPT,
10669 },
10670 {
10671 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
10672 .insns = {
10673 /* main prog */
10674 /* pass fp-16, fp-8 into a function */
10675 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10677 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10678 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10679 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10680 BPF_MOV64_IMM(BPF_REG_0, 0),
10681 BPF_EXIT_INSN(),
10682
10683 /* subprog 1 */
10684 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10685 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10686 /* 1st lookup from map */
10687 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10688 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10690 BPF_LD_MAP_FD(BPF_REG_1, 0),
10691 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10692 BPF_FUNC_map_lookup_elem),
10693 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10694 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10695 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10696 BPF_MOV64_IMM(BPF_REG_8, 0),
10697 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10698 BPF_MOV64_IMM(BPF_REG_8, 1),
10699
10700 /* 2nd lookup from map */
10701 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10703 BPF_LD_MAP_FD(BPF_REG_1, 0),
10704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10705 BPF_FUNC_map_lookup_elem),
10706 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10707 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10708 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10709 BPF_MOV64_IMM(BPF_REG_9, 0),
10710 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10711 BPF_MOV64_IMM(BPF_REG_9, 1),
10712
10713 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10714 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10715 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10716 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10717 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10718 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10719 BPF_EXIT_INSN(),
10720
10721 /* subprog 2 */
10722 /* if arg2 == 1 do *arg1 = 0 */
10723 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10724 /* fetch map_value_ptr from the stack of this function */
10725 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10726 /* write into map value */
10727 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10728
10729 /* if arg4 == 0 do *arg3 = 0 */
10730 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
10731 /* fetch map_value_ptr from the stack of this function */
10732 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10733 /* write into map value */
10734 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10735 BPF_EXIT_INSN(),
10736 },
10737 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10738 .fixup_map1 = { 12, 22 },
10739 .result = REJECT,
10740 .errstr = "R0 invalid mem access 'inv'",
10741 },
10742 {
10743 "calls: pkt_ptr spill into caller stack",
10744 .insns = {
10745 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10747 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10748 BPF_EXIT_INSN(),
10749
10750 /* subprog 1 */
10751 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10752 offsetof(struct __sk_buff, data)),
10753 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10754 offsetof(struct __sk_buff, data_end)),
10755 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10756 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10757 /* spill unchecked pkt_ptr into stack of caller */
10758 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10759 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10760 /* now the pkt range is verified, read pkt_ptr from stack */
10761 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10762 /* write 4 bytes into packet */
10763 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10764 BPF_EXIT_INSN(),
10765 },
10766 .result = ACCEPT,
10767 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10768		.retval = POINTER_VALUE,
10769	},
10770	{
10771		"calls: pkt_ptr spill into caller stack 2",
10772 .insns = {
10773 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10775 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10776 /* Marking is still kept, but not in all cases safe. */
10777 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10778 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10779 BPF_EXIT_INSN(),
10780
10781 /* subprog 1 */
10782 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10783 offsetof(struct __sk_buff, data)),
10784 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10785 offsetof(struct __sk_buff, data_end)),
10786 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10788 /* spill unchecked pkt_ptr into stack of caller */
10789 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10790 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10791 /* now the pkt range is verified, read pkt_ptr from stack */
10792 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10793 /* write 4 bytes into packet */
10794 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10795 BPF_EXIT_INSN(),
10796 },
10797 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10798 .errstr = "invalid access to packet",
10799 .result = REJECT,
10800 },
10801 {
10802 "calls: pkt_ptr spill into caller stack 3",
10803 .insns = {
10804 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10805 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10806 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10807 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
10808 /* Marking is still kept and safe here. */
10809 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10810 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10811 BPF_EXIT_INSN(),
10812
10813 /* subprog 1 */
10814 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10815 offsetof(struct __sk_buff, data)),
10816 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10817 offsetof(struct __sk_buff, data_end)),
10818 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10820 /* spill unchecked pkt_ptr into stack of caller */
10821 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10822 BPF_MOV64_IMM(BPF_REG_5, 0),
10823 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10824 BPF_MOV64_IMM(BPF_REG_5, 1),
10825 /* now the pkt range is verified, read pkt_ptr from stack */
10826 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10827 /* write 4 bytes into packet */
10828 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10829 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10830 BPF_EXIT_INSN(),
10831 },
10832 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10833 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010834 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010835 },
10836 {
10837 "calls: pkt_ptr spill into caller stack 4",
10838 .insns = {
10839 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10841 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10842 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
10843 /* Check marking propagated. */
10844 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10845 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10846 BPF_EXIT_INSN(),
10847
10848 /* subprog 1 */
10849 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10850 offsetof(struct __sk_buff, data)),
10851 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10852 offsetof(struct __sk_buff, data_end)),
10853 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10854 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10855 /* spill unchecked pkt_ptr into stack of caller */
10856 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10857 BPF_MOV64_IMM(BPF_REG_5, 0),
10858 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10859 BPF_MOV64_IMM(BPF_REG_5, 1),
10860 /* don't read back pkt_ptr from stack here */
10861 /* write 4 bytes into packet */
10862 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10863 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10864 BPF_EXIT_INSN(),
10865 },
10866 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10867 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010868 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010869 },
10870 {
10871 "calls: pkt_ptr spill into caller stack 5",
10872 .insns = {
10873 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10875 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
10876 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10877 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10878 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10879 BPF_EXIT_INSN(),
10880
10881 /* subprog 1 */
10882 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10883 offsetof(struct __sk_buff, data)),
10884 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10885 offsetof(struct __sk_buff, data_end)),
10886 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10888 BPF_MOV64_IMM(BPF_REG_5, 0),
10889 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10890 /* spill checked pkt_ptr into stack of caller */
10891 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10892 BPF_MOV64_IMM(BPF_REG_5, 1),
10893 /* don't read back pkt_ptr from stack here */
10894 /* write 4 bytes into packet */
10895 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10896 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10897 BPF_EXIT_INSN(),
10898 },
10899 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10900 .errstr = "same insn cannot be used with different",
10901 .result = REJECT,
10902 },
10903 {
10904 "calls: pkt_ptr spill into caller stack 6",
10905 .insns = {
10906 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10907 offsetof(struct __sk_buff, data_end)),
10908 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10909 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10910 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10911 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10912 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10913 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10914 BPF_EXIT_INSN(),
10915
10916 /* subprog 1 */
10917 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10918 offsetof(struct __sk_buff, data)),
10919 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10920 offsetof(struct __sk_buff, data_end)),
10921 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10923 BPF_MOV64_IMM(BPF_REG_5, 0),
10924 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10925 /* spill checked pkt_ptr into stack of caller */
10926 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10927 BPF_MOV64_IMM(BPF_REG_5, 1),
10928 /* don't read back pkt_ptr from stack here */
10929 /* write 4 bytes into packet */
10930 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10931 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10932 BPF_EXIT_INSN(),
10933 },
10934 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10935 .errstr = "R4 invalid mem access",
10936 .result = REJECT,
10937 },
10938 {
10939 "calls: pkt_ptr spill into caller stack 7",
10940 .insns = {
10941 BPF_MOV64_IMM(BPF_REG_2, 0),
10942 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10944 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10945 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10946 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10947 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10948 BPF_EXIT_INSN(),
10949
10950 /* subprog 1 */
10951 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10952 offsetof(struct __sk_buff, data)),
10953 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10954 offsetof(struct __sk_buff, data_end)),
10955 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10956 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10957 BPF_MOV64_IMM(BPF_REG_5, 0),
10958 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10959 /* spill checked pkt_ptr into stack of caller */
10960 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10961 BPF_MOV64_IMM(BPF_REG_5, 1),
10962 /* don't read back pkt_ptr from stack here */
10963 /* write 4 bytes into packet */
10964 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10965 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10966 BPF_EXIT_INSN(),
10967 },
10968 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10969 .errstr = "R4 invalid mem access",
10970 .result = REJECT,
10971 },
10972 {
10973 "calls: pkt_ptr spill into caller stack 8",
10974 .insns = {
10975 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10976 offsetof(struct __sk_buff, data)),
10977 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10978 offsetof(struct __sk_buff, data_end)),
10979 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10980 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10981 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
10982 BPF_EXIT_INSN(),
10983 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10985 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10986 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10987 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10988 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10989 BPF_EXIT_INSN(),
10990
10991 /* subprog 1 */
10992 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10993 offsetof(struct __sk_buff, data)),
10994 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10995 offsetof(struct __sk_buff, data_end)),
10996 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10998 BPF_MOV64_IMM(BPF_REG_5, 0),
10999 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11000 /* spill checked pkt_ptr into stack of caller */
11001 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11002 BPF_MOV64_IMM(BPF_REG_5, 1),
11003 /* don't read back pkt_ptr from stack here */
11004 /* write 4 bytes into packet */
11005 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11006 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11007 BPF_EXIT_INSN(),
11008 },
11009 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11010 .result = ACCEPT,
11011 },
11012 {
11013 "calls: pkt_ptr spill into caller stack 9",
11014 .insns = {
11015 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11016 offsetof(struct __sk_buff, data)),
11017 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11018 offsetof(struct __sk_buff, data_end)),
11019 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11020 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11021 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11022 BPF_EXIT_INSN(),
11023 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11025 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11026 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11027 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11028 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11029 BPF_EXIT_INSN(),
11030
11031 /* subprog 1 */
11032 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11033 offsetof(struct __sk_buff, data)),
11034 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11035 offsetof(struct __sk_buff, data_end)),
11036 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11037 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11038 BPF_MOV64_IMM(BPF_REG_5, 0),
11039 /* spill unchecked pkt_ptr into stack of caller */
11040 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11041 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11042 BPF_MOV64_IMM(BPF_REG_5, 1),
11043 /* don't read back pkt_ptr from stack here */
11044 /* write 4 bytes into packet */
11045 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11046 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11047 BPF_EXIT_INSN(),
11048 },
11049 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11050 .errstr = "invalid access to packet",
11051 .result = REJECT,
11052 },
11053 {
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080011054 "calls: caller stack init to zero or map_value_or_null",
11055 .insns = {
11056 BPF_MOV64_IMM(BPF_REG_0, 0),
11057 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
11058 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11059 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11060 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11061 /* fetch map_value_or_null or const_zero from stack */
11062 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11063 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11064 /* store into map_value */
11065 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
11066 BPF_EXIT_INSN(),
11067
11068 /* subprog 1 */
11069 /* if (ctx == 0) return; */
11070 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
11071 /* else bpf_map_lookup() and *(fp - 8) = r0 */
11072 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
11073 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11074 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11075 BPF_LD_MAP_FD(BPF_REG_1, 0),
11076 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11077 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11078 BPF_FUNC_map_lookup_elem),
11079 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11080 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11081 BPF_EXIT_INSN(),
11082 },
11083 .fixup_map1 = { 13 },
11084 .result = ACCEPT,
11085 .prog_type = BPF_PROG_TYPE_XDP,
11086 },
11087 {
11088 "calls: stack init to zero and pruning",
11089 .insns = {
11090 /* first make allocated_stack 16 byte */
11091 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
11092 /* now fork the execution such that the false branch
11093		 * of the JGT insn will be verified second and it skips zero
11094		 * init of the fp-8 stack slot. If stack liveness marking
11095		 * is missing live_read marks from processing of the
11096		 * map_lookup call, then pruning will incorrectly assume
11097		 * that the fp-8 stack slot was unused in the fall-through
11098		 * branch and will incorrectly accept the program
11099 */
11100 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
11101 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11102 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
11103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11105 BPF_LD_MAP_FD(BPF_REG_1, 0),
11106 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11107 BPF_FUNC_map_lookup_elem),
11108 BPF_EXIT_INSN(),
11109 },
11110 .fixup_map2 = { 6 },
11111 .errstr = "invalid indirect read from stack off -8+0 size 8",
11112 .result = REJECT,
11113 .prog_type = BPF_PROG_TYPE_XDP,
11114 },
Gianluca Borellofd05e572017-12-23 10:09:55 +000011115 {
11116 "search pruning: all branches should be verified (nop operation)",
11117 .insns = {
11118 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11120 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
11121 BPF_LD_MAP_FD(BPF_REG_1, 0),
11122 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
11123 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
11124 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
11125 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
11126 BPF_MOV64_IMM(BPF_REG_4, 0),
11127 BPF_JMP_A(1),
11128 BPF_MOV64_IMM(BPF_REG_4, 1),
11129 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
11130 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
11131 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
11132 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
11133 BPF_MOV64_IMM(BPF_REG_6, 0),
11134 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
11135 BPF_EXIT_INSN(),
11136 },
11137 .fixup_map1 = { 3 },
11138 .errstr = "R6 invalid mem access 'inv'",
11139 .result = REJECT,
11140 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11141 },
11142 {
11143 "search pruning: all branches should be verified (invalid stack access)",
11144 .insns = {
11145 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11146 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11147 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
11148 BPF_LD_MAP_FD(BPF_REG_1, 0),
11149 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
11150 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
11151 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
11152 BPF_MOV64_IMM(BPF_REG_4, 0),
11153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
11154 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
11155 BPF_JMP_A(1),
11156 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
11157 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
11158 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
11159 BPF_EXIT_INSN(),
11160 },
11161 .fixup_map1 = { 3 },
11162 .errstr = "invalid read from stack off -16+0 size 8",
11163 .result = REJECT,
11164 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11165 },
Daniel Borkmannca369602018-02-23 22:29:05 +010011166 {
11167 "xadd/w check unaligned stack",
11168 .insns = {
11169 BPF_MOV64_IMM(BPF_REG_0, 1),
11170 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
11171 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
11172 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11173 BPF_EXIT_INSN(),
11174 },
11175 .result = REJECT,
11176 .errstr = "misaligned stack access off",
11177 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11178 },
11179 {
11180 "xadd/w check unaligned map",
11181 .insns = {
11182 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11183 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11185 BPF_LD_MAP_FD(BPF_REG_1, 0),
11186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11187 BPF_FUNC_map_lookup_elem),
11188 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
11189 BPF_EXIT_INSN(),
11190 BPF_MOV64_IMM(BPF_REG_1, 1),
11191 BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
11192 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
11193 BPF_EXIT_INSN(),
11194 },
11195 .fixup_map1 = { 3 },
11196 .result = REJECT,
11197 .errstr = "misaligned value access off",
11198 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11199 },
11200 {
11201 "xadd/w check unaligned pkt",
11202 .insns = {
11203 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11204 offsetof(struct xdp_md, data)),
11205 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11206 offsetof(struct xdp_md, data_end)),
11207 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11209 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
11210 BPF_MOV64_IMM(BPF_REG_0, 99),
11211 BPF_JMP_IMM(BPF_JA, 0, 0, 6),
11212 BPF_MOV64_IMM(BPF_REG_0, 1),
11213 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11214 BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
11215 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
11216 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
11217 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
11218 BPF_EXIT_INSN(),
11219 },
11220 .result = REJECT,
11221 .errstr = "BPF_XADD stores into R2 packet",
11222 .prog_type = BPF_PROG_TYPE_XDP,
11223 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011224};
11225
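/* Determine the length of a test program by scanning backwards from
 * MAX_INSNS for the last insn whose code or imm field is non-zero.
 */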
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011226static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011227{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011228 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011229
11230 for (len = MAX_INSNS - 1; len > 0; --len)
11231 if (fp[len].code != 0 || fp[len].imm != 0)
11232 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011233 return len + 1;
11234}
11235
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011236static int create_map(uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011237{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011238 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011239
Mickaël Salaünf4874d02017-02-10 00:21:43 +010011240 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011241 size_value, max_elem, BPF_F_NO_PREALLOC);
11242 if (fd < 0)
11243 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011244
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011245 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011246}
11247
11248static int create_prog_array(void)
11249{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011250 int fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011251
Mickaël Salaünf4874d02017-02-10 00:21:43 +010011252 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011253 sizeof(int), 4, 0);
11254 if (fd < 0)
11255 printf("Failed to create prog array '%s'!\n", strerror(errno));
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011256
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011257 return fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011258}
11259
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011260static int create_map_in_map(void)
11261{
11262 int inner_map_fd, outer_map_fd;
11263
11264 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
11265 sizeof(int), 1, 0);
11266 if (inner_map_fd < 0) {
11267 printf("Failed to create array '%s'!\n", strerror(errno));
11268 return inner_map_fd;
11269 }
11270
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070011271 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011272 sizeof(int), inner_map_fd, 1, 0);
11273 if (outer_map_fd < 0)
11274 printf("Failed to create array of maps '%s'!\n",
11275 strerror(errno));
11276
11277 close(inner_map_fd);
11278
11279 return outer_map_fd;
11280}
11281
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011282static char bpf_vlog[32768];
11283
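/* Patch the test program in place: each index in the per-test fixup
 * arrays points at a BPF_LD_MAP_FD placeholder insn whose imm field is
 * rewritten with the fd of a map created here on demand.
 */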
11284static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011285 int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011286{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011287 int *fixup_map1 = test->fixup_map1;
11288 int *fixup_map2 = test->fixup_map2;
11289 int *fixup_prog = test->fixup_prog;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011290 int *fixup_map_in_map = test->fixup_map_in_map;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011291
11292	/* Allocating HTs with 1 elem is fine here, since we only test
11293	 * the verifier and do not do a runtime lookup, so the only thing
11294	 * that really matters in this case is the value size.
11295	 */
11296 if (*fixup_map1) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011297 map_fds[0] = create_map(sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011298 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011299 prog[*fixup_map1].imm = map_fds[0];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011300 fixup_map1++;
11301 } while (*fixup_map1);
11302 }
11303
11304 if (*fixup_map2) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011305 map_fds[1] = create_map(sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011306 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011307 prog[*fixup_map2].imm = map_fds[1];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011308 fixup_map2++;
11309 } while (*fixup_map2);
11310 }
11311
11312 if (*fixup_prog) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011313 map_fds[2] = create_prog_array();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011314 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011315 prog[*fixup_prog].imm = map_fds[2];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011316 fixup_prog++;
11317 } while (*fixup_prog);
11318 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011319
11320 if (*fixup_map_in_map) {
11321 map_fds[3] = create_map_in_map();
11322 do {
11323 prog[*fixup_map_in_map].imm = map_fds[3];
11324 fixup_map_in_map++;
11325 } while (*fixup_map_in_map);
11326 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011327}
11328
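/* Load a single test program and check the verifier verdict against the
 * expected result; on successful load, also run the program once via
 * bpf_prog_test_run() and compare its return value.
 */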
11329static void do_test_single(struct bpf_test *test, bool unpriv,
11330 int *passes, int *errors)
11331{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011332 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011333 struct bpf_insn *prog = test->insns;
11334 int prog_len = probe_filter_length(prog);
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011335 char data_in[TEST_DATA_LEN] = {};
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011336 int prog_type = test->prog_type;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011337 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011338 const char *expected_err;
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011339 uint32_t retval;
11340 int i, err;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011341
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011342 for (i = 0; i < MAX_NR_MAPS; i++)
11343 map_fds[i] = -1;
11344
11345 do_test_fixup(test, prog, map_fds);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011346
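	/* Tests that do not set a prog_type default to
	 * BPF_PROG_TYPE_SOCKET_FILTER; strict alignment checking is
	 * requested when the test sets F_LOAD_WITH_STRICT_ALIGNMENT.
	 */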
Daniel Borkmann614d0d72017-05-25 01:05:09 +020011347 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
11348 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmannd6554902017-07-21 00:00:22 +020011349 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011350
11351 expected_ret = unpriv && test->result_unpriv != UNDEF ?
11352 test->result_unpriv : test->result;
11353 expected_err = unpriv && test->errstr_unpriv ?
11354 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011355
11356 reject_from_alignment = fd_prog < 0 &&
11357 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
11358 strstr(bpf_vlog, "Unknown alignment.");
11359#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
11360 if (reject_from_alignment) {
11361 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
11362 strerror(errno));
11363 goto fail_log;
11364 }
11365#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011366 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011367 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011368 printf("FAIL\nFailed to load prog '%s'!\n",
11369 strerror(errno));
11370 goto fail_log;
11371 }
11372 } else {
11373 if (fd_prog >= 0) {
11374 printf("FAIL\nUnexpected success to load!\n");
11375 goto fail_log;
11376 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011377 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011378 printf("FAIL\nUnexpected error message!\n");
11379 goto fail_log;
11380 }
11381 }
11382
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011383 if (fd_prog >= 0) {
11384 err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
11385 NULL, NULL, &retval, NULL);
11386		if (err && errno != 524 /* ENOTSUPP */ && errno != EPERM) {
11387 printf("Unexpected bpf_prog_test_run error\n");
11388 goto fail_log;
11389 }
11390 if (!err && retval != test->retval &&
11391 test->retval != POINTER_VALUE) {
11392 printf("FAIL retval %d != %d\n", retval, test->retval);
11393 goto fail_log;
11394 }
11395 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011396 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011397 printf("OK%s\n", reject_from_alignment ?
11398 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011399close_fds:
11400 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011401 for (i = 0; i < MAX_NR_MAPS; i++)
11402 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011403 sched_yield();
11404 return;
11405fail_log:
11406 (*errors)++;
11407 printf("%s", bpf_vlog);
11408 goto close_fds;
11409}
11410
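/* Return true when the current process has CAP_SYS_ADMIN in its
 * effective capability set.
 */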
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011411static bool is_admin(void)
11412{
11413 cap_t caps;
11414 cap_flag_value_t sysadmin = CAP_CLEAR;
11415 const cap_value_t cap_val = CAP_SYS_ADMIN;
11416
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080011417#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011418 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
11419		perror("CAP_IS_SUPPORTED");
11420 return false;
11421 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080011422#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011423 caps = cap_get_proc();
11424 if (!caps) {
11425 perror("cap_get_proc");
11426 return false;
11427 }
11428 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
11429 perror("cap_get_flag");
11430 if (cap_free(caps))
11431 perror("cap_free");
11432 return (sysadmin == CAP_SET);
11433}
11434
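/* Set or clear CAP_SYS_ADMIN in the effective capability set so that a
 * privileged run can also exercise the unprivileged test variants.
 */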
11435static int set_admin(bool admin)
11436{
11437 cap_t caps;
11438 const cap_value_t cap_val = CAP_SYS_ADMIN;
11439 int ret = -1;
11440
11441 caps = cap_get_proc();
11442 if (!caps) {
11443 perror("cap_get_proc");
11444 return -1;
11445 }
11446 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
11447 admin ? CAP_SET : CAP_CLEAR)) {
11448 perror("cap_set_flag");
11449 goto out;
11450 }
11451 if (cap_set_proc(caps)) {
11452 perror("cap_set_proc");
11453 goto out;
11454 }
11455 ret = 0;
11456out:
11457 if (cap_free(caps))
11458 perror("cap_free");
11459 return ret;
11460}
11461
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011462static int do_test(bool unpriv, unsigned int from, unsigned int to)
11463{
11464 int i, passes = 0, errors = 0;
11465
11466 for (i = from; i < to; i++) {
11467 struct bpf_test *test = &tests[i];
11468
11469		/* Program types that are not supported by non-root are
11470		 * skipped right away.
11471		 */
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011472 if (!test->prog_type) {
11473 if (!unpriv)
11474 set_admin(false);
11475 printf("#%d/u %s ", i, test->descr);
11476 do_test_single(test, true, &passes, &errors);
11477 if (!unpriv)
11478 set_admin(true);
11479 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011480
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011481 if (!unpriv) {
11482 printf("#%d/p %s ", i, test->descr);
11483 do_test_single(test, false, &passes, &errors);
11484 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011485 }
11486
11487 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
Jesper Dangaard Brouerefe5f9c2017-06-13 15:17:19 +020011488 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011489}
11490
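/* With two arguments, run the inclusive range of tests [from, to]; with
 * one argument, run only that single test; otherwise run all tests.
 * Unprivileged runs keep a small RLIMIT_MEMLOCK, privileged runs lift it.
 */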
11491int main(int argc, char **argv)
11492{
11493 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
11494 struct rlimit rlim = { 1 << 20, 1 << 20 };
11495 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011496 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011497
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011498 if (argc == 3) {
11499 unsigned int l = atoi(argv[argc - 2]);
11500 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011501
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011502 if (l < to && u < to) {
11503 from = l;
11504 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011505 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011506 } else if (argc == 2) {
11507 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011508
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011509 if (t < to) {
11510 from = t;
11511 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011512 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011513 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011514
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011515 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
11516 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011517}