/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>

#include <bpf/bpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	4
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;

struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_map3[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t retval;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
};
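
/*
 * The fixup_map1/2/3, fixup_prog and fixup_map_in_map arrays list
 * instruction indices whose immediate the test runner patches with the
 * fd of a map it creates before loading the program; retval is the
 * value an accepted program is expected to return when test-run.
 */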

/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct other_val {
	long long foo;
	long long bar;
};

static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = -3,
	},
	{
		"DIV32 by 0, zero check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV32 by 0, zero check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV64 by 0, zero check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD32 by 0, zero check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD32 by 0, zero check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD64 by 0, zero check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV32 by 0, zero check ok, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 2),
			BPF_MOV32_IMM(BPF_REG_2, 16),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 8,
	},
	{
		"DIV32 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV32 by 0, zero check 2, cls",
		.insns = {
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 by 0, zero check, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"MOD32 by 0, zero check ok, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 3),
			BPF_MOV32_IMM(BPF_REG_2, 5),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 2,
	},
	{
		"MOD32 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD32 by 0, zero check 2, cls",
		.insns = {
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD64 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 2),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 2,
	},
	{
		"MOD64 by 0, zero check 2, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, -1),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = -1,
	},
	/* Just make sure that JITs used udiv/umod as otherwise we get
	 * an exception from INT_MIN/-1 overflow similarly as with div
	 * by zero.
	 */
	{
		"DIV32 overflow, check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, -1),
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV32 overflow, check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 overflow, check 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, -1),
			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 overflow, check 2",
		.insns = {
			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"MOD32 overflow, check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, -1),
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = INT_MIN,
	},
	{
		"MOD32 overflow, check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = INT_MIN,
	},
	{
		"MOD64 overflow, check 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, -1),
			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD64 overflow, check 2",
		.insns = {
			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"xor32 zero extend check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_2, -1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
			BPF_MOV32_IMM(BPF_REG_0, 2),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"empty prog",
		.insns = {
		},
		.errstr = "unknown opcode 00",
		.result = REJECT,
	},
	{
		"only exit insn",
		.insns = {
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test6 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test7 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"test8 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "uses reserved fields",
		.result = REJECT,
	},
	{
		"test9 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 1, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test10 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test11 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test12 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"test13 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"arsh32 on imm",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "unknown opcode c4",
	},
	{
		"arsh32 on reg",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 5),
			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "unknown opcode cc",
	},
	{
		"arsh64 on imm",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"arsh64 on reg",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 5),
			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "not an exit",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit in all branches",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode 8d",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func unknown#1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 2 },
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"invalid fp arithmetic",
		/* If this gets ever changed, make sure JITs can deal with it. */
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 subtraction from stack pointer",
		.result = REJECT,
	},
	{
		"non-invalid fp arithmetic",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"invalid argument register",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 !read_ok",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"non-invalid argument register",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
			/* should be able to access R0 = *(R2 + 8) */
			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R0 leaks addr",
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.retval = POINTER_VALUE,
	},
	{
		"check valid spill/fill, skb mark",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = ACCEPT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.errstr = "corrupted spill",
		.result = REJECT,
	},
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode 00",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode ff",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode ff",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned stack access",
		.result = REJECT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "misaligned value access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 24 },
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.retval = -ENOENT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, napi_id)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
		},
		.fixup_map1 = { 6 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
		},
		.fixup_map1 = { 7 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff family",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, family)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip4)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip4)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_port)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_port)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"valid access __sk_buff family",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, family)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip4)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip4)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[3])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[3])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_port)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_port)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"invalid access of tc_classid for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_classid)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"invalid access of skb->mark for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"check skb->mark is not writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"check skb->tc_index is writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"check skb->priority is writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, priority)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"direct packet read for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"direct packet write for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"overlapping checks for direct packet access SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"direct packet read for SK_MSG",
		.insns = {
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
				    offsetof(struct sk_msg_md, data)),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
				    offsetof(struct sk_msg_md, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_MSG,
	},
	{
		"direct packet write for SK_MSG",
		.insns = {
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
				    offsetof(struct sk_msg_md, data)),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
				    offsetof(struct sk_msg_md, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_MSG,
	},
	{
		"overlapping checks for direct packet access SK_MSG",
		.insns = {
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
				    offsetof(struct sk_msg_md, data)),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
				    offsetof(struct sk_msg_md, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_MSG,
	},
	{
		"check skb->mark is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check skb->tc_index is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check cb access: byte",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1764 offsetof(struct __sk_buff, cb[4]) + 3),
1765 BPF_EXIT_INSN(),
1766 },
1767 .result = ACCEPT,
1768 },
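	/* Unlike cb[], fields such as hash and tc_index accept narrow
	 * loads only at the offset covering the field's low-order bytes
	 * (offset 0 on little-endian, the mirrored offset on big-endian),
	 * and byte/half-word stores to them are rejected, as the
	 * following tests expect.
	 */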
1769 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001770 "__sk_buff->hash, offset 0, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001771 .insns = {
1772 BPF_MOV64_IMM(BPF_REG_0, 0),
1773 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001774 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001775 BPF_EXIT_INSN(),
1776 },
1777 .errstr = "invalid bpf_context access",
1778 .result = REJECT,
1779 },
1780 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001781 "__sk_buff->tc_index, offset 3, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001782 .insns = {
1783 BPF_MOV64_IMM(BPF_REG_0, 0),
1784 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001785 offsetof(struct __sk_buff, tc_index) + 3),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001786 BPF_EXIT_INSN(),
1787 },
1788 .errstr = "invalid bpf_context access",
1789 .result = REJECT,
1790 },
1791 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001792 "check skb->hash byte load permitted",
1793 .insns = {
1794 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001795#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001796 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1797 offsetof(struct __sk_buff, hash)),
1798#else
1799 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1800 offsetof(struct __sk_buff, hash) + 3),
1801#endif
1802 BPF_EXIT_INSN(),
1803 },
1804 .result = ACCEPT,
1805 },
1806 {
1807 "check skb->hash byte load not permitted 1",
1808 .insns = {
1809 BPF_MOV64_IMM(BPF_REG_0, 0),
1810 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1811 offsetof(struct __sk_buff, hash) + 1),
1812 BPF_EXIT_INSN(),
1813 },
1814 .errstr = "invalid bpf_context access",
1815 .result = REJECT,
1816 },
1817 {
1818 "check skb->hash byte load not permitted 2",
1819 .insns = {
1820 BPF_MOV64_IMM(BPF_REG_0, 0),
1821 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1822 offsetof(struct __sk_buff, hash) + 2),
1823 BPF_EXIT_INSN(),
1824 },
1825 .errstr = "invalid bpf_context access",
1826 .result = REJECT,
1827 },
1828 {
1829 "check skb->hash byte load not permitted 3",
1830 .insns = {
1831 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001832#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001833 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1834 offsetof(struct __sk_buff, hash) + 3),
1835#else
1836 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1837 offsetof(struct __sk_buff, hash)),
1838#endif
1839 BPF_EXIT_INSN(),
1840 },
1841 .errstr = "invalid bpf_context access",
1842 .result = REJECT,
1843 },
1844 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001845 "check cb access: byte, wrong type",
1846 .insns = {
1847 BPF_MOV64_IMM(BPF_REG_0, 0),
1848 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001849 offsetof(struct __sk_buff, cb[0])),
1850 BPF_EXIT_INSN(),
1851 },
1852 .errstr = "invalid bpf_context access",
1853 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001854 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1855 },
1856 {
1857 "check cb access: half",
1858 .insns = {
1859 BPF_MOV64_IMM(BPF_REG_0, 0),
1860 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1861 offsetof(struct __sk_buff, cb[0])),
1862 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1863 offsetof(struct __sk_buff, cb[0]) + 2),
1864 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1865 offsetof(struct __sk_buff, cb[1])),
1866 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1867 offsetof(struct __sk_buff, cb[1]) + 2),
1868 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1869 offsetof(struct __sk_buff, cb[2])),
1870 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1871 offsetof(struct __sk_buff, cb[2]) + 2),
1872 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1873 offsetof(struct __sk_buff, cb[3])),
1874 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1875 offsetof(struct __sk_buff, cb[3]) + 2),
1876 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1877 offsetof(struct __sk_buff, cb[4])),
1878 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1879 offsetof(struct __sk_buff, cb[4]) + 2),
1880 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1881 offsetof(struct __sk_buff, cb[0])),
1882 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1883 offsetof(struct __sk_buff, cb[0]) + 2),
1884 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1885 offsetof(struct __sk_buff, cb[1])),
1886 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1887 offsetof(struct __sk_buff, cb[1]) + 2),
1888 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1889 offsetof(struct __sk_buff, cb[2])),
1890 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1891 offsetof(struct __sk_buff, cb[2]) + 2),
1892 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1893 offsetof(struct __sk_buff, cb[3])),
1894 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1895 offsetof(struct __sk_buff, cb[3]) + 2),
1896 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1897 offsetof(struct __sk_buff, cb[4])),
1898 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1899 offsetof(struct __sk_buff, cb[4]) + 2),
1900 BPF_EXIT_INSN(),
1901 },
1902 .result = ACCEPT,
1903 },
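	/* The "unaligned" variants request strict alignment checking via
	 * F_LOAD_WITH_STRICT_ALIGNMENT, so the verifier is expected to
	 * reject the access as misaligned rather than as an invalid
	 * context access.
	 */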
1904 {
1905 "check cb access: half, unaligned",
1906 .insns = {
1907 BPF_MOV64_IMM(BPF_REG_0, 0),
1908 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1909 offsetof(struct __sk_buff, cb[0]) + 1),
1910 BPF_EXIT_INSN(),
1911 },
Edward Creef65b1842017-08-07 15:27:12 +01001912 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001913 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001914 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001915 },
1916 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001917 "check __sk_buff->hash, offset 0, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001918 .insns = {
1919 BPF_MOV64_IMM(BPF_REG_0, 0),
1920 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001921 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001922 BPF_EXIT_INSN(),
1923 },
1924 .errstr = "invalid bpf_context access",
1925 .result = REJECT,
1926 },
1927 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001928 "check __sk_buff->tc_index, offset 2, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001929 .insns = {
1930 BPF_MOV64_IMM(BPF_REG_0, 0),
1931 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001932 offsetof(struct __sk_buff, tc_index) + 2),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001933 BPF_EXIT_INSN(),
1934 },
1935 .errstr = "invalid bpf_context access",
1936 .result = REJECT,
1937 },
1938 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001939 "check skb->hash half load permitted",
1940 .insns = {
1941 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001942#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001943 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1944 offsetof(struct __sk_buff, hash)),
1945#else
1946 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1947 offsetof(struct __sk_buff, hash) + 2),
1948#endif
1949 BPF_EXIT_INSN(),
1950 },
1951 .result = ACCEPT,
1952 },
1953 {
1954 "check skb->hash half load not permitted",
1955 .insns = {
1956 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001957#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001958 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1959 offsetof(struct __sk_buff, hash) + 2),
1960#else
1961 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1962 offsetof(struct __sk_buff, hash)),
1963#endif
1964 BPF_EXIT_INSN(),
1965 },
1966 .errstr = "invalid bpf_context access",
1967 .result = REJECT,
1968 },
1969 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001970 "check cb access: half, wrong type",
1971 .insns = {
1972 BPF_MOV64_IMM(BPF_REG_0, 0),
1973 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1974 offsetof(struct __sk_buff, cb[0])),
1975 BPF_EXIT_INSN(),
1976 },
1977 .errstr = "invalid bpf_context access",
1978 .result = REJECT,
1979 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1980 },
1981 {
1982 "check cb access: word",
1983 .insns = {
1984 BPF_MOV64_IMM(BPF_REG_0, 0),
1985 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1986 offsetof(struct __sk_buff, cb[0])),
1987 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1988 offsetof(struct __sk_buff, cb[1])),
1989 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1990 offsetof(struct __sk_buff, cb[2])),
1991 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1992 offsetof(struct __sk_buff, cb[3])),
1993 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1994 offsetof(struct __sk_buff, cb[4])),
1995 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1996 offsetof(struct __sk_buff, cb[0])),
1997 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1998 offsetof(struct __sk_buff, cb[1])),
1999 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2000 offsetof(struct __sk_buff, cb[2])),
2001 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2002 offsetof(struct __sk_buff, cb[3])),
2003 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2004 offsetof(struct __sk_buff, cb[4])),
2005 BPF_EXIT_INSN(),
2006 },
2007 .result = ACCEPT,
2008 },
2009 {
2010 "check cb access: word, unaligned 1",
2011 .insns = {
2012 BPF_MOV64_IMM(BPF_REG_0, 0),
2013 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2014 offsetof(struct __sk_buff, cb[0]) + 2),
2015 BPF_EXIT_INSN(),
2016 },
Edward Creef65b1842017-08-07 15:27:12 +01002017 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002018 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002019 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002020 },
2021 {
2022 "check cb access: word, unaligned 2",
2023 .insns = {
2024 BPF_MOV64_IMM(BPF_REG_0, 0),
2025 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2026 offsetof(struct __sk_buff, cb[4]) + 1),
2027 BPF_EXIT_INSN(),
2028 },
Edward Creef65b1842017-08-07 15:27:12 +01002029 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002030 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002031 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002032 },
2033 {
2034 "check cb access: word, unaligned 3",
2035 .insns = {
2036 BPF_MOV64_IMM(BPF_REG_0, 0),
2037 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2038 offsetof(struct __sk_buff, cb[4]) + 2),
2039 BPF_EXIT_INSN(),
2040 },
Edward Creef65b1842017-08-07 15:27:12 +01002041 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002042 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002043 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002044 },
2045 {
2046 "check cb access: word, unaligned 4",
2047 .insns = {
2048 BPF_MOV64_IMM(BPF_REG_0, 0),
2049 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2050 offsetof(struct __sk_buff, cb[4]) + 3),
2051 BPF_EXIT_INSN(),
2052 },
Edward Creef65b1842017-08-07 15:27:12 +01002053 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002054 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002055 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002056 },
2057 {
2058 "check cb access: double",
2059 .insns = {
2060 BPF_MOV64_IMM(BPF_REG_0, 0),
2061 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2062 offsetof(struct __sk_buff, cb[0])),
2063 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2064 offsetof(struct __sk_buff, cb[2])),
2065 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2066 offsetof(struct __sk_buff, cb[0])),
2067 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2068 offsetof(struct __sk_buff, cb[2])),
2069 BPF_EXIT_INSN(),
2070 },
2071 .result = ACCEPT,
2072 },
2073 {
2074 "check cb access: double, unaligned 1",
2075 .insns = {
2076 BPF_MOV64_IMM(BPF_REG_0, 0),
2077 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2078 offsetof(struct __sk_buff, cb[1])),
2079 BPF_EXIT_INSN(),
2080 },
Edward Creef65b1842017-08-07 15:27:12 +01002081 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002082 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002083 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002084 },
2085 {
2086 "check cb access: double, unaligned 2",
2087 .insns = {
2088 BPF_MOV64_IMM(BPF_REG_0, 0),
2089 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2090 offsetof(struct __sk_buff, cb[3])),
2091 BPF_EXIT_INSN(),
2092 },
Edward Creef65b1842017-08-07 15:27:12 +01002093 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002094 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002095 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002096 },
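	/* cb[] is five 32-bit words, so an 8-byte access based at cb[4]
	 * extends past the end of the array and must be rejected, even
	 * though the 8-byte accesses at cb[0] and cb[2] above are fine.
	 */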
2097 {
2098 "check cb access: double, oob 1",
2099 .insns = {
2100 BPF_MOV64_IMM(BPF_REG_0, 0),
2101 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2102 offsetof(struct __sk_buff, cb[4])),
2103 BPF_EXIT_INSN(),
2104 },
2105 .errstr = "invalid bpf_context access",
2106 .result = REJECT,
2107 },
2108 {
2109 "check cb access: double, oob 2",
2110 .insns = {
2111 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002112 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2113 offsetof(struct __sk_buff, cb[4])),
2114 BPF_EXIT_INSN(),
2115 },
2116 .errstr = "invalid bpf_context access",
2117 .result = REJECT,
2118 },
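	/* ifindex is a 4-byte field, so 8-byte (BPF_DW) stores and loads
	 * of it must be rejected as invalid context accesses.
	 */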
2119 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002120 "check __sk_buff->ifindex dw store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002121 .insns = {
2122 BPF_MOV64_IMM(BPF_REG_0, 0),
Yonghong Song31fd8582017-06-13 15:52:13 -07002123 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2124 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002125 BPF_EXIT_INSN(),
2126 },
2127 .errstr = "invalid bpf_context access",
2128 .result = REJECT,
2129 },
2130 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002131 "check __sk_buff->ifindex dw load not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002132 .insns = {
2133 BPF_MOV64_IMM(BPF_REG_0, 0),
2134 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
Yonghong Song31fd8582017-06-13 15:52:13 -07002135 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002136 BPF_EXIT_INSN(),
2137 },
2138 .errstr = "invalid bpf_context access",
2139 .result = REJECT,
2140 },
2141 {
2142 "check cb access: double, wrong type",
2143 .insns = {
2144 BPF_MOV64_IMM(BPF_REG_0, 0),
2145 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2146 offsetof(struct __sk_buff, cb[0])),
2147 BPF_EXIT_INSN(),
2148 },
2149 .errstr = "invalid bpf_context access",
2150 .result = REJECT,
2151 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002152 },
2153 {
2154 "check out of range skb->cb access",
2155 .insns = {
2156 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002157 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002158 BPF_EXIT_INSN(),
2159 },
2160 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002161 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002162 .result = REJECT,
2163 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2164 },
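	/* Positive write tests: socket filters may store to cb[], while
	 * tc (SCHED_CLS/SCHED_ACT) programs may additionally store to
	 * fields such as mark and tc_index.  Unprivileged loading still
	 * fails where a pointer would be written into the skb or where
	 * the program type is not available to unprivileged users.
	 */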
2165 {
2166 "write skb fields from socket prog",
2167 .insns = {
2168 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2169 offsetof(struct __sk_buff, cb[4])),
2170 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2171 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2172 offsetof(struct __sk_buff, mark)),
2173 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2174 offsetof(struct __sk_buff, tc_index)),
2175 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2176 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2177 offsetof(struct __sk_buff, cb[0])),
2178 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2179 offsetof(struct __sk_buff, cb[2])),
2180 BPF_EXIT_INSN(),
2181 },
2182 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002183 .errstr_unpriv = "R1 leaks addr",
2184 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002185 },
2186 {
2187 "write skb fields from tc_cls_act prog",
2188 .insns = {
2189 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2190 offsetof(struct __sk_buff, cb[0])),
2191 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2192 offsetof(struct __sk_buff, mark)),
2193 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2194 offsetof(struct __sk_buff, tc_index)),
2195 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2196 offsetof(struct __sk_buff, tc_index)),
2197 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2198 offsetof(struct __sk_buff, cb[3])),
2199 BPF_EXIT_INSN(),
2200 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002201 .errstr_unpriv = "",
2202 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002203 .result = ACCEPT,
2204 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2205 },
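	/* PTR_TO_STACK tests: the verifier tracks constant offsets from
	 * the frame pointer, so stores and loads through such a pointer
	 * must stay inside the BPF stack (strictly below the frame
	 * pointer and within its 512-byte limit) and be naturally aligned
	 * for the access size.
	 */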
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002206 {
2207 "PTR_TO_STACK store/load",
2208 .insns = {
2209 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2211 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2212 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2213 BPF_EXIT_INSN(),
2214 },
2215 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002216 .retval = 0xfaceb00c,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002217 },
2218 {
2219 "PTR_TO_STACK store/load - bad alignment on off",
2220 .insns = {
2221 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2223 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2224 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2225 BPF_EXIT_INSN(),
2226 },
2227 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002228 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002229 },
2230 {
2231 "PTR_TO_STACK store/load - bad alignment on reg",
2232 .insns = {
2233 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2235 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2236 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2237 BPF_EXIT_INSN(),
2238 },
2239 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002240 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002241 },
2242 {
2243 "PTR_TO_STACK store/load - out of bounds low",
2244 .insns = {
2245 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2247 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2248 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2249 BPF_EXIT_INSN(),
2250 },
2251 .result = REJECT,
2252 .errstr = "invalid stack off=-79992 size=8",
2253 },
2254 {
2255 "PTR_TO_STACK store/load - out of bounds high",
2256 .insns = {
2257 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2258 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2259 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2260 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2261 BPF_EXIT_INSN(),
2262 },
2263 .result = REJECT,
2264 .errstr = "invalid stack off=0 size=8",
2265 },
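	/* The "unpriv:" tests are additionally run without CAP_SYS_ADMIN.
	 * In that mode the verifier must keep kernel pointers from
	 * leaking to user space, so returning them, storing them where
	 * user space can read them, comparing them or doing arithmetic on
	 * them is rejected with the errstr_unpriv message, even though
	 * most of these programs are accepted when privileged.
	 */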
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002266 {
2267 "unpriv: return pointer",
2268 .insns = {
2269 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2270 BPF_EXIT_INSN(),
2271 },
2272 .result = ACCEPT,
2273 .result_unpriv = REJECT,
2274 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002275 .retval = POINTER_VALUE,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002276 },
2277 {
2278 "unpriv: add const to pointer",
2279 .insns = {
2280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2281 BPF_MOV64_IMM(BPF_REG_0, 0),
2282 BPF_EXIT_INSN(),
2283 },
2284 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002285 },
2286 {
2287 "unpriv: add pointer to pointer",
2288 .insns = {
2289 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2290 BPF_MOV64_IMM(BPF_REG_0, 0),
2291 BPF_EXIT_INSN(),
2292 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08002293 .result = REJECT,
2294 .errstr = "R1 pointer += pointer",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002295 },
2296 {
2297 "unpriv: neg pointer",
2298 .insns = {
2299 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2300 BPF_MOV64_IMM(BPF_REG_0, 0),
2301 BPF_EXIT_INSN(),
2302 },
2303 .result = ACCEPT,
2304 .result_unpriv = REJECT,
2305 .errstr_unpriv = "R1 pointer arithmetic",
2306 },
2307 {
2308 "unpriv: cmp pointer with const",
2309 .insns = {
2310 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2311 BPF_MOV64_IMM(BPF_REG_0, 0),
2312 BPF_EXIT_INSN(),
2313 },
2314 .result = ACCEPT,
2315 .result_unpriv = REJECT,
2316 .errstr_unpriv = "R1 pointer comparison",
2317 },
2318 {
2319 "unpriv: cmp pointer with pointer",
2320 .insns = {
2321 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2322 BPF_MOV64_IMM(BPF_REG_0, 0),
2323 BPF_EXIT_INSN(),
2324 },
2325 .result = ACCEPT,
2326 .result_unpriv = REJECT,
2327 .errstr_unpriv = "R10 pointer comparison",
2328 },
2329 {
2330 "unpriv: check that printk is disallowed",
2331 .insns = {
2332 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2333 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2335 BPF_MOV64_IMM(BPF_REG_2, 8),
2336 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2338 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002339 BPF_MOV64_IMM(BPF_REG_0, 0),
2340 BPF_EXIT_INSN(),
2341 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01002342 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002343 .result_unpriv = REJECT,
2344 .result = ACCEPT,
2345 },
2346 {
2347 "unpriv: pass pointer to helper function",
2348 .insns = {
2349 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2350 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2351 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2352 BPF_LD_MAP_FD(BPF_REG_1, 0),
2353 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2354 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002355 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2356 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002357 BPF_MOV64_IMM(BPF_REG_0, 0),
2358 BPF_EXIT_INSN(),
2359 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002360 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002361 .errstr_unpriv = "R4 leaks addr",
2362 .result_unpriv = REJECT,
2363 .result = ACCEPT,
2364 },
2365 {
2366 "unpriv: indirectly pass pointer on stack to helper function",
2367 .insns = {
2368 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2369 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2370 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2371 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002372 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2373 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002374 BPF_MOV64_IMM(BPF_REG_0, 0),
2375 BPF_EXIT_INSN(),
2376 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002377 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002378 .errstr = "invalid indirect read from stack off -8+0 size 8",
2379 .result = REJECT,
2380 },
2381 {
2382 "unpriv: mangle pointer on stack 1",
2383 .insns = {
2384 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2385 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2386 BPF_MOV64_IMM(BPF_REG_0, 0),
2387 BPF_EXIT_INSN(),
2388 },
2389 .errstr_unpriv = "attempt to corrupt spilled",
2390 .result_unpriv = REJECT,
2391 .result = ACCEPT,
2392 },
2393 {
2394 "unpriv: mangle pointer on stack 2",
2395 .insns = {
2396 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2397 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2398 BPF_MOV64_IMM(BPF_REG_0, 0),
2399 BPF_EXIT_INSN(),
2400 },
2401 .errstr_unpriv = "attempt to corrupt spilled",
2402 .result_unpriv = REJECT,
2403 .result = ACCEPT,
2404 },
2405 {
2406 "unpriv: read pointer from stack in small chunks",
2407 .insns = {
2408 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2409 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2410 BPF_MOV64_IMM(BPF_REG_0, 0),
2411 BPF_EXIT_INSN(),
2412 },
2413 .errstr = "invalid size",
2414 .result = REJECT,
2415 },
2416 {
2417 "unpriv: write pointer into ctx",
2418 .insns = {
2419 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2420 BPF_MOV64_IMM(BPF_REG_0, 0),
2421 BPF_EXIT_INSN(),
2422 },
2423 .errstr_unpriv = "R1 leaks addr",
2424 .result_unpriv = REJECT,
2425 .errstr = "invalid bpf_context access",
2426 .result = REJECT,
2427 },
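	/* Spill/fill tracking: a context pointer spilled to the stack
	 * keeps its type when filled back and can still be passed to
	 * helpers.  Clobbering the spill slot (e.g. via XADD) or reusing
	 * the same spill/fill instruction for two different pointer types
	 * must leave a register the verifier refuses to treat as ctx.
	 */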
2428 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002429 "unpriv: spill/fill of ctx",
2430 .insns = {
2431 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2433 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2434 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2435 BPF_MOV64_IMM(BPF_REG_0, 0),
2436 BPF_EXIT_INSN(),
2437 },
2438 .result = ACCEPT,
2439 },
2440 {
2441 "unpriv: spill/fill of ctx 2",
2442 .insns = {
2443 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2444 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2445 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2446 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002447 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2448 BPF_FUNC_get_hash_recalc),
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002449 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002450 BPF_EXIT_INSN(),
2451 },
2452 .result = ACCEPT,
2453 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2454 },
2455 {
2456 "unpriv: spill/fill of ctx 3",
2457 .insns = {
2458 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2460 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2461 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2462 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002463 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2464 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002465 BPF_EXIT_INSN(),
2466 },
2467 .result = REJECT,
2468 .errstr = "R1 type=fp expected=ctx",
2469 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2470 },
2471 {
2472 "unpriv: spill/fill of ctx 4",
2473 .insns = {
2474 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2476 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2477 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002478 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2479 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002480 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002481 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2482 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002483 BPF_EXIT_INSN(),
2484 },
2485 .result = REJECT,
2486 .errstr = "R1 type=inv expected=ctx",
2487 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2488 },
2489 {
2490 "unpriv: spill/fill of different pointers stx",
2491 .insns = {
2492 BPF_MOV64_IMM(BPF_REG_3, 42),
2493 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2494 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2495 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2496 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2497 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2498 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2499 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2500 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2501 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2502 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2503 offsetof(struct __sk_buff, mark)),
2504 BPF_MOV64_IMM(BPF_REG_0, 0),
2505 BPF_EXIT_INSN(),
2506 },
2507 .result = REJECT,
2508 .errstr = "same insn cannot be used with different pointers",
2509 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2510 },
2511 {
2512 "unpriv: spill/fill of different pointers ldx",
2513 .insns = {
2514 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2516 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2517 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2518 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2519 -(__s32)offsetof(struct bpf_perf_event_data,
2520 sample_period) - 8),
2521 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2522 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2523 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2524 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2525 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2526 offsetof(struct bpf_perf_event_data,
2527 sample_period)),
2528 BPF_MOV64_IMM(BPF_REG_0, 0),
2529 BPF_EXIT_INSN(),
2530 },
2531 .result = REJECT,
2532 .errstr = "same insn cannot be used with different pointers",
2533 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2534 },
2535 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002536 "unpriv: write pointer into map elem value",
2537 .insns = {
2538 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2539 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2540 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2541 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002542 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2543 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002544 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2545 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2546 BPF_EXIT_INSN(),
2547 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002548 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002549 .errstr_unpriv = "R0 leaks addr",
2550 .result_unpriv = REJECT,
2551 .result = ACCEPT,
2552 },
2553 {
2554 "unpriv: partial copy of pointer",
2555 .insns = {
2556 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2557 BPF_MOV64_IMM(BPF_REG_0, 0),
2558 BPF_EXIT_INSN(),
2559 },
2560 .errstr_unpriv = "R10 partial copy",
2561 .result_unpriv = REJECT,
2562 .result = ACCEPT,
2563 },
2564 {
2565 "unpriv: pass pointer to tail_call",
2566 .insns = {
2567 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2568 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002569 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2570 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002571 BPF_MOV64_IMM(BPF_REG_0, 0),
2572 BPF_EXIT_INSN(),
2573 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002574 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002575 .errstr_unpriv = "R3 leaks addr into helper",
2576 .result_unpriv = REJECT,
2577 .result = ACCEPT,
2578 },
2579 {
2580 "unpriv: cmp map pointer with zero",
2581 .insns = {
2582 BPF_MOV64_IMM(BPF_REG_1, 0),
2583 BPF_LD_MAP_FD(BPF_REG_1, 0),
2584 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2585 BPF_MOV64_IMM(BPF_REG_0, 0),
2586 BPF_EXIT_INSN(),
2587 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002588 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002589 .errstr_unpriv = "R1 pointer comparison",
2590 .result_unpriv = REJECT,
2591 .result = ACCEPT,
2592 },
2593 {
2594 "unpriv: write into frame pointer",
2595 .insns = {
2596 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2597 BPF_MOV64_IMM(BPF_REG_0, 0),
2598 BPF_EXIT_INSN(),
2599 },
2600 .errstr = "frame pointer is read only",
2601 .result = REJECT,
2602 },
2603 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002604 "unpriv: spill/fill frame pointer",
2605 .insns = {
2606 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2607 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2608 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2609 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2610 BPF_MOV64_IMM(BPF_REG_0, 0),
2611 BPF_EXIT_INSN(),
2612 },
2613 .errstr = "frame pointer is read only",
2614 .result = REJECT,
2615 },
2616 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002617 "unpriv: cmp of frame pointer",
2618 .insns = {
2619 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2620 BPF_MOV64_IMM(BPF_REG_0, 0),
2621 BPF_EXIT_INSN(),
2622 },
2623 .errstr_unpriv = "R10 pointer comparison",
2624 .result_unpriv = REJECT,
2625 .result = ACCEPT,
2626 },
2627 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002628 "unpriv: adding of fp",
2629 .insns = {
2630 BPF_MOV64_IMM(BPF_REG_0, 0),
2631 BPF_MOV64_IMM(BPF_REG_1, 0),
2632 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2633 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2634 BPF_EXIT_INSN(),
2635 },
Edward Creef65b1842017-08-07 15:27:12 +01002636 .result = ACCEPT,
Daniel Borkmann728a8532017-04-27 01:39:32 +02002637 },
2638 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002639 "unpriv: cmp of stack pointer",
2640 .insns = {
2641 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2643 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2644 BPF_MOV64_IMM(BPF_REG_0, 0),
2645 BPF_EXIT_INSN(),
2646 },
2647 .errstr_unpriv = "R2 pointer comparison",
2648 .result_unpriv = REJECT,
2649 .result = ACCEPT,
2650 },
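	/* The "runtime/jit:" tests are executed after loading, not only
	 * verified.  fixup_prog names the instruction whose map fd is
	 * replaced with a prog array built by the test harness; retval
	 * then shows whether the tail call dispatched into one of the
	 * array's programs or fell through to the following instruction
	 * (empty slot, out-of-range or negative index).
	 */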
2651 {
Daniel Borkmannb33eb732018-02-26 22:34:33 +01002652 "runtime/jit: tail_call within bounds, prog once",
2653 .insns = {
2654 BPF_MOV64_IMM(BPF_REG_3, 0),
2655 BPF_LD_MAP_FD(BPF_REG_2, 0),
2656 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2657 BPF_FUNC_tail_call),
2658 BPF_MOV64_IMM(BPF_REG_0, 1),
2659 BPF_EXIT_INSN(),
2660 },
2661 .fixup_prog = { 1 },
2662 .result = ACCEPT,
2663 .retval = 42,
2664 },
2665 {
2666 "runtime/jit: tail_call within bounds, prog loop",
2667 .insns = {
2668 BPF_MOV64_IMM(BPF_REG_3, 1),
2669 BPF_LD_MAP_FD(BPF_REG_2, 0),
2670 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2671 BPF_FUNC_tail_call),
2672 BPF_MOV64_IMM(BPF_REG_0, 1),
2673 BPF_EXIT_INSN(),
2674 },
2675 .fixup_prog = { 1 },
2676 .result = ACCEPT,
2677 .retval = 41,
2678 },
2679 {
2680 "runtime/jit: tail_call within bounds, no prog",
2681 .insns = {
2682 BPF_MOV64_IMM(BPF_REG_3, 2),
2683 BPF_LD_MAP_FD(BPF_REG_2, 0),
2684 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2685 BPF_FUNC_tail_call),
2686 BPF_MOV64_IMM(BPF_REG_0, 1),
2687 BPF_EXIT_INSN(),
2688 },
2689 .fixup_prog = { 1 },
2690 .result = ACCEPT,
2691 .retval = 1,
2692 },
2693 {
2694 "runtime/jit: tail_call out of bounds",
2695 .insns = {
2696 BPF_MOV64_IMM(BPF_REG_3, 256),
2697 BPF_LD_MAP_FD(BPF_REG_2, 0),
2698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2699 BPF_FUNC_tail_call),
2700 BPF_MOV64_IMM(BPF_REG_0, 2),
2701 BPF_EXIT_INSN(),
2702 },
2703 .fixup_prog = { 1 },
2704 .result = ACCEPT,
2705 .retval = 2,
2706 },
2707 {
Daniel Borkmann16338a92018-02-23 01:03:43 +01002708 "runtime/jit: pass negative index to tail_call",
2709 .insns = {
2710 BPF_MOV64_IMM(BPF_REG_3, -1),
2711 BPF_LD_MAP_FD(BPF_REG_2, 0),
2712 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2713 BPF_FUNC_tail_call),
Daniel Borkmannb33eb732018-02-26 22:34:33 +01002714 BPF_MOV64_IMM(BPF_REG_0, 2),
Daniel Borkmann16338a92018-02-23 01:03:43 +01002715 BPF_EXIT_INSN(),
2716 },
2717 .fixup_prog = { 1 },
2718 .result = ACCEPT,
Daniel Borkmannb33eb732018-02-26 22:34:33 +01002719 .retval = 2,
Daniel Borkmann16338a92018-02-23 01:03:43 +01002720 },
2721 {
2722 "runtime/jit: pass > 32bit index to tail_call",
2723 .insns = {
2724 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
2725 BPF_LD_MAP_FD(BPF_REG_2, 0),
2726 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2727 BPF_FUNC_tail_call),
Daniel Borkmannb33eb732018-02-26 22:34:33 +01002728 BPF_MOV64_IMM(BPF_REG_0, 2),
Daniel Borkmann16338a92018-02-23 01:03:43 +01002729 BPF_EXIT_INSN(),
2730 },
2731 .fixup_prog = { 2 },
2732 .result = ACCEPT,
Daniel Borkmannb33eb732018-02-26 22:34:33 +01002733 .retval = 42,
Daniel Borkmann16338a92018-02-23 01:03:43 +01002734 },
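	/* "stack pointer arithmetic": the frame pointer copy is adjusted
	 * several times before being dereferenced, but every resulting
	 * offset stays inside the stack, so the program must be accepted.
	 * Note that size 0 in BPF_ST_MEM() encodes BPF_W.
	 */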
2735 {
Yonghong Song332270f2017-04-29 22:52:42 -07002736 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002737 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07002738 BPF_MOV64_IMM(BPF_REG_1, 4),
2739 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2740 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2743 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2744 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2745 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2746 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2747 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2748 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002749 BPF_MOV64_IMM(BPF_REG_0, 0),
2750 BPF_EXIT_INSN(),
2751 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002752 .result = ACCEPT,
2753 },
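	/* The "raw_stack:" tests pass a pointer into the BPF stack as the
	 * destination buffer of bpf_skb_load_bytes().  The stack memory
	 * does not need to be initialized beforehand, but the buffer must
	 * lie entirely within the 512-byte stack and the length must be a
	 * known, non-zero size that fits it.
	 */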
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002754 {
2755 "raw_stack: no skb_load_bytes",
2756 .insns = {
2757 BPF_MOV64_IMM(BPF_REG_2, 4),
2758 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2760 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2761 BPF_MOV64_IMM(BPF_REG_4, 8),
2762 /* Call to skb_load_bytes() omitted. */
2763 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2764 BPF_EXIT_INSN(),
2765 },
2766 .result = REJECT,
2767 .errstr = "invalid read from stack off -8+0 size 8",
2768 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2769 },
2770 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002771 "raw_stack: skb_load_bytes, negative len",
2772 .insns = {
2773 BPF_MOV64_IMM(BPF_REG_2, 4),
2774 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2775 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2776 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2777 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002778 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2779 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002780 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2781 BPF_EXIT_INSN(),
2782 },
2783 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002784 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002785 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2786 },
2787 {
2788 "raw_stack: skb_load_bytes, negative len 2",
2789 .insns = {
2790 BPF_MOV64_IMM(BPF_REG_2, 4),
2791 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2792 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2793 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2794 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002795 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2796 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002797 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2798 BPF_EXIT_INSN(),
2799 },
2800 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002801 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002802 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2803 },
2804 {
2805 "raw_stack: skb_load_bytes, zero len",
2806 .insns = {
2807 BPF_MOV64_IMM(BPF_REG_2, 4),
2808 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2810 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2811 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002812 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2813 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002814 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2815 BPF_EXIT_INSN(),
2816 },
2817 .result = REJECT,
2818 .errstr = "invalid stack type R3",
2819 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2820 },
2821 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002822 "raw_stack: skb_load_bytes, no init",
2823 .insns = {
2824 BPF_MOV64_IMM(BPF_REG_2, 4),
2825 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2827 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2828 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002829 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2830 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002831 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2832 BPF_EXIT_INSN(),
2833 },
2834 .result = ACCEPT,
2835 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2836 },
2837 {
2838 "raw_stack: skb_load_bytes, init",
2839 .insns = {
2840 BPF_MOV64_IMM(BPF_REG_2, 4),
2841 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2843 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2844 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2845 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002846 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2847 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002848 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2849 BPF_EXIT_INSN(),
2850 },
2851 .result = ACCEPT,
2852 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2853 },
2854 {
2855 "raw_stack: skb_load_bytes, spilled regs around bounds",
2856 .insns = {
2857 BPF_MOV64_IMM(BPF_REG_2, 4),
2858 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002860 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2861 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002862 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2863 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002864 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2865 BPF_FUNC_skb_load_bytes),
2866 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2867 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002868 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2869 offsetof(struct __sk_buff, mark)),
2870 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2871 offsetof(struct __sk_buff, priority)),
2872 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2873 BPF_EXIT_INSN(),
2874 },
2875 .result = ACCEPT,
2876 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2877 },
2878 {
2879 "raw_stack: skb_load_bytes, spilled regs corruption",
2880 .insns = {
2881 BPF_MOV64_IMM(BPF_REG_2, 4),
2882 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002884 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002885 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2886 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002887 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2888 BPF_FUNC_skb_load_bytes),
2889 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002890 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2891 offsetof(struct __sk_buff, mark)),
2892 BPF_EXIT_INSN(),
2893 },
2894 .result = REJECT,
2895 .errstr = "R0 invalid mem access 'inv'",
2896 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2897 },
2898 {
2899 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2900 .insns = {
2901 BPF_MOV64_IMM(BPF_REG_2, 4),
2902 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2903 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002904 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2905 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2906 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002907 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2908 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002909 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2910 BPF_FUNC_skb_load_bytes),
2911 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2912 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2913 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002914 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2915 offsetof(struct __sk_buff, mark)),
2916 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2917 offsetof(struct __sk_buff, priority)),
2918 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2919 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2920 offsetof(struct __sk_buff, pkt_type)),
2921 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2922 BPF_EXIT_INSN(),
2923 },
2924 .result = REJECT,
2925 .errstr = "R3 invalid mem access 'inv'",
2926 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2927 },
2928 {
2929 "raw_stack: skb_load_bytes, spilled regs + data",
2930 .insns = {
2931 BPF_MOV64_IMM(BPF_REG_2, 4),
2932 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2933 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002934 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2935 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2936 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002937 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2938 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002939 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2940 BPF_FUNC_skb_load_bytes),
2941 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2942 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2943 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002944 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2945 offsetof(struct __sk_buff, mark)),
2946 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2947 offsetof(struct __sk_buff, priority)),
2948 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2949 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2950 BPF_EXIT_INSN(),
2951 },
2952 .result = ACCEPT,
2953 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2954 },
2955 {
2956 "raw_stack: skb_load_bytes, invalid access 1",
2957 .insns = {
2958 BPF_MOV64_IMM(BPF_REG_2, 4),
2959 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2961 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2962 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002963 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2964 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002965 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2966 BPF_EXIT_INSN(),
2967 },
2968 .result = REJECT,
2969 .errstr = "invalid stack type R3 off=-513 access_size=8",
2970 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2971 },
2972 {
2973 "raw_stack: skb_load_bytes, invalid access 2",
2974 .insns = {
2975 BPF_MOV64_IMM(BPF_REG_2, 4),
2976 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2978 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2979 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002980 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2981 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002982 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2983 BPF_EXIT_INSN(),
2984 },
2985 .result = REJECT,
2986 .errstr = "invalid stack type R3 off=-1 access_size=8",
2987 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2988 },
2989 {
2990 "raw_stack: skb_load_bytes, invalid access 3",
2991 .insns = {
2992 BPF_MOV64_IMM(BPF_REG_2, 4),
2993 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2994 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2995 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2996 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002997 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2998 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002999 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3000 BPF_EXIT_INSN(),
3001 },
3002 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003003 .errstr = "R4 min value is negative",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003004 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3005 },
3006 {
3007 "raw_stack: skb_load_bytes, invalid access 4",
3008 .insns = {
3009 BPF_MOV64_IMM(BPF_REG_2, 4),
3010 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3012 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3013 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003014 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3015 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003016 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3017 BPF_EXIT_INSN(),
3018 },
3019 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003020 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003021 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3022 },
3023 {
3024 "raw_stack: skb_load_bytes, invalid access 5",
3025 .insns = {
3026 BPF_MOV64_IMM(BPF_REG_2, 4),
3027 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3028 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3029 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3030 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3032 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003033 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3034 BPF_EXIT_INSN(),
3035 },
3036 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003037 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003038 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3039 },
3040 {
3041 "raw_stack: skb_load_bytes, invalid access 6",
3042 .insns = {
3043 BPF_MOV64_IMM(BPF_REG_2, 4),
3044 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3045 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3046 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3047 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003048 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3049 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003050 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3051 BPF_EXIT_INSN(),
3052 },
3053 .result = REJECT,
3054 .errstr = "invalid stack type R3 off=-512 access_size=0",
3055 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3056 },
3057 {
3058 "raw_stack: skb_load_bytes, large access",
3059 .insns = {
3060 BPF_MOV64_IMM(BPF_REG_2, 4),
3061 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3063 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3064 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003065 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3066 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003067 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3068 BPF_EXIT_INSN(),
3069 },
3070 .result = ACCEPT,
3071 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3072 },
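	/* Context fields may only be written with BPF_STX; immediate
	 * BPF_ST stores and atomic BPF_XADD updates through the context
	 * register are rejected even for otherwise writable fields such
	 * as skb->mark.
	 */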
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003073 {
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01003074 "context stores via ST",
3075 .insns = {
3076 BPF_MOV64_IMM(BPF_REG_0, 0),
3077 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3078 BPF_EXIT_INSN(),
3079 },
3080 .errstr = "BPF_ST stores into R1 context is not allowed",
3081 .result = REJECT,
3082 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3083 },
3084 {
3085 "context stores via XADD",
3086 .insns = {
3087 BPF_MOV64_IMM(BPF_REG_0, 0),
3088 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3089 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3090 BPF_EXIT_INSN(),
3091 },
3092 .errstr = "BPF_XADD stores into R1 context is not allowed",
3093 .result = REJECT,
3094 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3095 },
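	/* The "direct packet access:" series checks skb->data/data_end
	 * handling (mostly for tc programs): packet loads and stores must
	 * be preceded by a bounds check of the access end against
	 * data_end, and the resulting range knowledge may only be used on
	 * the branch where that check actually succeeded.
	 */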
3096 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003097 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003098 .insns = {
3099 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3100 offsetof(struct __sk_buff, data)),
3101 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3102 offsetof(struct __sk_buff, data_end)),
3103 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3105 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3106 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3107 BPF_MOV64_IMM(BPF_REG_0, 0),
3108 BPF_EXIT_INSN(),
3109 },
3110 .result = ACCEPT,
3111 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3112 },
3113 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003114 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003115 .insns = {
3116 BPF_MOV64_IMM(BPF_REG_0, 1),
3117 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3118 offsetof(struct __sk_buff, data_end)),
3119 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3120 offsetof(struct __sk_buff, data)),
3121 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3123 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3124 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3125 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3126 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3127 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3128 offsetof(struct __sk_buff, data)),
3129 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08003130 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3131 offsetof(struct __sk_buff, len)),
Edward Cree1f9ab382017-08-07 15:29:11 +01003132 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3133 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003134 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3135 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3136 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3137 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3138 offsetof(struct __sk_buff, data_end)),
3139 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3140 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3141 BPF_MOV64_IMM(BPF_REG_0, 0),
3142 BPF_EXIT_INSN(),
3143 },
3144 .result = ACCEPT,
3145 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3146 },
3147 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003148 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003149 .insns = {
3150 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3151 offsetof(struct __sk_buff, data)),
3152 BPF_MOV64_IMM(BPF_REG_0, 0),
3153 BPF_EXIT_INSN(),
3154 },
3155 .errstr = "invalid bpf_context access off=76",
3156 .result = REJECT,
3157 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3158 },
3159 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003160 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003161 .insns = {
3162 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3163 offsetof(struct __sk_buff, data)),
3164 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3165 offsetof(struct __sk_buff, data_end)),
3166 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3167 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3168 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3169 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3170 BPF_MOV64_IMM(BPF_REG_0, 0),
3171 BPF_EXIT_INSN(),
3172 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003173 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003174 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3175 },
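	/*
	 * test5 through test9 keep the same data/data_end setup but flip
	 * the comparison around (pkt_end >= reg) and add double tests: the
	 * packet range may only be marked on the branch where the bound
	 * actually holds, otherwise the access must be rejected.
	 */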
Aaron Yue1633ac02016-08-11 18:17:17 -07003176 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02003177 "direct packet access: test5 (pkt_end >= reg, good access)",
3178 .insns = {
3179 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3180 offsetof(struct __sk_buff, data)),
3181 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3182 offsetof(struct __sk_buff, data_end)),
3183 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3185 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3186 BPF_MOV64_IMM(BPF_REG_0, 1),
3187 BPF_EXIT_INSN(),
3188 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3189 BPF_MOV64_IMM(BPF_REG_0, 0),
3190 BPF_EXIT_INSN(),
3191 },
3192 .result = ACCEPT,
3193 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3194 },
3195 {
3196 "direct packet access: test6 (pkt_end >= reg, bad access)",
3197 .insns = {
3198 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3199 offsetof(struct __sk_buff, data)),
3200 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3201 offsetof(struct __sk_buff, data_end)),
3202 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3203 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3204 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3205 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3206 BPF_MOV64_IMM(BPF_REG_0, 1),
3207 BPF_EXIT_INSN(),
3208 BPF_MOV64_IMM(BPF_REG_0, 0),
3209 BPF_EXIT_INSN(),
3210 },
3211 .errstr = "invalid access to packet",
3212 .result = REJECT,
3213 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3214 },
3215 {
3216 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3217 .insns = {
3218 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3219 offsetof(struct __sk_buff, data)),
3220 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3221 offsetof(struct __sk_buff, data_end)),
3222 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3223 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3224 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3225 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3226 BPF_MOV64_IMM(BPF_REG_0, 1),
3227 BPF_EXIT_INSN(),
3228 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3229 BPF_MOV64_IMM(BPF_REG_0, 0),
3230 BPF_EXIT_INSN(),
3231 },
3232 .errstr = "invalid access to packet",
3233 .result = REJECT,
3234 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3235 },
3236 {
3237 "direct packet access: test8 (double test, variant 1)",
3238 .insns = {
3239 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3240 offsetof(struct __sk_buff, data)),
3241 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3242 offsetof(struct __sk_buff, data_end)),
3243 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3245 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3246 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3247 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3248 BPF_MOV64_IMM(BPF_REG_0, 1),
3249 BPF_EXIT_INSN(),
3250 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3251 BPF_MOV64_IMM(BPF_REG_0, 0),
3252 BPF_EXIT_INSN(),
3253 },
3254 .result = ACCEPT,
3255 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3256 },
3257 {
3258 "direct packet access: test9 (double test, variant 2)",
3259 .insns = {
3260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3261 offsetof(struct __sk_buff, data)),
3262 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3263 offsetof(struct __sk_buff, data_end)),
3264 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3266 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3267 BPF_MOV64_IMM(BPF_REG_0, 1),
3268 BPF_EXIT_INSN(),
3269 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3270 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3271 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3272 BPF_MOV64_IMM(BPF_REG_0, 0),
3273 BPF_EXIT_INSN(),
3274 },
3275 .result = ACCEPT,
3276 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3277 },
3278 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003279 "direct packet access: test10 (write invalid)",
3280 .insns = {
3281 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3282 offsetof(struct __sk_buff, data)),
3283 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3284 offsetof(struct __sk_buff, data_end)),
3285 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3287 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3288 BPF_MOV64_IMM(BPF_REG_0, 0),
3289 BPF_EXIT_INSN(),
3290 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3291 BPF_MOV64_IMM(BPF_REG_0, 0),
3292 BPF_EXIT_INSN(),
3293 },
3294 .errstr = "invalid access to packet",
3295 .result = REJECT,
3296 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3297 },
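	/*
	 * test11 through test14 derive the packet offset from ALU ops on a
	 * scalar (shift, AND mask, branches, constant + 0); the verifier
	 * has to keep the resulting bounds tight enough to accept the
	 * access. These programs are also executed and expect retval 1.
	 */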
3298 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003299 "direct packet access: test11 (shift, good access)",
3300 .insns = {
3301 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3302 offsetof(struct __sk_buff, data)),
3303 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3304 offsetof(struct __sk_buff, data_end)),
3305 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3306 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3307 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3308 BPF_MOV64_IMM(BPF_REG_3, 144),
3309 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3311 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3312 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3313 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3314 BPF_MOV64_IMM(BPF_REG_0, 1),
3315 BPF_EXIT_INSN(),
3316 BPF_MOV64_IMM(BPF_REG_0, 0),
3317 BPF_EXIT_INSN(),
3318 },
3319 .result = ACCEPT,
3320 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003321 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003322 },
3323 {
3324 "direct packet access: test12 (and, good access)",
3325 .insns = {
3326 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3327 offsetof(struct __sk_buff, data)),
3328 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3329 offsetof(struct __sk_buff, data_end)),
3330 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3332 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3333 BPF_MOV64_IMM(BPF_REG_3, 144),
3334 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3335 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3336 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3337 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3338 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3339 BPF_MOV64_IMM(BPF_REG_0, 1),
3340 BPF_EXIT_INSN(),
3341 BPF_MOV64_IMM(BPF_REG_0, 0),
3342 BPF_EXIT_INSN(),
3343 },
3344 .result = ACCEPT,
3345 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003346 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003347 },
3348 {
3349 "direct packet access: test13 (branches, good access)",
3350 .insns = {
3351 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3352 offsetof(struct __sk_buff, data)),
3353 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3354 offsetof(struct __sk_buff, data_end)),
3355 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3357 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3358 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3359 offsetof(struct __sk_buff, mark)),
3360 BPF_MOV64_IMM(BPF_REG_4, 1),
3361 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3362 BPF_MOV64_IMM(BPF_REG_3, 14),
3363 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3364 BPF_MOV64_IMM(BPF_REG_3, 24),
3365 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3367 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3368 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3369 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3370 BPF_MOV64_IMM(BPF_REG_0, 1),
3371 BPF_EXIT_INSN(),
3372 BPF_MOV64_IMM(BPF_REG_0, 0),
3373 BPF_EXIT_INSN(),
3374 },
3375 .result = ACCEPT,
3376 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003377 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003378 },
3379 {
William Tu63dfef72017-02-04 08:37:29 -08003380 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3381 .insns = {
3382 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3383 offsetof(struct __sk_buff, data)),
3384 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3385 offsetof(struct __sk_buff, data_end)),
3386 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3388 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3389 BPF_MOV64_IMM(BPF_REG_5, 12),
3390 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3391 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3392 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3393 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3394 BPF_MOV64_IMM(BPF_REG_0, 1),
3395 BPF_EXIT_INSN(),
3396 BPF_MOV64_IMM(BPF_REG_0, 0),
3397 BPF_EXIT_INSN(),
3398 },
3399 .result = ACCEPT,
3400 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003401 .retval = 1,
William Tu63dfef72017-02-04 08:37:29 -08003402 },
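	/*
	 * test15 through test17 are rejection cases: a spilled packet
	 * pointer clobbered by xadd, arithmetic on data_end itself, and a
	 * misaligned store under F_LOAD_WITH_STRICT_ALIGNMENT.
	 */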
3403 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003404 "direct packet access: test15 (spill with xadd)",
3405 .insns = {
3406 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3407 offsetof(struct __sk_buff, data)),
3408 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3409 offsetof(struct __sk_buff, data_end)),
3410 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3411 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3412 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3413 BPF_MOV64_IMM(BPF_REG_5, 4096),
3414 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3415 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3416 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3417 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3418 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3419 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3420 BPF_MOV64_IMM(BPF_REG_0, 0),
3421 BPF_EXIT_INSN(),
3422 },
3423 .errstr = "R2 invalid mem access 'inv'",
3424 .result = REJECT,
3425 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3426 },
3427 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02003428 "direct packet access: test16 (arith on data_end)",
3429 .insns = {
3430 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3431 offsetof(struct __sk_buff, data)),
3432 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3433 offsetof(struct __sk_buff, data_end)),
3434 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3437 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3438 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3439 BPF_MOV64_IMM(BPF_REG_0, 0),
3440 BPF_EXIT_INSN(),
3441 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08003442 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
Daniel Borkmann728a8532017-04-27 01:39:32 +02003443 .result = REJECT,
3444 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3445 },
3446 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003447 "direct packet access: test17 (pruning, alignment)",
3448 .insns = {
3449 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3450 offsetof(struct __sk_buff, data)),
3451 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3452 offsetof(struct __sk_buff, data_end)),
3453 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3454 offsetof(struct __sk_buff, mark)),
3455 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3457 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3458 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3459 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3460 BPF_MOV64_IMM(BPF_REG_0, 0),
3461 BPF_EXIT_INSN(),
3462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3463 BPF_JMP_A(-6),
3464 },
Edward Creef65b1842017-08-07 15:27:12 +01003465 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003466 .result = REJECT,
3467 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3468 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3469 },
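	/*
	 * test18 through test24 add the packet pointer to a scalar
	 * (imm += pkt_ptr, x += pkt_ptr) instead of the other way around,
	 * with the scalar bounded via AND/RSH first; test23 leaves the
	 * resulting offset too wide and must be rejected.
	 */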
3470 {
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003471 "direct packet access: test18 (imm += pkt_ptr, 1)",
3472 .insns = {
3473 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3474 offsetof(struct __sk_buff, data)),
3475 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3476 offsetof(struct __sk_buff, data_end)),
3477 BPF_MOV64_IMM(BPF_REG_0, 8),
3478 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3479 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3480 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3481 BPF_MOV64_IMM(BPF_REG_0, 0),
3482 BPF_EXIT_INSN(),
3483 },
3484 .result = ACCEPT,
3485 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3486 },
3487 {
3488 "direct packet access: test19 (imm += pkt_ptr, 2)",
3489 .insns = {
3490 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3491 offsetof(struct __sk_buff, data)),
3492 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3493 offsetof(struct __sk_buff, data_end)),
3494 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3496 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3497 BPF_MOV64_IMM(BPF_REG_4, 4),
3498 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3499 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3500 BPF_MOV64_IMM(BPF_REG_0, 0),
3501 BPF_EXIT_INSN(),
3502 },
3503 .result = ACCEPT,
3504 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3505 },
3506 {
3507 "direct packet access: test20 (x += pkt_ptr, 1)",
3508 .insns = {
3509 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3510 offsetof(struct __sk_buff, data)),
3511 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3512 offsetof(struct __sk_buff, data_end)),
3513 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3514 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3515 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003516 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003517 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3518 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3519 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003521 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3522 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3523 BPF_MOV64_IMM(BPF_REG_0, 0),
3524 BPF_EXIT_INSN(),
3525 },
3526 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3527 .result = ACCEPT,
3528 },
3529 {
3530 "direct packet access: test21 (x += pkt_ptr, 2)",
3531 .insns = {
3532 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3533 offsetof(struct __sk_buff, data)),
3534 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3535 offsetof(struct __sk_buff, data_end)),
3536 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3538 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3539 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3540 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3541 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003542 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003543 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3544 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003546 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3547 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3548 BPF_MOV64_IMM(BPF_REG_0, 0),
3549 BPF_EXIT_INSN(),
3550 },
3551 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3552 .result = ACCEPT,
3553 },
3554 {
3555 "direct packet access: test22 (x += pkt_ptr, 3)",
3556 .insns = {
3557 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3558 offsetof(struct __sk_buff, data)),
3559 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3560 offsetof(struct __sk_buff, data_end)),
3561 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3563 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3564 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3565 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3566 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3567 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3568 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3569 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3570 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003571 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003572 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3573 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3574 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3575 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3576 BPF_MOV64_IMM(BPF_REG_2, 1),
3577 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3578 BPF_MOV64_IMM(BPF_REG_0, 0),
3579 BPF_EXIT_INSN(),
3580 },
3581 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3582 .result = ACCEPT,
3583 },
3584 {
3585 "direct packet access: test23 (x += pkt_ptr, 4)",
3586 .insns = {
3587 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3588 offsetof(struct __sk_buff, data)),
3589 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3590 offsetof(struct __sk_buff, data_end)),
3591 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3592 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3593 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3594 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3595 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3596 BPF_MOV64_IMM(BPF_REG_0, 31),
3597 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3598 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3599 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3601 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3602 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3603 BPF_MOV64_IMM(BPF_REG_0, 0),
3604 BPF_EXIT_INSN(),
3605 },
3606 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3607 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003608 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003609 },
3610 {
3611 "direct packet access: test24 (x += pkt_ptr, 5)",
3612 .insns = {
3613 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3614 offsetof(struct __sk_buff, data)),
3615 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3616 offsetof(struct __sk_buff, data_end)),
3617 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3618 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3619 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3620 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3621 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3622 BPF_MOV64_IMM(BPF_REG_0, 64),
3623 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3624 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3625 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
Edward Cree1f9ab382017-08-07 15:29:11 +01003626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003627 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3628 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3629 BPF_MOV64_IMM(BPF_REG_0, 0),
3630 BPF_EXIT_INSN(),
3631 },
3632 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3633 .result = ACCEPT,
3634 },
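	/*
	 * test25 through test28 cover branch marking for the < and <=
	 * variants (BPF_JLT/BPF_JLE against pkt_end): the access is only
	 * valid on the branch where the bound is known to hold.
	 */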
3635 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003636 "direct packet access: test25 (marking on <, good access)",
3637 .insns = {
3638 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3639 offsetof(struct __sk_buff, data)),
3640 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3641 offsetof(struct __sk_buff, data_end)),
3642 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3644 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3645 BPF_MOV64_IMM(BPF_REG_0, 0),
3646 BPF_EXIT_INSN(),
3647 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3648 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3649 },
3650 .result = ACCEPT,
3651 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3652 },
3653 {
3654 "direct packet access: test26 (marking on <, bad access)",
3655 .insns = {
3656 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3657 offsetof(struct __sk_buff, data)),
3658 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3659 offsetof(struct __sk_buff, data_end)),
3660 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3662 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3663 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3664 BPF_MOV64_IMM(BPF_REG_0, 0),
3665 BPF_EXIT_INSN(),
3666 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3667 },
3668 .result = REJECT,
3669 .errstr = "invalid access to packet",
3670 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3671 },
3672 {
3673 "direct packet access: test27 (marking on <=, good access)",
3674 .insns = {
3675 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3676 offsetof(struct __sk_buff, data)),
3677 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3678 offsetof(struct __sk_buff, data_end)),
3679 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3681 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3682 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3683 BPF_MOV64_IMM(BPF_REG_0, 1),
3684 BPF_EXIT_INSN(),
3685 },
3686 .result = ACCEPT,
3687 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003688 .retval = 1,
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003689 },
3690 {
3691 "direct packet access: test28 (marking on <=, bad access)",
3692 .insns = {
3693 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3694 offsetof(struct __sk_buff, data)),
3695 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3696 offsetof(struct __sk_buff, data_end)),
3697 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3698 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3699 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3700 BPF_MOV64_IMM(BPF_REG_0, 1),
3701 BPF_EXIT_INSN(),
3702 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3703 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3704 },
3705 .result = REJECT,
3706 .errstr = "invalid access to packet",
3707 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3708 },
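	/*
	 * Helper access to packet: a packet pointer is handed to map
	 * helpers (map_update_elem/map_lookup_elem) as key or value
	 * pointer. The call is only accepted when a preceding data_end
	 * check covers the range the helper may touch; tests 1-5 do this
	 * from XDP programs.
	 */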
3709 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003710 "helper access to packet: test1, valid packet_ptr range",
3711 .insns = {
3712 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3713 offsetof(struct xdp_md, data)),
3714 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3715 offsetof(struct xdp_md, data_end)),
3716 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3717 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3718 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3719 BPF_LD_MAP_FD(BPF_REG_1, 0),
3720 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3721 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003722 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3723 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003724 BPF_MOV64_IMM(BPF_REG_0, 0),
3725 BPF_EXIT_INSN(),
3726 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003727 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003728 .result_unpriv = ACCEPT,
3729 .result = ACCEPT,
3730 .prog_type = BPF_PROG_TYPE_XDP,
3731 },
3732 {
3733 "helper access to packet: test2, unchecked packet_ptr",
3734 .insns = {
3735 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3736 offsetof(struct xdp_md, data)),
3737 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003738 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3739 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003740 BPF_MOV64_IMM(BPF_REG_0, 0),
3741 BPF_EXIT_INSN(),
3742 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003743 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003744 .result = REJECT,
3745 .errstr = "invalid access to packet",
3746 .prog_type = BPF_PROG_TYPE_XDP,
3747 },
3748 {
3749 "helper access to packet: test3, variable add",
3750 .insns = {
3751 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3752 offsetof(struct xdp_md, data)),
3753 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3754 offsetof(struct xdp_md, data_end)),
3755 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3756 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3757 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3758 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3759 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3760 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3761 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3763 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3764 BPF_LD_MAP_FD(BPF_REG_1, 0),
3765 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003766 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3767 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003768 BPF_MOV64_IMM(BPF_REG_0, 0),
3769 BPF_EXIT_INSN(),
3770 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003771 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003772 .result = ACCEPT,
3773 .prog_type = BPF_PROG_TYPE_XDP,
3774 },
3775 {
3776 "helper access to packet: test4, packet_ptr with bad range",
3777 .insns = {
3778 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3779 offsetof(struct xdp_md, data)),
3780 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3781 offsetof(struct xdp_md, data_end)),
3782 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3784 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3785 BPF_MOV64_IMM(BPF_REG_0, 0),
3786 BPF_EXIT_INSN(),
3787 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003788 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3789 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003790 BPF_MOV64_IMM(BPF_REG_0, 0),
3791 BPF_EXIT_INSN(),
3792 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003793 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003794 .result = REJECT,
3795 .errstr = "invalid access to packet",
3796 .prog_type = BPF_PROG_TYPE_XDP,
3797 },
3798 {
3799 "helper access to packet: test5, packet_ptr with too short range",
3800 .insns = {
3801 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3802 offsetof(struct xdp_md, data)),
3803 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3804 offsetof(struct xdp_md, data_end)),
3805 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3806 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3807 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3808 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3809 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003810 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3811 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003812 BPF_MOV64_IMM(BPF_REG_0, 0),
3813 BPF_EXIT_INSN(),
3814 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003815 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003816 .result = REJECT,
3817 .errstr = "invalid access to packet",
3818 .prog_type = BPF_PROG_TYPE_XDP,
3819 },
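	/*
	 * test6 through test10 repeat the previous five cases for
	 * BPF_PROG_TYPE_SCHED_CLS, reading skb->data/data_end instead of
	 * the xdp_md fields, with the same expected results.
	 */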
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003820 {
3821 "helper access to packet: test6, cls valid packet_ptr range",
3822 .insns = {
3823 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3824 offsetof(struct __sk_buff, data)),
3825 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3826 offsetof(struct __sk_buff, data_end)),
3827 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3829 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3830 BPF_LD_MAP_FD(BPF_REG_1, 0),
3831 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3832 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003833 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3834 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003835 BPF_MOV64_IMM(BPF_REG_0, 0),
3836 BPF_EXIT_INSN(),
3837 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003838 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003839 .result = ACCEPT,
3840 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3841 },
3842 {
3843 "helper access to packet: test7, cls unchecked packet_ptr",
3844 .insns = {
3845 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3846 offsetof(struct __sk_buff, data)),
3847 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003848 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3849 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003850 BPF_MOV64_IMM(BPF_REG_0, 0),
3851 BPF_EXIT_INSN(),
3852 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003853 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003854 .result = REJECT,
3855 .errstr = "invalid access to packet",
3856 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3857 },
3858 {
3859 "helper access to packet: test8, cls variable add",
3860 .insns = {
3861 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3862 offsetof(struct __sk_buff, data)),
3863 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3864 offsetof(struct __sk_buff, data_end)),
3865 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3866 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3867 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3868 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3869 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3870 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3871 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3873 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3874 BPF_LD_MAP_FD(BPF_REG_1, 0),
3875 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003876 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3877 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003878 BPF_MOV64_IMM(BPF_REG_0, 0),
3879 BPF_EXIT_INSN(),
3880 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003881 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003882 .result = ACCEPT,
3883 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3884 },
3885 {
3886 "helper access to packet: test9, cls packet_ptr with bad range",
3887 .insns = {
3888 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3889 offsetof(struct __sk_buff, data)),
3890 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3891 offsetof(struct __sk_buff, data_end)),
3892 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3893 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3894 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3895 BPF_MOV64_IMM(BPF_REG_0, 0),
3896 BPF_EXIT_INSN(),
3897 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003898 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3899 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003900 BPF_MOV64_IMM(BPF_REG_0, 0),
3901 BPF_EXIT_INSN(),
3902 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003903 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003904 .result = REJECT,
3905 .errstr = "invalid access to packet",
3906 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3907 },
3908 {
3909 "helper access to packet: test10, cls packet_ptr with too short range",
3910 .insns = {
3911 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3912 offsetof(struct __sk_buff, data)),
3913 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3914 offsetof(struct __sk_buff, data_end)),
3915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3916 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3918 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3919 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003920 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3921 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003922 BPF_MOV64_IMM(BPF_REG_0, 0),
3923 BPF_EXIT_INSN(),
3924 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003925 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003926 .result = REJECT,
3927 .errstr = "invalid access to packet",
3928 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3929 },
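	/*
	 * test11 through test21 pass packet pointers to specific helpers:
	 * skb_store_bytes/skb_load_bytes may not see a packet pointer at
	 * all ("helper access to the packet"), while csum_diff takes one
	 * only with an in-range, non-negative length; passing pkt_end or
	 * a pointer outside the verified range as the buffer argument is
	 * rejected as well.
	 */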
3930 {
3931 "helper access to packet: test11, cls unsuitable helper 1",
3932 .insns = {
3933 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3934 offsetof(struct __sk_buff, data)),
3935 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3936 offsetof(struct __sk_buff, data_end)),
3937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3938 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3940 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3941 BPF_MOV64_IMM(BPF_REG_2, 0),
3942 BPF_MOV64_IMM(BPF_REG_4, 42),
3943 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003944 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3945 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003946 BPF_MOV64_IMM(BPF_REG_0, 0),
3947 BPF_EXIT_INSN(),
3948 },
3949 .result = REJECT,
3950 .errstr = "helper access to the packet",
3951 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3952 },
3953 {
3954 "helper access to packet: test12, cls unsuitable helper 2",
3955 .insns = {
3956 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3957 offsetof(struct __sk_buff, data)),
3958 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3959 offsetof(struct __sk_buff, data_end)),
3960 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3961 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3962 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3963 BPF_MOV64_IMM(BPF_REG_2, 0),
3964 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003965 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3966 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003967 BPF_MOV64_IMM(BPF_REG_0, 0),
3968 BPF_EXIT_INSN(),
3969 },
3970 .result = REJECT,
3971 .errstr = "helper access to the packet",
3972 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3973 },
3974 {
3975 "helper access to packet: test13, cls helper ok",
3976 .insns = {
3977 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3978 offsetof(struct __sk_buff, data)),
3979 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3980 offsetof(struct __sk_buff, data_end)),
3981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3982 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3983 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3984 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3985 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3986 BPF_MOV64_IMM(BPF_REG_2, 4),
3987 BPF_MOV64_IMM(BPF_REG_3, 0),
3988 BPF_MOV64_IMM(BPF_REG_4, 0),
3989 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003990 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3991 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003992 BPF_MOV64_IMM(BPF_REG_0, 0),
3993 BPF_EXIT_INSN(),
3994 },
3995 .result = ACCEPT,
3996 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3997 },
3998 {
Edward Creef65b1842017-08-07 15:27:12 +01003999 "helper access to packet: test14, cls helper ok sub",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004000 .insns = {
4001 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4002 offsetof(struct __sk_buff, data)),
4003 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4004 offsetof(struct __sk_buff, data_end)),
4005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4006 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4007 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4008 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4009 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4010 BPF_MOV64_IMM(BPF_REG_2, 4),
4011 BPF_MOV64_IMM(BPF_REG_3, 0),
4012 BPF_MOV64_IMM(BPF_REG_4, 0),
4013 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004014 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4015 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004016 BPF_MOV64_IMM(BPF_REG_0, 0),
4017 BPF_EXIT_INSN(),
4018 },
Edward Creef65b1842017-08-07 15:27:12 +01004019 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004020 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4021 },
4022 {
Edward Creef65b1842017-08-07 15:27:12 +01004023 "helper access to packet: test15, cls helper fail sub",
4024 .insns = {
4025 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4026 offsetof(struct __sk_buff, data)),
4027 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4028 offsetof(struct __sk_buff, data_end)),
4029 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4030 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4032 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4033 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4034 BPF_MOV64_IMM(BPF_REG_2, 4),
4035 BPF_MOV64_IMM(BPF_REG_3, 0),
4036 BPF_MOV64_IMM(BPF_REG_4, 0),
4037 BPF_MOV64_IMM(BPF_REG_5, 0),
4038 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4039 BPF_FUNC_csum_diff),
4040 BPF_MOV64_IMM(BPF_REG_0, 0),
4041 BPF_EXIT_INSN(),
4042 },
4043 .result = REJECT,
4044 .errstr = "invalid access to packet",
4045 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4046 },
4047 {
4048 "helper access to packet: test16, cls helper fail range 1",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004049 .insns = {
4050 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4051 offsetof(struct __sk_buff, data)),
4052 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4053 offsetof(struct __sk_buff, data_end)),
4054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4055 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4056 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4057 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4058 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4059 BPF_MOV64_IMM(BPF_REG_2, 8),
4060 BPF_MOV64_IMM(BPF_REG_3, 0),
4061 BPF_MOV64_IMM(BPF_REG_4, 0),
4062 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004063 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4064 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004065 BPF_MOV64_IMM(BPF_REG_0, 0),
4066 BPF_EXIT_INSN(),
4067 },
4068 .result = REJECT,
4069 .errstr = "invalid access to packet",
4070 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4071 },
4072 {
Edward Creef65b1842017-08-07 15:27:12 +01004073 "helper access to packet: test17, cls helper fail range 2",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004074 .insns = {
4075 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4076 offsetof(struct __sk_buff, data)),
4077 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4078 offsetof(struct __sk_buff, data_end)),
4079 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4080 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4082 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4083 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4084 BPF_MOV64_IMM(BPF_REG_2, -9),
4085 BPF_MOV64_IMM(BPF_REG_3, 0),
4086 BPF_MOV64_IMM(BPF_REG_4, 0),
4087 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004088 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4089 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004090 BPF_MOV64_IMM(BPF_REG_0, 0),
4091 BPF_EXIT_INSN(),
4092 },
4093 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004094 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004095 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4096 },
4097 {
Edward Creef65b1842017-08-07 15:27:12 +01004098 "helper access to packet: test18, cls helper fail range 3",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004099 .insns = {
4100 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4101 offsetof(struct __sk_buff, data)),
4102 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4103 offsetof(struct __sk_buff, data_end)),
4104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4105 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4107 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4108 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4109 BPF_MOV64_IMM(BPF_REG_2, ~0),
4110 BPF_MOV64_IMM(BPF_REG_3, 0),
4111 BPF_MOV64_IMM(BPF_REG_4, 0),
4112 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4114 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004115 BPF_MOV64_IMM(BPF_REG_0, 0),
4116 BPF_EXIT_INSN(),
4117 },
4118 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004119 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004120 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4121 },
4122 {
Yonghong Songb6ff6392017-11-12 14:49:11 -08004123 "helper access to packet: test19, cls helper range zero",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004124 .insns = {
4125 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4126 offsetof(struct __sk_buff, data)),
4127 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4128 offsetof(struct __sk_buff, data_end)),
4129 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4130 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4132 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4133 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4134 BPF_MOV64_IMM(BPF_REG_2, 0),
4135 BPF_MOV64_IMM(BPF_REG_3, 0),
4136 BPF_MOV64_IMM(BPF_REG_4, 0),
4137 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004138 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4139 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004140 BPF_MOV64_IMM(BPF_REG_0, 0),
4141 BPF_EXIT_INSN(),
4142 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08004143 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004144 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4145 },
4146 {
Edward Creef65b1842017-08-07 15:27:12 +01004147 "helper access to packet: test20, pkt end as input",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004148 .insns = {
4149 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4150 offsetof(struct __sk_buff, data)),
4151 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4152 offsetof(struct __sk_buff, data_end)),
4153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4154 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4155 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4156 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4157 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4158 BPF_MOV64_IMM(BPF_REG_2, 4),
4159 BPF_MOV64_IMM(BPF_REG_3, 0),
4160 BPF_MOV64_IMM(BPF_REG_4, 0),
4161 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004162 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4163 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004164 BPF_MOV64_IMM(BPF_REG_0, 0),
4165 BPF_EXIT_INSN(),
4166 },
4167 .result = REJECT,
4168 .errstr = "R1 type=pkt_end expected=fp",
4169 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4170 },
4171 {
Edward Creef65b1842017-08-07 15:27:12 +01004172 "helper access to packet: test21, wrong reg",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004173 .insns = {
4174 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4175 offsetof(struct __sk_buff, data)),
4176 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4177 offsetof(struct __sk_buff, data_end)),
4178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4179 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4181 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4182 BPF_MOV64_IMM(BPF_REG_2, 4),
4183 BPF_MOV64_IMM(BPF_REG_3, 0),
4184 BPF_MOV64_IMM(BPF_REG_4, 0),
4185 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4187 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004188 BPF_MOV64_IMM(BPF_REG_0, 0),
4189 BPF_EXIT_INSN(),
4190 },
4191 .result = REJECT,
4192 .errstr = "invalid access to packet",
4193 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4194 },
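	/*
	 * Array map accesses: the value pointer returned by
	 * map_lookup_elem may be offset by a constant or by a register,
	 * but only after the index has been bounds checked against
	 * MAX_ENTRIES; unchecked or out-of-range offsets must be rejected,
	 * and unprivileged programs must not leak the map value address
	 * ("R0 leaks addr").
	 */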
Josef Bacik48461132016-09-28 10:54:32 -04004195 {
4196 "valid map access into an array with a constant",
4197 .insns = {
4198 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4199 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4200 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4201 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004202 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4203 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004204 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004205 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4206 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004207 BPF_EXIT_INSN(),
4208 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004209 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004210 .errstr_unpriv = "R0 leaks addr",
4211 .result_unpriv = REJECT,
4212 .result = ACCEPT,
4213 },
4214 {
4215 "valid map access into an array with a register",
4216 .insns = {
4217 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4218 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4220 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004221 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4222 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004223 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4224 BPF_MOV64_IMM(BPF_REG_1, 4),
4225 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4226 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004227 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4228 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004229 BPF_EXIT_INSN(),
4230 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004231 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004232 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004233 .result_unpriv = REJECT,
4234 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004235 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004236 },
4237 {
4238 "valid map access into an array with a variable",
4239 .insns = {
4240 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4241 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4243 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004244 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4245 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004246 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4247 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4248 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4249 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4250 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004251 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4252 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004253 BPF_EXIT_INSN(),
4254 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004255 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004256 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004257 .result_unpriv = REJECT,
4258 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004259 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004260 },
4261 {
4262 "valid map access into an array with a signed variable",
4263 .insns = {
4264 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4265 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4267 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004268 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4269 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004270 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4271 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4272 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4273 BPF_MOV32_IMM(BPF_REG_1, 0),
4274 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4275 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4276 BPF_MOV32_IMM(BPF_REG_1, 0),
4277 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4278 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004279 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4280 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004281 BPF_EXIT_INSN(),
4282 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004283 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004284 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004285 .result_unpriv = REJECT,
4286 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004287 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004288 },
4289 {
4290 "invalid map access into an array with a constant",
4291 .insns = {
4292 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4293 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4295 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004296 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4297 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004298 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4299 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4300 offsetof(struct test_val, foo)),
4301 BPF_EXIT_INSN(),
4302 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004303 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004304 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4305 .result = REJECT,
4306 },
4307 {
4308 "invalid map access into an array with a register",
4309 .insns = {
4310 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4313 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4315 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004316 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4317 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4318 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4319 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004320 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4321 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004322 BPF_EXIT_INSN(),
4323 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004324 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004325 .errstr = "R0 min value is outside of the array range",
4326 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004327 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004328 },
4329 {
4330 "invalid map access into an array with a variable",
4331 .insns = {
4332 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4333 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4335 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4337 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004338 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4339 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4340 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4341 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004342 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4343 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004344 BPF_EXIT_INSN(),
4345 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004346 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004347 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
Josef Bacik48461132016-09-28 10:54:32 -04004348 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004349 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004350 },
4351 {
4352 "invalid map access into an array with no floor check",
4353 .insns = {
4354 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4355 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4357 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004358 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4359 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004360 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
Edward Creef65b1842017-08-07 15:27:12 +01004361 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik48461132016-09-28 10:54:32 -04004362 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4363 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4364 BPF_MOV32_IMM(BPF_REG_1, 0),
4365 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4366 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004367 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4368 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004369 BPF_EXIT_INSN(),
4370 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004371 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004372 .errstr_unpriv = "R0 leaks addr",
4373 .errstr = "R0 unbounded memory access",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004374 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004375 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004376 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004377 },
4378 {
4379 "invalid map access into an array with an invalid max check",
4380 .insns = {
4381 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4382 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4384 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4386 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004387 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4388 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4389 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4390 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4391 BPF_MOV32_IMM(BPF_REG_1, 0),
4392 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4393 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004394 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4395 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004396 BPF_EXIT_INSN(),
4397 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004398 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004399 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004400 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004401 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004402 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004403 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004404 },
4405 {
4406 "invalid map access into an array with an invalid max check",
4407 .insns = {
4408 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4409 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4411 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4413 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004414 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4415 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4416 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4417 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4418 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4419 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004420 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4421 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004422 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4423 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004424 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4425 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004426 BPF_EXIT_INSN(),
4427 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004428 .fixup_map2 = { 3, 11 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004429 .errstr = "R0 pointer += pointer",
Josef Bacik48461132016-09-28 10:54:32 -04004430 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004431 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004432 },
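	/*
	 * map_lookup_elem returns PTR_TO_MAP_VALUE_OR_NULL: a NULL check
	 * done on one register must carry over to every copy of the
	 * lookup result before it is dereferenced.
	 */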
Thomas Graf57a09bf2016-10-18 19:51:19 +02004433 {
4434 "multiple registers share map_lookup_elem result",
4435 .insns = {
4436 BPF_MOV64_IMM(BPF_REG_1, 10),
4437 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4438 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4440 BPF_LD_MAP_FD(BPF_REG_1, 0),
4441 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4442 BPF_FUNC_map_lookup_elem),
4443 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4444 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4445 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4446 BPF_EXIT_INSN(),
4447 },
4448 .fixup_map1 = { 4 },
4449 .result = ACCEPT,
4450 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4451 },
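	/*
	 * The three "alu ops" tests modify the still-unchecked lookup
	 * result (add/sub, AND, LSH) before the NULL test; each must fail
	 * with "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL".
	 */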
4452 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004453 "alu ops on ptr_to_map_value_or_null, 1",
4454 .insns = {
4455 BPF_MOV64_IMM(BPF_REG_1, 10),
4456 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4457 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4458 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4459 BPF_LD_MAP_FD(BPF_REG_1, 0),
4460 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4461 BPF_FUNC_map_lookup_elem),
4462 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
4464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
4465 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4466 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4467 BPF_EXIT_INSN(),
4468 },
4469 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004470 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004471 .result = REJECT,
4472 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4473 },
4474 {
4475 "alu ops on ptr_to_map_value_or_null, 2",
4476 .insns = {
4477 BPF_MOV64_IMM(BPF_REG_1, 10),
4478 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4479 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4481 BPF_LD_MAP_FD(BPF_REG_1, 0),
4482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4483 BPF_FUNC_map_lookup_elem),
4484 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4485 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
4486 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4487 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4488 BPF_EXIT_INSN(),
4489 },
4490 .fixup_map1 = { 4 },
4491	.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4492	.result = REJECT,
4493 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4494 },
4495 {
4496 "alu ops on ptr_to_map_value_or_null, 3",
4497 .insns = {
4498 BPF_MOV64_IMM(BPF_REG_1, 10),
4499 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4500 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4502 BPF_LD_MAP_FD(BPF_REG_1, 0),
4503 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4504 BPF_FUNC_map_lookup_elem),
4505 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4506 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4507 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4508 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4509 BPF_EXIT_INSN(),
4510 },
4511 .fixup_map1 = { 4 },
4512	.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4513	.result = REJECT,
4514 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4515 },
4516 {
4517	"invalid memory access with multiple map_lookup_elem calls",
4518 .insns = {
4519 BPF_MOV64_IMM(BPF_REG_1, 10),
4520 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4521 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4522 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4523 BPF_LD_MAP_FD(BPF_REG_1, 0),
4524 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4525 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4526 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4527 BPF_FUNC_map_lookup_elem),
4528 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4529 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4530 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4531 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4532 BPF_FUNC_map_lookup_elem),
4533 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4534 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4535 BPF_EXIT_INSN(),
4536 },
4537 .fixup_map1 = { 4 },
4538 .result = REJECT,
4539 .errstr = "R4 !read_ok",
4540 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4541 },
4542 {
4543 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4544 .insns = {
4545 BPF_MOV64_IMM(BPF_REG_1, 10),
4546 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4547 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4549 BPF_LD_MAP_FD(BPF_REG_1, 0),
4550 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4551 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4552 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4553 BPF_FUNC_map_lookup_elem),
4554 BPF_MOV64_IMM(BPF_REG_2, 10),
4555 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4556 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4557 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4558 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4559 BPF_FUNC_map_lookup_elem),
4560 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4561 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4562 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4563 BPF_EXIT_INSN(),
4564 },
4565 .fixup_map1 = { 4 },
4566 .result = ACCEPT,
4567 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4568 },
4569	{
4570 "invalid map access from else condition",
4571 .insns = {
4572 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4573 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4574 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4575 BPF_LD_MAP_FD(BPF_REG_1, 0),
4576 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4577 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4578 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4579 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4580 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4581 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4582 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4583 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4584 BPF_EXIT_INSN(),
4585 },
4586 .fixup_map2 = { 3 },
4587	.errstr = "R0 unbounded memory access",
4588	.result = REJECT,
4589	.errstr_unpriv = "R0 leaks addr",
4590	.result_unpriv = REJECT,
4591	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4592	},
4593	{
4594 "constant register |= constant should keep constant type",
4595 .insns = {
4596 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4598 BPF_MOV64_IMM(BPF_REG_2, 34),
4599 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4600 BPF_MOV64_IMM(BPF_REG_3, 0),
4601 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4602 BPF_EXIT_INSN(),
4603 },
4604 .result = ACCEPT,
4605 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4606 },
4607 {
4608 "constant register |= constant should not bypass stack boundary checks",
4609 .insns = {
4610 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4611 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4612 BPF_MOV64_IMM(BPF_REG_2, 34),
4613 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4614 BPF_MOV64_IMM(BPF_REG_3, 0),
4615 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4616 BPF_EXIT_INSN(),
4617 },
4618 .errstr = "invalid stack type R1 off=-48 access_size=58",
4619 .result = REJECT,
4620 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4621 },
4622 {
4623 "constant register |= constant register should keep constant type",
4624 .insns = {
4625 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4627 BPF_MOV64_IMM(BPF_REG_2, 34),
4628 BPF_MOV64_IMM(BPF_REG_4, 13),
4629 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4630 BPF_MOV64_IMM(BPF_REG_3, 0),
4631 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4632 BPF_EXIT_INSN(),
4633 },
4634 .result = ACCEPT,
4635 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4636 },
4637 {
4638 "constant register |= constant register should not bypass stack boundary checks",
4639 .insns = {
4640 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4641 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4642 BPF_MOV64_IMM(BPF_REG_2, 34),
4643 BPF_MOV64_IMM(BPF_REG_4, 24),
4644 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4645 BPF_MOV64_IMM(BPF_REG_3, 0),
4646 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4647 BPF_EXIT_INSN(),
4648 },
4649 .errstr = "invalid stack type R1 off=-48 access_size=58",
4650 .result = REJECT,
4651 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4652 },
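	/* Direct packet access tests: LWT_IN and LWT_OUT programs may only
	 * read packet data, while LWT_XMIT may also write to it.
	 */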
4653	{
4654 "invalid direct packet write for LWT_IN",
4655 .insns = {
4656 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4657 offsetof(struct __sk_buff, data)),
4658 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4659 offsetof(struct __sk_buff, data_end)),
4660 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4662 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4663 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4664 BPF_MOV64_IMM(BPF_REG_0, 0),
4665 BPF_EXIT_INSN(),
4666 },
4667 .errstr = "cannot write into packet",
4668 .result = REJECT,
4669 .prog_type = BPF_PROG_TYPE_LWT_IN,
4670 },
4671 {
4672 "invalid direct packet write for LWT_OUT",
4673 .insns = {
4674 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4675 offsetof(struct __sk_buff, data)),
4676 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4677 offsetof(struct __sk_buff, data_end)),
4678 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4680 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4681 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4682 BPF_MOV64_IMM(BPF_REG_0, 0),
4683 BPF_EXIT_INSN(),
4684 },
4685 .errstr = "cannot write into packet",
4686 .result = REJECT,
4687 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4688 },
4689 {
4690 "direct packet write for LWT_XMIT",
4691 .insns = {
4692 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4693 offsetof(struct __sk_buff, data)),
4694 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4695 offsetof(struct __sk_buff, data_end)),
4696 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4698 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4699 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4700 BPF_MOV64_IMM(BPF_REG_0, 0),
4701 BPF_EXIT_INSN(),
4702 },
4703 .result = ACCEPT,
4704 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4705 },
4706 {
4707 "direct packet read for LWT_IN",
4708 .insns = {
4709 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4710 offsetof(struct __sk_buff, data)),
4711 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4712 offsetof(struct __sk_buff, data_end)),
4713 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4715 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4716 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4717 BPF_MOV64_IMM(BPF_REG_0, 0),
4718 BPF_EXIT_INSN(),
4719 },
4720 .result = ACCEPT,
4721 .prog_type = BPF_PROG_TYPE_LWT_IN,
4722 },
4723 {
4724 "direct packet read for LWT_OUT",
4725 .insns = {
4726 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4727 offsetof(struct __sk_buff, data)),
4728 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4729 offsetof(struct __sk_buff, data_end)),
4730 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4731 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4732 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4733 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4734 BPF_MOV64_IMM(BPF_REG_0, 0),
4735 BPF_EXIT_INSN(),
4736 },
4737 .result = ACCEPT,
4738 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4739 },
4740 {
4741 "direct packet read for LWT_XMIT",
4742 .insns = {
4743 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4744 offsetof(struct __sk_buff, data)),
4745 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4746 offsetof(struct __sk_buff, data_end)),
4747 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4748 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4749 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4750 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4751 BPF_MOV64_IMM(BPF_REG_0, 0),
4752 BPF_EXIT_INSN(),
4753 },
4754 .result = ACCEPT,
4755 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4756 },
4757 {
4758	"overlapping checks for direct packet access",
4759 .insns = {
4760 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4761 offsetof(struct __sk_buff, data)),
4762 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4763 offsetof(struct __sk_buff, data_end)),
4764 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4766 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4767 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4768 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4769 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4770 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4771 BPF_MOV64_IMM(BPF_REG_0, 0),
4772 BPF_EXIT_INSN(),
4773 },
4774 .result = ACCEPT,
4775 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4776 },
4777 {
4778	"invalid access of tc_classid for LWT_IN",
4779 .insns = {
4780 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4781 offsetof(struct __sk_buff, tc_classid)),
4782 BPF_EXIT_INSN(),
4783 },
4784 .result = REJECT,
4785 .errstr = "invalid bpf_context access",
4786 },
4787 {
4788 "invalid access of tc_classid for LWT_OUT",
4789 .insns = {
4790 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4791 offsetof(struct __sk_buff, tc_classid)),
4792 BPF_EXIT_INSN(),
4793 },
4794 .result = REJECT,
4795 .errstr = "invalid bpf_context access",
4796 },
4797 {
4798 "invalid access of tc_classid for LWT_XMIT",
4799 .insns = {
4800 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4801 offsetof(struct __sk_buff, tc_classid)),
4802 BPF_EXIT_INSN(),
4803 },
4804 .result = REJECT,
4805 .errstr = "invalid bpf_context access",
4806 },
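	/* Pointer leak tests: unprivileged programs must not store map or
	 * stack pointers into the context or into map values, and BPF_XADD
	 * on the context is rejected for everyone.
	 */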
4807	{
4808	"leak pointer into ctx 1",
4809 .insns = {
4810 BPF_MOV64_IMM(BPF_REG_0, 0),
4811 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4812 offsetof(struct __sk_buff, cb[0])),
4813 BPF_LD_MAP_FD(BPF_REG_2, 0),
4814 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4815 offsetof(struct __sk_buff, cb[0])),
4816 BPF_EXIT_INSN(),
4817 },
4818 .fixup_map1 = { 2 },
4819 .errstr_unpriv = "R2 leaks addr into mem",
4820 .result_unpriv = REJECT,
4821	.result = REJECT,
4822	.errstr = "BPF_XADD stores into R1 context is not allowed",
4823	},
4824 {
4825 "leak pointer into ctx 2",
4826 .insns = {
4827 BPF_MOV64_IMM(BPF_REG_0, 0),
4828 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4829 offsetof(struct __sk_buff, cb[0])),
4830 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4831 offsetof(struct __sk_buff, cb[0])),
4832 BPF_EXIT_INSN(),
4833 },
4834 .errstr_unpriv = "R10 leaks addr into mem",
4835 .result_unpriv = REJECT,
4836	.result = REJECT,
4837	.errstr = "BPF_XADD stores into R1 context is not allowed",
4838	},
4839 {
4840 "leak pointer into ctx 3",
4841 .insns = {
4842 BPF_MOV64_IMM(BPF_REG_0, 0),
4843 BPF_LD_MAP_FD(BPF_REG_2, 0),
4844 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4845 offsetof(struct __sk_buff, cb[0])),
4846 BPF_EXIT_INSN(),
4847 },
4848 .fixup_map1 = { 1 },
4849 .errstr_unpriv = "R2 leaks addr into ctx",
4850 .result_unpriv = REJECT,
4851 .result = ACCEPT,
4852 },
4853 {
4854 "leak pointer into map val",
4855 .insns = {
4856 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4857 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4858 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4860 BPF_LD_MAP_FD(BPF_REG_1, 0),
4861 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4862 BPF_FUNC_map_lookup_elem),
4863 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4864 BPF_MOV64_IMM(BPF_REG_3, 0),
4865 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4866 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4867 BPF_MOV64_IMM(BPF_REG_0, 0),
4868 BPF_EXIT_INSN(),
4869 },
4870 .fixup_map1 = { 4 },
4871 .errstr_unpriv = "R6 leaks addr into mem",
4872 .result_unpriv = REJECT,
4873 .result = ACCEPT,
4874 },
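	/* Helper memory access tests: the size passed to bpf_probe_read()
	 * must stay within the looked-up map value; empty, negative and
	 * out-of-bound ranges are rejected.
	 */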
4875 {
4876	"helper access to map: full range",
4877 .insns = {
4878 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4879 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4880 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4881 BPF_LD_MAP_FD(BPF_REG_1, 0),
4882 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4883 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4884 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4885 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4886 BPF_MOV64_IMM(BPF_REG_3, 0),
4887 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4888 BPF_EXIT_INSN(),
4889 },
4890 .fixup_map2 = { 3 },
4891 .result = ACCEPT,
4892 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4893 },
4894 {
4895 "helper access to map: partial range",
4896 .insns = {
4897 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4899 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4900 BPF_LD_MAP_FD(BPF_REG_1, 0),
4901 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4902 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4903 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4904 BPF_MOV64_IMM(BPF_REG_2, 8),
4905 BPF_MOV64_IMM(BPF_REG_3, 0),
4906 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4907 BPF_EXIT_INSN(),
4908 },
4909 .fixup_map2 = { 3 },
4910 .result = ACCEPT,
4911 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4912 },
4913 {
4914 "helper access to map: empty range",
4915 .insns = {
4916 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4918 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4919 BPF_LD_MAP_FD(BPF_REG_1, 0),
4920 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4921	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4922	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4923	BPF_MOV64_IMM(BPF_REG_2, 0),
4924	BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4925	BPF_EXIT_INSN(),
4926 },
4927 .fixup_map2 = { 3 },
4928 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4929 .result = REJECT,
4930 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4931 },
4932 {
4933 "helper access to map: out-of-bound range",
4934 .insns = {
4935 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4936 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4937 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4938 BPF_LD_MAP_FD(BPF_REG_1, 0),
4939 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4940 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4941 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4942 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4943 BPF_MOV64_IMM(BPF_REG_3, 0),
4944 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4945 BPF_EXIT_INSN(),
4946 },
4947 .fixup_map2 = { 3 },
4948 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4949 .result = REJECT,
4950 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4951 },
4952 {
4953 "helper access to map: negative range",
4954 .insns = {
4955 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4956 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4957 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4958 BPF_LD_MAP_FD(BPF_REG_1, 0),
4959 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4960 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4961 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4962 BPF_MOV64_IMM(BPF_REG_2, -8),
4963 BPF_MOV64_IMM(BPF_REG_3, 0),
4964 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4965 BPF_EXIT_INSN(),
4966 },
4967 .fixup_map2 = { 3 },
4968	.errstr = "R2 min value is negative",
4969	.result = REJECT,
4970 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4971 },
4972 {
4973 "helper access to adjusted map (via const imm): full range",
4974 .insns = {
4975 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4977 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4978 BPF_LD_MAP_FD(BPF_REG_1, 0),
4979 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4980 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4981 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4983 offsetof(struct test_val, foo)),
4984 BPF_MOV64_IMM(BPF_REG_2,
4985 sizeof(struct test_val) -
4986 offsetof(struct test_val, foo)),
4987 BPF_MOV64_IMM(BPF_REG_3, 0),
4988 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4989 BPF_EXIT_INSN(),
4990 },
4991 .fixup_map2 = { 3 },
4992 .result = ACCEPT,
4993 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4994 },
4995 {
4996 "helper access to adjusted map (via const imm): partial range",
4997 .insns = {
4998 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5000 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5001 BPF_LD_MAP_FD(BPF_REG_1, 0),
5002 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5003 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5004 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5006 offsetof(struct test_val, foo)),
5007 BPF_MOV64_IMM(BPF_REG_2, 8),
5008 BPF_MOV64_IMM(BPF_REG_3, 0),
5009 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5010 BPF_EXIT_INSN(),
5011 },
5012 .fixup_map2 = { 3 },
5013 .result = ACCEPT,
5014 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5015 },
5016 {
5017 "helper access to adjusted map (via const imm): empty range",
5018 .insns = {
5019 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5020 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5021 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5022 BPF_LD_MAP_FD(BPF_REG_1, 0),
5023 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5024	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5025	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5026	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5027	offsetof(struct test_val, foo)),
5028	BPF_MOV64_IMM(BPF_REG_2, 0),
5029	BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5030	BPF_EXIT_INSN(),
5031	},
5032	.fixup_map2 = { 3 },
5033	.errstr = "invalid access to map value, value_size=48 off=4 size=0",
5034	.result = REJECT,
5035 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5036 },
5037 {
5038 "helper access to adjusted map (via const imm): out-of-bound range",
5039 .insns = {
5040 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5041 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5042 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5043 BPF_LD_MAP_FD(BPF_REG_1, 0),
5044 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5045 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5046 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5048 offsetof(struct test_val, foo)),
5049 BPF_MOV64_IMM(BPF_REG_2,
5050 sizeof(struct test_val) -
5051 offsetof(struct test_val, foo) + 8),
5052 BPF_MOV64_IMM(BPF_REG_3, 0),
5053 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5054 BPF_EXIT_INSN(),
5055 },
5056 .fixup_map2 = { 3 },
5057 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5058 .result = REJECT,
5059 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5060 },
5061 {
5062 "helper access to adjusted map (via const imm): negative range (> adjustment)",
5063 .insns = {
5064 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5065 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5066 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5067 BPF_LD_MAP_FD(BPF_REG_1, 0),
5068 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5069 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5070 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5071 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5072 offsetof(struct test_val, foo)),
5073 BPF_MOV64_IMM(BPF_REG_2, -8),
5074 BPF_MOV64_IMM(BPF_REG_3, 0),
5075 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5076 BPF_EXIT_INSN(),
5077 },
5078 .fixup_map2 = { 3 },
5079	.errstr = "R2 min value is negative",
5080	.result = REJECT,
5081 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5082 },
5083 {
5084 "helper access to adjusted map (via const imm): negative range (< adjustment)",
5085 .insns = {
5086 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5087 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5088 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5089 BPF_LD_MAP_FD(BPF_REG_1, 0),
5090 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5091 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5092 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5093 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5094 offsetof(struct test_val, foo)),
5095 BPF_MOV64_IMM(BPF_REG_2, -1),
5096 BPF_MOV64_IMM(BPF_REG_3, 0),
5097 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5098 BPF_EXIT_INSN(),
5099 },
5100 .fixup_map2 = { 3 },
5101	.errstr = "R2 min value is negative",
5102	.result = REJECT,
5103 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5104 },
5105 {
5106 "helper access to adjusted map (via const reg): full range",
5107 .insns = {
5108 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5110 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5111 BPF_LD_MAP_FD(BPF_REG_1, 0),
5112 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5114 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5115 BPF_MOV64_IMM(BPF_REG_3,
5116 offsetof(struct test_val, foo)),
5117 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5118 BPF_MOV64_IMM(BPF_REG_2,
5119 sizeof(struct test_val) -
5120 offsetof(struct test_val, foo)),
5121 BPF_MOV64_IMM(BPF_REG_3, 0),
5122 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5123 BPF_EXIT_INSN(),
5124 },
5125 .fixup_map2 = { 3 },
5126 .result = ACCEPT,
5127 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5128 },
5129 {
5130 "helper access to adjusted map (via const reg): partial range",
5131 .insns = {
5132 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5133 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5134 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5135 BPF_LD_MAP_FD(BPF_REG_1, 0),
5136 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5137 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5138 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5139 BPF_MOV64_IMM(BPF_REG_3,
5140 offsetof(struct test_val, foo)),
5141 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5142 BPF_MOV64_IMM(BPF_REG_2, 8),
5143 BPF_MOV64_IMM(BPF_REG_3, 0),
5144 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5145 BPF_EXIT_INSN(),
5146 },
5147 .fixup_map2 = { 3 },
5148 .result = ACCEPT,
5149 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5150 },
5151 {
5152 "helper access to adjusted map (via const reg): empty range",
5153 .insns = {
5154 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5155 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5156 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5157 BPF_LD_MAP_FD(BPF_REG_1, 0),
5158 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5159	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5160	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5161	BPF_MOV64_IMM(BPF_REG_3, 0),
5162	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5163	BPF_MOV64_IMM(BPF_REG_2, 0),
5164	BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5165	BPF_EXIT_INSN(),
5166	},
5167	.fixup_map2 = { 3 },
5168	.errstr = "R1 min value is outside of the array range",
5169	.result = REJECT,
5170 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5171 },
5172 {
5173 "helper access to adjusted map (via const reg): out-of-bound range",
5174 .insns = {
5175 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5177 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5178 BPF_LD_MAP_FD(BPF_REG_1, 0),
5179 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5180 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5181 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5182 BPF_MOV64_IMM(BPF_REG_3,
5183 offsetof(struct test_val, foo)),
5184 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5185 BPF_MOV64_IMM(BPF_REG_2,
5186 sizeof(struct test_val) -
5187 offsetof(struct test_val, foo) + 8),
5188 BPF_MOV64_IMM(BPF_REG_3, 0),
5189 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5190 BPF_EXIT_INSN(),
5191 },
5192 .fixup_map2 = { 3 },
5193 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5194 .result = REJECT,
5195 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5196 },
5197 {
5198 "helper access to adjusted map (via const reg): negative range (> adjustment)",
5199 .insns = {
5200 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5201 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5202 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5203 BPF_LD_MAP_FD(BPF_REG_1, 0),
5204 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5205 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5206 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5207 BPF_MOV64_IMM(BPF_REG_3,
5208 offsetof(struct test_val, foo)),
5209 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5210 BPF_MOV64_IMM(BPF_REG_2, -8),
5211 BPF_MOV64_IMM(BPF_REG_3, 0),
5212 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5213 BPF_EXIT_INSN(),
5214 },
5215 .fixup_map2 = { 3 },
5216	.errstr = "R2 min value is negative",
5217	.result = REJECT,
5218 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5219 },
5220 {
5221 "helper access to adjusted map (via const reg): negative range (< adjustment)",
5222 .insns = {
5223 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5224 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5225 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5226 BPF_LD_MAP_FD(BPF_REG_1, 0),
5227 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5228 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5229 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5230 BPF_MOV64_IMM(BPF_REG_3,
5231 offsetof(struct test_val, foo)),
5232 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5233 BPF_MOV64_IMM(BPF_REG_2, -1),
5234 BPF_MOV64_IMM(BPF_REG_3, 0),
5235 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5236 BPF_EXIT_INSN(),
5237 },
5238 .fixup_map2 = { 3 },
5239	.errstr = "R2 min value is negative",
5240	.result = REJECT,
5241 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5242 },
5243 {
5244 "helper access to adjusted map (via variable): full range",
5245 .insns = {
5246 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5247 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5248 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5249 BPF_LD_MAP_FD(BPF_REG_1, 0),
5250 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5251 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5252 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5253 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5254 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5255 offsetof(struct test_val, foo), 4),
5256 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5257 BPF_MOV64_IMM(BPF_REG_2,
5258 sizeof(struct test_val) -
5259 offsetof(struct test_val, foo)),
5260 BPF_MOV64_IMM(BPF_REG_3, 0),
5261 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5262 BPF_EXIT_INSN(),
5263 },
5264 .fixup_map2 = { 3 },
5265 .result = ACCEPT,
5266 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5267 },
5268 {
5269 "helper access to adjusted map (via variable): partial range",
5270 .insns = {
5271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5273 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5274 BPF_LD_MAP_FD(BPF_REG_1, 0),
5275 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5276 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5277 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5278 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5279 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5280 offsetof(struct test_val, foo), 4),
5281 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5282 BPF_MOV64_IMM(BPF_REG_2, 8),
5283 BPF_MOV64_IMM(BPF_REG_3, 0),
5284 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5285 BPF_EXIT_INSN(),
5286 },
5287 .fixup_map2 = { 3 },
5288 .result = ACCEPT,
5289 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5290 },
5291 {
5292 "helper access to adjusted map (via variable): empty range",
5293 .insns = {
5294 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5296 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5297 BPF_LD_MAP_FD(BPF_REG_1, 0),
5298 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5299	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5300	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5301	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5302	BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5303	offsetof(struct test_val, foo), 3),
5304	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5305	BPF_MOV64_IMM(BPF_REG_2, 0),
5306	BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5307	BPF_EXIT_INSN(),
5308	},
5309	.fixup_map2 = { 3 },
5310	.errstr = "R1 min value is outside of the array range",
5311	.result = REJECT,
5312 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5313 },
5314 {
5315 "helper access to adjusted map (via variable): no max check",
5316 .insns = {
5317 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5319 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5320 BPF_LD_MAP_FD(BPF_REG_1, 0),
5321 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5322 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5323 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5324 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5325 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5326	BPF_MOV64_IMM(BPF_REG_2, 1),
5327	BPF_MOV64_IMM(BPF_REG_3, 0),
5328 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5329 BPF_EXIT_INSN(),
5330 },
5331 .fixup_map2 = { 3 },
5332	.errstr = "R1 unbounded memory access",
5333	.result = REJECT,
5334 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5335 },
5336 {
5337 "helper access to adjusted map (via variable): wrong max check",
5338 .insns = {
5339 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5341 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5342 BPF_LD_MAP_FD(BPF_REG_1, 0),
5343 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5344 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5345 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5346 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5347 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5348 offsetof(struct test_val, foo), 4),
5349 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5350 BPF_MOV64_IMM(BPF_REG_2,
5351 sizeof(struct test_val) -
5352 offsetof(struct test_val, foo) + 1),
5353 BPF_MOV64_IMM(BPF_REG_3, 0),
5354 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5355 BPF_EXIT_INSN(),
5356 },
5357 .fixup_map2 = { 3 },
5358 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
5359 .result = REJECT,
5360 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5361 },
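	/* Bounds checks on the access offset using <, <=, s< and s<=; a
	 * signed check alone must not let a negative offset through.
	 */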
5362	{
5363	"helper access to map: bounds check using <, good access",
5364 .insns = {
5365 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5367 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5368 BPF_LD_MAP_FD(BPF_REG_1, 0),
5369 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5370 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5371 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5372 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5373 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
5374 BPF_MOV64_IMM(BPF_REG_0, 0),
5375 BPF_EXIT_INSN(),
5376 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5377 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5378 BPF_MOV64_IMM(BPF_REG_0, 0),
5379 BPF_EXIT_INSN(),
5380 },
5381 .fixup_map2 = { 3 },
5382 .result = ACCEPT,
5383 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5384 },
5385 {
5386 "helper access to map: bounds check using <, bad access",
5387 .insns = {
5388 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5389 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5390 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5391 BPF_LD_MAP_FD(BPF_REG_1, 0),
5392 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5393 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5394 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5395 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5396 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
5397 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5398 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5399 BPF_MOV64_IMM(BPF_REG_0, 0),
5400 BPF_EXIT_INSN(),
5401 BPF_MOV64_IMM(BPF_REG_0, 0),
5402 BPF_EXIT_INSN(),
5403 },
5404 .fixup_map2 = { 3 },
5405 .result = REJECT,
5406 .errstr = "R1 unbounded memory access",
5407 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5408 },
5409 {
5410 "helper access to map: bounds check using <=, good access",
5411 .insns = {
5412 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5414 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5415 BPF_LD_MAP_FD(BPF_REG_1, 0),
5416 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5417 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5418 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5419 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5420 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
5421 BPF_MOV64_IMM(BPF_REG_0, 0),
5422 BPF_EXIT_INSN(),
5423 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5424 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5425 BPF_MOV64_IMM(BPF_REG_0, 0),
5426 BPF_EXIT_INSN(),
5427 },
5428 .fixup_map2 = { 3 },
5429 .result = ACCEPT,
5430 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5431 },
5432 {
5433 "helper access to map: bounds check using <=, bad access",
5434 .insns = {
5435 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5437 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5438 BPF_LD_MAP_FD(BPF_REG_1, 0),
5439 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5440 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5441 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5442 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5443 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
5444 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5445 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5446 BPF_MOV64_IMM(BPF_REG_0, 0),
5447 BPF_EXIT_INSN(),
5448 BPF_MOV64_IMM(BPF_REG_0, 0),
5449 BPF_EXIT_INSN(),
5450 },
5451 .fixup_map2 = { 3 },
5452 .result = REJECT,
5453 .errstr = "R1 unbounded memory access",
5454 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5455 },
5456 {
5457 "helper access to map: bounds check using s<, good access",
5458 .insns = {
5459 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5461 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5462 BPF_LD_MAP_FD(BPF_REG_1, 0),
5463 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5464 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5465 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5466 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5467 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5468 BPF_MOV64_IMM(BPF_REG_0, 0),
5469 BPF_EXIT_INSN(),
5470 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
5471 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5472 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5473 BPF_MOV64_IMM(BPF_REG_0, 0),
5474 BPF_EXIT_INSN(),
5475 },
5476 .fixup_map2 = { 3 },
5477 .result = ACCEPT,
5478 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5479 },
5480 {
5481 "helper access to map: bounds check using s<, good access 2",
5482 .insns = {
5483 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5484 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5485 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5486 BPF_LD_MAP_FD(BPF_REG_1, 0),
5487 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5488 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5489 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5490 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5491 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5492 BPF_MOV64_IMM(BPF_REG_0, 0),
5493 BPF_EXIT_INSN(),
5494 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5495 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5496 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5497 BPF_MOV64_IMM(BPF_REG_0, 0),
5498 BPF_EXIT_INSN(),
5499 },
5500 .fixup_map2 = { 3 },
5501 .result = ACCEPT,
5502 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5503 },
5504 {
5505 "helper access to map: bounds check using s<, bad access",
5506 .insns = {
5507 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5509 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5510 BPF_LD_MAP_FD(BPF_REG_1, 0),
5511 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5512 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5513 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5514 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5515 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5516 BPF_MOV64_IMM(BPF_REG_0, 0),
5517 BPF_EXIT_INSN(),
5518 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5519 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5520 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5521 BPF_MOV64_IMM(BPF_REG_0, 0),
5522 BPF_EXIT_INSN(),
5523 },
5524 .fixup_map2 = { 3 },
5525 .result = REJECT,
5526 .errstr = "R1 min value is negative",
5527 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5528 },
5529 {
5530 "helper access to map: bounds check using s<=, good access",
5531 .insns = {
5532 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5534 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5535 BPF_LD_MAP_FD(BPF_REG_1, 0),
5536 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5537 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5538 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5539 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5540 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5541 BPF_MOV64_IMM(BPF_REG_0, 0),
5542 BPF_EXIT_INSN(),
5543 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5544 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5545 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5546 BPF_MOV64_IMM(BPF_REG_0, 0),
5547 BPF_EXIT_INSN(),
5548 },
5549 .fixup_map2 = { 3 },
5550 .result = ACCEPT,
5551 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5552 },
5553 {
5554 "helper access to map: bounds check using s<=, good access 2",
5555 .insns = {
5556 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5558 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5559 BPF_LD_MAP_FD(BPF_REG_1, 0),
5560 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5561 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5562 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5563 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5564 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5565 BPF_MOV64_IMM(BPF_REG_0, 0),
5566 BPF_EXIT_INSN(),
5567 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5568 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5569 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5570 BPF_MOV64_IMM(BPF_REG_0, 0),
5571 BPF_EXIT_INSN(),
5572 },
5573 .fixup_map2 = { 3 },
5574 .result = ACCEPT,
5575 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5576 },
5577 {
5578 "helper access to map: bounds check using s<=, bad access",
5579 .insns = {
5580 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5581 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5582 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5583 BPF_LD_MAP_FD(BPF_REG_1, 0),
5584 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5585 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5586 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5587 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5588 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5589 BPF_MOV64_IMM(BPF_REG_0, 0),
5590 BPF_EXIT_INSN(),
5591 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5592 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5593 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5594 BPF_MOV64_IMM(BPF_REG_0, 0),
5595 BPF_EXIT_INSN(),
5596 },
5597 .fixup_map2 = { 3 },
5598 .result = REJECT,
5599 .errstr = "R1 min value is negative",
5600 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5601 },
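	/* A map value pointer may itself be passed as the key or value
	 * argument of map lookup/update helpers, subject to the usual
	 * size and offset checks against the map's value_size.
	 */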
5602 {
5603	"map lookup helper access to map",
5604 .insns = {
5605 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5607 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5608 BPF_LD_MAP_FD(BPF_REG_1, 0),
5609 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5610 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5611 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5612 BPF_LD_MAP_FD(BPF_REG_1, 0),
5613 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5614 BPF_EXIT_INSN(),
5615 },
5616 .fixup_map3 = { 3, 8 },
5617 .result = ACCEPT,
5618 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5619 },
5620 {
5621 "map update helper access to map",
5622 .insns = {
5623 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5625 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5626 BPF_LD_MAP_FD(BPF_REG_1, 0),
5627 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5628 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5629 BPF_MOV64_IMM(BPF_REG_4, 0),
5630 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
5631 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5632 BPF_LD_MAP_FD(BPF_REG_1, 0),
5633 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
5634 BPF_EXIT_INSN(),
5635 },
5636 .fixup_map3 = { 3, 10 },
5637 .result = ACCEPT,
5638 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5639 },
5640 {
5641 "map update helper access to map: wrong size",
5642 .insns = {
5643 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5645 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5646 BPF_LD_MAP_FD(BPF_REG_1, 0),
5647 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5648 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5649 BPF_MOV64_IMM(BPF_REG_4, 0),
5650 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
5651 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5652 BPF_LD_MAP_FD(BPF_REG_1, 0),
5653 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
5654 BPF_EXIT_INSN(),
5655 },
5656 .fixup_map1 = { 3 },
5657 .fixup_map3 = { 10 },
5658 .result = REJECT,
5659 .errstr = "invalid access to map value, value_size=8 off=0 size=16",
5660 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5661 },
5662 {
5663 "map helper access to adjusted map (via const imm)",
5664 .insns = {
5665 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5666 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5667 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5668 BPF_LD_MAP_FD(BPF_REG_1, 0),
5669 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5670 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5671 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
5673 offsetof(struct other_val, bar)),
5674 BPF_LD_MAP_FD(BPF_REG_1, 0),
5675 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5676 BPF_EXIT_INSN(),
5677 },
5678 .fixup_map3 = { 3, 9 },
5679 .result = ACCEPT,
5680 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5681 },
5682 {
5683 "map helper access to adjusted map (via const imm): out-of-bound 1",
5684 .insns = {
5685 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5687 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5688 BPF_LD_MAP_FD(BPF_REG_1, 0),
5689 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5690 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5691 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5692 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
5693 sizeof(struct other_val) - 4),
5694 BPF_LD_MAP_FD(BPF_REG_1, 0),
5695 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5696 BPF_EXIT_INSN(),
5697 },
5698 .fixup_map3 = { 3, 9 },
5699 .result = REJECT,
5700 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
5701 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5702 },
5703 {
5704 "map helper access to adjusted map (via const imm): out-of-bound 2",
5705 .insns = {
5706 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5707 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5708 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5709 BPF_LD_MAP_FD(BPF_REG_1, 0),
5710 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5711 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5712 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5714 BPF_LD_MAP_FD(BPF_REG_1, 0),
5715 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5716 BPF_EXIT_INSN(),
5717 },
5718 .fixup_map3 = { 3, 9 },
5719 .result = REJECT,
5720 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
5721 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5722 },
5723 {
5724 "map helper access to adjusted map (via const reg)",
5725 .insns = {
5726 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5728 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5729 BPF_LD_MAP_FD(BPF_REG_1, 0),
5730 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5731 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5732 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5733 BPF_MOV64_IMM(BPF_REG_3,
5734 offsetof(struct other_val, bar)),
5735 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
5736 BPF_LD_MAP_FD(BPF_REG_1, 0),
5737 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5738 BPF_EXIT_INSN(),
5739 },
5740 .fixup_map3 = { 3, 10 },
5741 .result = ACCEPT,
5742 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5743 },
5744 {
5745 "map helper access to adjusted map (via const reg): out-of-bound 1",
5746 .insns = {
5747 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5748 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5749 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5750 BPF_LD_MAP_FD(BPF_REG_1, 0),
5751 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5752 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5753 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5754 BPF_MOV64_IMM(BPF_REG_3,
5755 sizeof(struct other_val) - 4),
5756 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
5757 BPF_LD_MAP_FD(BPF_REG_1, 0),
5758 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5759 BPF_EXIT_INSN(),
5760 },
5761 .fixup_map3 = { 3, 10 },
5762 .result = REJECT,
5763 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
5764 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5765 },
5766 {
5767 "map helper access to adjusted map (via const reg): out-of-bound 2",
5768 .insns = {
5769 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5770 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5771 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5772 BPF_LD_MAP_FD(BPF_REG_1, 0),
5773 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5774 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5775 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5776 BPF_MOV64_IMM(BPF_REG_3, -4),
5777 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
5778 BPF_LD_MAP_FD(BPF_REG_1, 0),
5779 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5780 BPF_EXIT_INSN(),
5781 },
5782 .fixup_map3 = { 3, 10 },
5783 .result = REJECT,
5784 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
5785 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5786 },
5787 {
5788 "map helper access to adjusted map (via variable)",
5789 .insns = {
5790 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5792 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5793 BPF_LD_MAP_FD(BPF_REG_1, 0),
5794 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5796 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5797 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5798 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5799 offsetof(struct other_val, bar), 4),
5800 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
5801 BPF_LD_MAP_FD(BPF_REG_1, 0),
5802 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5803 BPF_EXIT_INSN(),
5804 },
5805 .fixup_map3 = { 3, 11 },
5806 .result = ACCEPT,
5807 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5808 },
5809 {
5810 "map helper access to adjusted map (via variable): no max check",
5811 .insns = {
5812 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5813 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5814 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5815 BPF_LD_MAP_FD(BPF_REG_1, 0),
5816 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5817 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5818 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5819 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5820 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
5821 BPF_LD_MAP_FD(BPF_REG_1, 0),
5822 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5823 BPF_EXIT_INSN(),
5824 },
5825 .fixup_map3 = { 3, 10 },
5826 .result = REJECT,
5827 .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
5828 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5829 },
5830 {
5831 "map helper access to adjusted map (via variable): wrong max check",
5832 .insns = {
5833 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5834 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5835 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5836 BPF_LD_MAP_FD(BPF_REG_1, 0),
5837 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5838 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5839 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
5840 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5841 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5842 offsetof(struct other_val, bar) + 1, 4),
5843 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
5844 BPF_LD_MAP_FD(BPF_REG_1, 0),
5845 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5846 BPF_EXIT_INSN(),
5847 },
5848 .fixup_map3 = { 3, 11 },
5849 .result = REJECT,
5850 .errstr = "invalid access to map value, value_size=16 off=9 size=8",
5851 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5852 },
5853 {
5854	"map element value is preserved across register spilling",
5855 .insns = {
5856 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5857 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5858 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5859 BPF_LD_MAP_FD(BPF_REG_1, 0),
5860 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5861 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5862 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5863 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5865 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5866 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5867 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5868 BPF_EXIT_INSN(),
5869 },
5870 .fixup_map2 = { 3 },
5871 .errstr_unpriv = "R0 leaks addr",
5872 .result = ACCEPT,
5873 .result_unpriv = REJECT,
5874 },
5875 {
5876	"map element value or null is marked on register spilling",
5877 .insns = {
5878 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5879 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5880 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5881 BPF_LD_MAP_FD(BPF_REG_1, 0),
5882 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5883 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5885 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5886 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5887 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5888 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5889 BPF_EXIT_INSN(),
5890 },
5891 .fixup_map2 = { 3 },
5892 .errstr_unpriv = "R0 leaks addr",
5893 .result = ACCEPT,
5894 .result_unpriv = REJECT,
5895 },
5896 {
5897 "map element value store of cleared call register",
5898 .insns = {
5899 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5901 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5902 BPF_LD_MAP_FD(BPF_REG_1, 0),
5903 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5904 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5905 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5906 BPF_EXIT_INSN(),
5907 },
5908 .fixup_map2 = { 3 },
5909 .errstr_unpriv = "R1 !read_ok",
5910 .errstr = "R1 !read_ok",
5911 .result = REJECT,
5912 .result_unpriv = REJECT,
5913 },
5914 {
5915 "map element value with unaligned store",
5916 .insns = {
5917 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5918 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5919 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5920 BPF_LD_MAP_FD(BPF_REG_1, 0),
5921 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5922 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5924 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5925 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5926 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5927 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5928 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5929 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5930 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5931 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5932 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5933 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5934 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5935 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5936 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5937 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5938 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5939 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5940 BPF_EXIT_INSN(),
5941 },
5942 .fixup_map2 = { 3 },
5943	.errstr_unpriv = "R0 leaks addr",
5944	.result = ACCEPT,
5945 .result_unpriv = REJECT,
5946 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5947 },
5948 {
5949 "map element value with unaligned load",
5950 .insns = {
5951 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5953 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5954 BPF_LD_MAP_FD(BPF_REG_1, 0),
5955 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5956 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5957 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5958 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5959 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5960 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5961 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5962 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5963 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5964 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5966 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5967 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5968 BPF_EXIT_INSN(),
5969 },
5970 .fixup_map2 = { 3 },
5971	.errstr_unpriv = "R0 leaks addr",
5972	.result = ACCEPT,
5973 .result_unpriv = REJECT,
5974 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5975 },
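	/* Illegal ALU ops on a map value pointer: &=, 32-bit add and /= are
	 * rejected outright, while a byte swap or an unbounded xadd turns
	 * the pointer into an unknown scalar that cannot be dereferenced.
	 */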
5976 {
5977 "map element value illegal alu op, 1",
5978 .insns = {
5979 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5980 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5981 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5982 BPF_LD_MAP_FD(BPF_REG_1, 0),
5983 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5984 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5985 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5986 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5987 BPF_EXIT_INSN(),
5988 },
5989 .fixup_map2 = { 3 },
5990	.errstr = "R0 bitwise operator &= on pointer",
5991	.result = REJECT,
5992	},
5993 {
5994 "map element value illegal alu op, 2",
5995 .insns = {
5996 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5998 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5999 BPF_LD_MAP_FD(BPF_REG_1, 0),
6000 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6001 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6002 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
6003 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6004 BPF_EXIT_INSN(),
6005 },
6006 .fixup_map2 = { 3 },
6007	.errstr = "R0 32-bit pointer arithmetic prohibited",
6008	.result = REJECT,
6009	},
6010 {
6011 "map element value illegal alu op, 3",
6012 .insns = {
6013 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6015 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6016 BPF_LD_MAP_FD(BPF_REG_1, 0),
6017 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6018 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6019 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
6020 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6021 BPF_EXIT_INSN(),
6022 },
6023 .fixup_map2 = { 3 },
6024	.errstr = "R0 pointer arithmetic with /= operator",
6025	.result = REJECT,
6026	},
6027 {
6028 "map element value illegal alu op, 4",
6029 .insns = {
6030 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6032 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6033 BPF_LD_MAP_FD(BPF_REG_1, 0),
6034 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6035 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6036 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
6037 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6038 BPF_EXIT_INSN(),
6039 },
6040 .fixup_map2 = { 3 },
6041 .errstr_unpriv = "R0 pointer arithmetic prohibited",
6042 .errstr = "invalid mem access 'inv'",
6043 .result = REJECT,
6044 .result_unpriv = REJECT,
6045 },
6046 {
6047 "map element value illegal alu op, 5",
6048 .insns = {
6049 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6050 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6051 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6052 BPF_LD_MAP_FD(BPF_REG_1, 0),
6053 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6054 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6055 BPF_MOV64_IMM(BPF_REG_3, 4096),
6056 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6058 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6059 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
6060 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
6061 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6062 BPF_EXIT_INSN(),
6063 },
6064 .fixup_map2 = { 3 },
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006065 .errstr = "R0 invalid mem access 'inv'",
6066 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006067 },
6068 {
6069 "map element value is preserved across register spilling",
Gianluca Borellof0318d02017-01-09 10:19:48 -08006070 .insns = {
6071 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6072 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6073 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6074 BPF_LD_MAP_FD(BPF_REG_1, 0),
6075 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6076 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
6078 offsetof(struct test_val, foo)),
6079 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6080 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6082 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6083 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6084 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6085 BPF_EXIT_INSN(),
6086 },
6087 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006088 .errstr_unpriv = "R0 leaks addr",
Gianluca Borellof0318d02017-01-09 10:19:48 -08006089 .result = ACCEPT,
6090 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006091 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Gianluca Borellof0318d02017-01-09 10:19:48 -08006092 },
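	/* "helper access to variable memory" tests: r1 is a stack or map
	 * value buffer and r2 a variable size passed to bpf_probe_read().
	 * The size must be provably bounded to the buffer (via AND masking
	 * and/or conditional jumps) and the stack bytes initialized,
	 * otherwise the verifier rejects the call.
	 */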
Gianluca Borello06c1c042017-01-09 10:19:49 -08006093 {
6094 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
6095 .insns = {
6096 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6097 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6098 BPF_MOV64_IMM(BPF_REG_0, 0),
6099 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6100 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6101 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6102 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6103 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6104 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6105 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6106 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6107 BPF_MOV64_IMM(BPF_REG_2, 16),
6108 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6109 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6110 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6111 BPF_MOV64_IMM(BPF_REG_4, 0),
6112 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6113 BPF_MOV64_IMM(BPF_REG_3, 0),
6114 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6115 BPF_MOV64_IMM(BPF_REG_0, 0),
6116 BPF_EXIT_INSN(),
6117 },
6118 .result = ACCEPT,
6119 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6120 },
6121 {
6122 "helper access to variable memory: stack, bitwise AND, zero included",
6123 .insns = {
6124 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6126 BPF_MOV64_IMM(BPF_REG_2, 16),
6127 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6128 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6129 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6130 BPF_MOV64_IMM(BPF_REG_3, 0),
6131 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6132 BPF_EXIT_INSN(),
6133 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08006134 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006135 .result = REJECT,
6136 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6137 },
6138 {
6139 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
6140 .insns = {
6141 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6142 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6143 BPF_MOV64_IMM(BPF_REG_2, 16),
6144 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6145 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6146 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
6147 BPF_MOV64_IMM(BPF_REG_4, 0),
6148 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6149 BPF_MOV64_IMM(BPF_REG_3, 0),
6150 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6151 BPF_MOV64_IMM(BPF_REG_0, 0),
6152 BPF_EXIT_INSN(),
6153 },
6154 .errstr = "invalid stack type R1 off=-64 access_size=65",
6155 .result = REJECT,
6156 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6157 },
6158 {
6159 "helper access to variable memory: stack, JMP, correct bounds",
6160 .insns = {
6161 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6163 BPF_MOV64_IMM(BPF_REG_0, 0),
6164 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6165 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6166 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6167 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6168 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6169 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6170 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6171 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6172 BPF_MOV64_IMM(BPF_REG_2, 16),
6173 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6174 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6175 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
6176 BPF_MOV64_IMM(BPF_REG_4, 0),
6177 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6178 BPF_MOV64_IMM(BPF_REG_3, 0),
6179 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6180 BPF_MOV64_IMM(BPF_REG_0, 0),
6181 BPF_EXIT_INSN(),
6182 },
6183 .result = ACCEPT,
6184 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6185 },
6186 {
6187 "helper access to variable memory: stack, JMP (signed), correct bounds",
6188 .insns = {
6189 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6191 BPF_MOV64_IMM(BPF_REG_0, 0),
6192 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6193 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6194 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6195 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6196 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6197 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6198 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6199 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6200 BPF_MOV64_IMM(BPF_REG_2, 16),
6201 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6202 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6203 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
6204 BPF_MOV64_IMM(BPF_REG_4, 0),
6205 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6206 BPF_MOV64_IMM(BPF_REG_3, 0),
6207 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6208 BPF_MOV64_IMM(BPF_REG_0, 0),
6209 BPF_EXIT_INSN(),
6210 },
6211 .result = ACCEPT,
6212 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6213 },
6214 {
6215 "helper access to variable memory: stack, JMP, bounds + offset",
6216 .insns = {
6217 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6219 BPF_MOV64_IMM(BPF_REG_2, 16),
6220 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6221 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6222 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
6223 BPF_MOV64_IMM(BPF_REG_4, 0),
6224 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
6225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6226 BPF_MOV64_IMM(BPF_REG_3, 0),
6227 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6228 BPF_MOV64_IMM(BPF_REG_0, 0),
6229 BPF_EXIT_INSN(),
6230 },
6231 .errstr = "invalid stack type R1 off=-64 access_size=65",
6232 .result = REJECT,
6233 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6234 },
6235 {
6236 "helper access to variable memory: stack, JMP, wrong max",
6237 .insns = {
6238 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6239 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6240 BPF_MOV64_IMM(BPF_REG_2, 16),
6241 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6242 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6243 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
6244 BPF_MOV64_IMM(BPF_REG_4, 0),
6245 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6246 BPF_MOV64_IMM(BPF_REG_3, 0),
6247 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6248 BPF_MOV64_IMM(BPF_REG_0, 0),
6249 BPF_EXIT_INSN(),
6250 },
6251 .errstr = "invalid stack type R1 off=-64 access_size=65",
6252 .result = REJECT,
6253 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6254 },
6255 {
6256 "helper access to variable memory: stack, JMP, no max check",
6257 .insns = {
6258 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6260 BPF_MOV64_IMM(BPF_REG_2, 16),
6261 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6262 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6263 BPF_MOV64_IMM(BPF_REG_4, 0),
6264 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6265 BPF_MOV64_IMM(BPF_REG_3, 0),
6266 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6267 BPF_MOV64_IMM(BPF_REG_0, 0),
6268 BPF_EXIT_INSN(),
6269 },
Edward Creef65b1842017-08-07 15:27:12 +01006270 /* because max wasn't checked, signed min is negative */
6271 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006272 .result = REJECT,
6273 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6274 },
6275 {
6276 "helper access to variable memory: stack, JMP, no min check",
6277 .insns = {
6278 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6279 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6280 BPF_MOV64_IMM(BPF_REG_2, 16),
6281 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6282 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6283 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
6284 BPF_MOV64_IMM(BPF_REG_3, 0),
6285 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6286 BPF_MOV64_IMM(BPF_REG_0, 0),
6287 BPF_EXIT_INSN(),
6288 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08006289 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006290 .result = REJECT,
6291 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6292 },
6293 {
6294 "helper access to variable memory: stack, JMP (signed), no min check",
6295 .insns = {
6296 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6298 BPF_MOV64_IMM(BPF_REG_2, 16),
6299 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6300 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6301 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
6302 BPF_MOV64_IMM(BPF_REG_3, 0),
6303 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6304 BPF_MOV64_IMM(BPF_REG_0, 0),
6305 BPF_EXIT_INSN(),
6306 },
6307 .errstr = "R2 min value is negative",
6308 .result = REJECT,
6309 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6310 },
6311 {
6312 "helper access to variable memory: map, JMP, correct bounds",
6313 .insns = {
6314 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6316 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6317 BPF_LD_MAP_FD(BPF_REG_1, 0),
6318 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6319 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6320 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6321 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6322 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6323 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6324 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6325 sizeof(struct test_val), 4),
6326 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006327 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006328 BPF_MOV64_IMM(BPF_REG_3, 0),
6329 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6330 BPF_MOV64_IMM(BPF_REG_0, 0),
6331 BPF_EXIT_INSN(),
6332 },
6333 .fixup_map2 = { 3 },
6334 .result = ACCEPT,
6335 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6336 },
6337 {
6338 "helper access to variable memory: map, JMP, wrong max",
6339 .insns = {
6340 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6341 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6342 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6343 BPF_LD_MAP_FD(BPF_REG_1, 0),
6344 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6345 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6346 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6347 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6348 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6349 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6350 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6351 sizeof(struct test_val) + 1, 4),
6352 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006353 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006354 BPF_MOV64_IMM(BPF_REG_3, 0),
6355 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6356 BPF_MOV64_IMM(BPF_REG_0, 0),
6357 BPF_EXIT_INSN(),
6358 },
6359 .fixup_map2 = { 3 },
6360 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
6361 .result = REJECT,
6362 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6363 },
6364 {
6365 "helper access to variable memory: map adjusted, JMP, correct bounds",
6366 .insns = {
6367 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6369 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6370 BPF_LD_MAP_FD(BPF_REG_1, 0),
6371 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6372 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6373 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6375 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6376 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6377 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6378 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6379 sizeof(struct test_val) - 20, 4),
6380 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006381 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006382 BPF_MOV64_IMM(BPF_REG_3, 0),
6383 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6384 BPF_MOV64_IMM(BPF_REG_0, 0),
6385 BPF_EXIT_INSN(),
6386 },
6387 .fixup_map2 = { 3 },
6388 .result = ACCEPT,
6389 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6390 },
6391 {
6392 "helper access to variable memory: map adjusted, JMP, wrong max",
6393 .insns = {
6394 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6396 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6397 BPF_LD_MAP_FD(BPF_REG_1, 0),
6398 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6399 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6400 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6401 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6402 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6403 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6404 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6405 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6406 sizeof(struct test_val) - 19, 4),
6407 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006408 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006409 BPF_MOV64_IMM(BPF_REG_3, 0),
6410 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6411 BPF_MOV64_IMM(BPF_REG_0, 0),
6412 BPF_EXIT_INSN(),
6413 },
6414 .fixup_map2 = { 3 },
6415 .errstr = "R1 min value is outside of the array range",
6416 .result = REJECT,
6417 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6418 },
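	/* The next cases use bpf_csum_diff(), whose buffer arguments are
	 * ARG_PTR_TO_MEM_OR_NULL per the test names: a NULL pointer is
	 * only acceptable together with size 0, while a valid stack, map
	 * value or packet pointer may be paired with a size that is, or
	 * may be, 0.
	 */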
6419 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006420 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Edward Creef65b1842017-08-07 15:27:12 +01006421 .insns = {
6422 BPF_MOV64_IMM(BPF_REG_1, 0),
6423 BPF_MOV64_IMM(BPF_REG_2, 0),
6424 BPF_MOV64_IMM(BPF_REG_3, 0),
6425 BPF_MOV64_IMM(BPF_REG_4, 0),
6426 BPF_MOV64_IMM(BPF_REG_5, 0),
6427 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6428 BPF_EXIT_INSN(),
6429 },
6430 .result = ACCEPT,
6431 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6432 },
6433 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006434 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006435 .insns = {
6436 BPF_MOV64_IMM(BPF_REG_1, 0),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08006437 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01006438 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6439 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006440 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6441 BPF_MOV64_IMM(BPF_REG_3, 0),
6442 BPF_MOV64_IMM(BPF_REG_4, 0),
6443 BPF_MOV64_IMM(BPF_REG_5, 0),
6444 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6445 BPF_EXIT_INSN(),
6446 },
Edward Creef65b1842017-08-07 15:27:12 +01006447 .errstr = "R1 type=inv expected=fp",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006448 .result = REJECT,
6449 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6450 },
6451 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006452 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006453 .insns = {
6454 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6456 BPF_MOV64_IMM(BPF_REG_2, 0),
6457 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6458 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
6459 BPF_MOV64_IMM(BPF_REG_3, 0),
6460 BPF_MOV64_IMM(BPF_REG_4, 0),
6461 BPF_MOV64_IMM(BPF_REG_5, 0),
6462 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6463 BPF_EXIT_INSN(),
6464 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08006465 .result = ACCEPT,
6466 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6467 },
6468 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006469 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006470 .insns = {
6471 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6472 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6473 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6474 BPF_LD_MAP_FD(BPF_REG_1, 0),
6475 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6476 BPF_FUNC_map_lookup_elem),
6477 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6478 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6479 BPF_MOV64_IMM(BPF_REG_2, 0),
6480 BPF_MOV64_IMM(BPF_REG_3, 0),
6481 BPF_MOV64_IMM(BPF_REG_4, 0),
6482 BPF_MOV64_IMM(BPF_REG_5, 0),
6483 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6484 BPF_EXIT_INSN(),
6485 },
6486 .fixup_map1 = { 3 },
6487 .result = ACCEPT,
6488 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6489 },
6490 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006491 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006492 .insns = {
6493 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6494 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6496 BPF_LD_MAP_FD(BPF_REG_1, 0),
6497 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6498 BPF_FUNC_map_lookup_elem),
6499 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6500 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6501 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
6502 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6504 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6505 BPF_MOV64_IMM(BPF_REG_3, 0),
6506 BPF_MOV64_IMM(BPF_REG_4, 0),
6507 BPF_MOV64_IMM(BPF_REG_5, 0),
6508 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6509 BPF_EXIT_INSN(),
6510 },
6511 .fixup_map1 = { 3 },
6512 .result = ACCEPT,
6513 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6514 },
6515 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006516 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006517 .insns = {
6518 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6519 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6521 BPF_LD_MAP_FD(BPF_REG_1, 0),
6522 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6523 BPF_FUNC_map_lookup_elem),
6524 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6525 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6526 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6527 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6528 BPF_MOV64_IMM(BPF_REG_3, 0),
6529 BPF_MOV64_IMM(BPF_REG_4, 0),
6530 BPF_MOV64_IMM(BPF_REG_5, 0),
6531 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6532 BPF_EXIT_INSN(),
6533 },
6534 .fixup_map1 = { 3 },
6535 .result = ACCEPT,
6536 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6537 },
6538 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006539 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006540 .insns = {
6541 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6542 offsetof(struct __sk_buff, data)),
6543 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6544 offsetof(struct __sk_buff, data_end)),
6545 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
6546 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6547 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
6548 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
6549 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
6550 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6551 BPF_MOV64_IMM(BPF_REG_3, 0),
6552 BPF_MOV64_IMM(BPF_REG_4, 0),
6553 BPF_MOV64_IMM(BPF_REG_5, 0),
6554 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6555 BPF_EXIT_INSN(),
6556 },
6557 .result = ACCEPT,
Gianluca Borello06c1c042017-01-09 10:19:49 -08006558 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006559 .retval = 0 /* csum_diff of 64-byte packet */,
Gianluca Borello06c1c042017-01-09 10:19:49 -08006560 },
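	/* Same checks against bpf_probe_read(), whose destination is not
	 * ARG_PTR_TO_MEM_OR_NULL: a NULL pointer is rejected even with
	 * size 0, while a valid stack or map value pointer is accepted
	 * with a size of 0 or a size that may be 0.
	 */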
6561 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006562 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6563 .insns = {
6564 BPF_MOV64_IMM(BPF_REG_1, 0),
6565 BPF_MOV64_IMM(BPF_REG_2, 0),
6566 BPF_MOV64_IMM(BPF_REG_3, 0),
6567 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6568 BPF_EXIT_INSN(),
6569 },
6570 .errstr = "R1 type=inv expected=fp",
6571 .result = REJECT,
6572 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6573 },
6574 {
6575 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6576 .insns = {
6577 BPF_MOV64_IMM(BPF_REG_1, 0),
6578 BPF_MOV64_IMM(BPF_REG_2, 1),
6579 BPF_MOV64_IMM(BPF_REG_3, 0),
6580 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6581 BPF_EXIT_INSN(),
6582 },
6583 .errstr = "R1 type=inv expected=fp",
6584 .result = REJECT,
6585 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6586 },
6587 {
6588 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6589 .insns = {
6590 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6591 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6592 BPF_MOV64_IMM(BPF_REG_2, 0),
6593 BPF_MOV64_IMM(BPF_REG_3, 0),
6594 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6595 BPF_EXIT_INSN(),
6596 },
6597 .result = ACCEPT,
6598 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6599 },
6600 {
6601 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6602 .insns = {
6603 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6604 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6605 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6606 BPF_LD_MAP_FD(BPF_REG_1, 0),
6607 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6608 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6609 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6610 BPF_MOV64_IMM(BPF_REG_2, 0),
6611 BPF_MOV64_IMM(BPF_REG_3, 0),
6612 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6613 BPF_EXIT_INSN(),
6614 },
6615 .fixup_map1 = { 3 },
6616 .result = ACCEPT,
6617 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6618 },
6619 {
6620 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6621 .insns = {
6622 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6623 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6625 BPF_LD_MAP_FD(BPF_REG_1, 0),
6626 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6627 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6628 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6629 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6630 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6632 BPF_MOV64_IMM(BPF_REG_3, 0),
6633 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6634 BPF_EXIT_INSN(),
6635 },
6636 .fixup_map1 = { 3 },
6637 .result = ACCEPT,
6638 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6639 },
6640 {
6641 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6642 .insns = {
6643 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6644 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6646 BPF_LD_MAP_FD(BPF_REG_1, 0),
6647 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6648 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6649 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6650 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6651 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
6652 BPF_MOV64_IMM(BPF_REG_3, 0),
6653 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6654 BPF_EXIT_INSN(),
6655 },
6656 .fixup_map1 = { 3 },
6657 .result = ACCEPT,
6658 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6659 },
6660 {
Gianluca Borello06c1c042017-01-09 10:19:49 -08006661 "helper access to variable memory: 8 bytes leak",
6662 .insns = {
6663 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6665 BPF_MOV64_IMM(BPF_REG_0, 0),
6666 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6667 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6668 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6669 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6670 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6671 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6672 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08006673 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01006674 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6675 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006676 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
6677 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6678 BPF_MOV64_IMM(BPF_REG_3, 0),
6679 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6680 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6681 BPF_EXIT_INSN(),
6682 },
6683 .errstr = "invalid indirect read from stack off -64+32 size 64",
6684 .result = REJECT,
6685 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6686 },
6687 {
6688 "helper access to variable memory: 8 bytes no leak (init memory)",
6689 .insns = {
6690 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6691 BPF_MOV64_IMM(BPF_REG_0, 0),
6692 BPF_MOV64_IMM(BPF_REG_0, 0),
6693 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6694 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6695 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6696 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6697 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6698 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6699 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6700 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6702 BPF_MOV64_IMM(BPF_REG_2, 0),
6703 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
6704 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
6705 BPF_MOV64_IMM(BPF_REG_3, 0),
6706 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6707 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6708 BPF_EXIT_INSN(),
6709 },
6710 .result = ACCEPT,
6711 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6712 },
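	/* Bounds tracking through ALU chains on a map value index: an AND
	 * with a negative constant, or the MOD/AND/RSH/MUL sequence in the
	 * second case, leaves the index with a max value outside the array
	 * range, so the resulting access must be rejected.
	 */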
Josef Bacik29200c12017-02-03 16:25:23 -05006713 {
6714 "invalid and of negative number",
6715 .insns = {
6716 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6717 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6719 BPF_LD_MAP_FD(BPF_REG_1, 0),
6720 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6721 BPF_FUNC_map_lookup_elem),
6722 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Edward Creef65b1842017-08-07 15:27:12 +01006723 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik29200c12017-02-03 16:25:23 -05006724 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
6725 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
6726 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6727 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
6728 offsetof(struct test_val, foo)),
6729 BPF_EXIT_INSN(),
6730 },
6731 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006732 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05006733 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006734 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik29200c12017-02-03 16:25:23 -05006735 },
6736 {
6737 "invalid range check",
6738 .insns = {
6739 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6740 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6742 BPF_LD_MAP_FD(BPF_REG_1, 0),
6743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6744 BPF_FUNC_map_lookup_elem),
6745 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
6746 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6747 BPF_MOV64_IMM(BPF_REG_9, 1),
6748 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
6749 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
6750 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
6751 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
6752 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
6753 BPF_MOV32_IMM(BPF_REG_3, 1),
6754 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
6755 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
6756 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
6757 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
6758 BPF_MOV64_REG(BPF_REG_0, 0),
6759 BPF_EXIT_INSN(),
6760 },
6761 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006762 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05006763 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006764 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006765 },
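	/* map-in-map tests: looking up an entry of the outer map yields an
	 * inner map pointer (or NULL), which may only be used for a second
	 * lookup after a NULL check and without any arithmetic on it.
	 */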
6766 {
6767 "map in map access",
6768 .insns = {
6769 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6770 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6772 BPF_LD_MAP_FD(BPF_REG_1, 0),
6773 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6774 BPF_FUNC_map_lookup_elem),
6775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6776 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6777 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6779 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6780 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6781 BPF_FUNC_map_lookup_elem),
6782 BPF_MOV64_REG(BPF_REG_0, 0),
6783 BPF_EXIT_INSN(),
6784 },
6785 .fixup_map_in_map = { 3 },
6786 .result = ACCEPT,
6787 },
6788 {
6789 "invalid inner map pointer",
6790 .insns = {
6791 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6792 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6794 BPF_LD_MAP_FD(BPF_REG_1, 0),
6795 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6796 BPF_FUNC_map_lookup_elem),
6797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6798 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6799 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6800 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6801 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6804 BPF_FUNC_map_lookup_elem),
6805 BPF_MOV64_REG(BPF_REG_0, 0),
6806 BPF_EXIT_INSN(),
6807 },
6808 .fixup_map_in_map = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08006809 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006810 .result = REJECT,
6811 },
6812 {
6813 "forgot null checking on the inner map pointer",
6814 .insns = {
6815 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6816 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6818 BPF_LD_MAP_FD(BPF_REG_1, 0),
6819 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6820 BPF_FUNC_map_lookup_elem),
6821 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6822 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6823 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6824 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6825 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6826 BPF_FUNC_map_lookup_elem),
6827 BPF_MOV64_REG(BPF_REG_0, 0),
6828 BPF_EXIT_INSN(),
6829 },
6830 .fixup_map_in_map = { 3 },
6831 .errstr = "R1 type=map_value_or_null expected=map_ptr",
6832 .result = REJECT,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006833 },
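	/* ld_abs/ld_ind clobber the caller-saved registers r1-r5, so
	 * reading any of them afterwards fails with !read_ok, while r7
	 * (and the r6 skb context) survives the load.
	 */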
6834 {
6835 "ld_abs: check calling conv, r1",
6836 .insns = {
6837 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6838 BPF_MOV64_IMM(BPF_REG_1, 0),
6839 BPF_LD_ABS(BPF_W, -0x200000),
6840 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6841 BPF_EXIT_INSN(),
6842 },
6843 .errstr = "R1 !read_ok",
6844 .result = REJECT,
6845 },
6846 {
6847 "ld_abs: check calling conv, r2",
6848 .insns = {
6849 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6850 BPF_MOV64_IMM(BPF_REG_2, 0),
6851 BPF_LD_ABS(BPF_W, -0x200000),
6852 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6853 BPF_EXIT_INSN(),
6854 },
6855 .errstr = "R2 !read_ok",
6856 .result = REJECT,
6857 },
6858 {
6859 "ld_abs: check calling conv, r3",
6860 .insns = {
6861 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6862 BPF_MOV64_IMM(BPF_REG_3, 0),
6863 BPF_LD_ABS(BPF_W, -0x200000),
6864 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6865 BPF_EXIT_INSN(),
6866 },
6867 .errstr = "R3 !read_ok",
6868 .result = REJECT,
6869 },
6870 {
6871 "ld_abs: check calling conv, r4",
6872 .insns = {
6873 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6874 BPF_MOV64_IMM(BPF_REG_4, 0),
6875 BPF_LD_ABS(BPF_W, -0x200000),
6876 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6877 BPF_EXIT_INSN(),
6878 },
6879 .errstr = "R4 !read_ok",
6880 .result = REJECT,
6881 },
6882 {
6883 "ld_abs: check calling conv, r5",
6884 .insns = {
6885 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6886 BPF_MOV64_IMM(BPF_REG_5, 0),
6887 BPF_LD_ABS(BPF_W, -0x200000),
6888 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6889 BPF_EXIT_INSN(),
6890 },
6891 .errstr = "R5 !read_ok",
6892 .result = REJECT,
6893 },
6894 {
6895 "ld_abs: check calling conv, r7",
6896 .insns = {
6897 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6898 BPF_MOV64_IMM(BPF_REG_7, 0),
6899 BPF_LD_ABS(BPF_W, -0x200000),
6900 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6901 BPF_EXIT_INSN(),
6902 },
6903 .result = ACCEPT,
6904 },
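	/* Exercises ld_abs before and after bpf_skb_vlan_push(), a helper
	 * that changes packet data: the skb context is parked in r7 and
	 * restored into r6 around the call, and the subsequent ld_abs
	 * reloads must still be accepted.
	 */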
6905 {
Daniel Borkmann87ab8192017-12-14 21:07:27 +01006906 "ld_abs: tests on r6 and skb data reload helper",
6907 .insns = {
6908 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6909 BPF_LD_ABS(BPF_B, 0),
6910 BPF_LD_ABS(BPF_H, 0),
6911 BPF_LD_ABS(BPF_W, 0),
6912 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
6913 BPF_MOV64_IMM(BPF_REG_6, 0),
6914 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
6915 BPF_MOV64_IMM(BPF_REG_2, 1),
6916 BPF_MOV64_IMM(BPF_REG_3, 2),
6917 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6918 BPF_FUNC_skb_vlan_push),
6919 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
6920 BPF_LD_ABS(BPF_B, 0),
6921 BPF_LD_ABS(BPF_H, 0),
6922 BPF_LD_ABS(BPF_W, 0),
6923 BPF_MOV64_IMM(BPF_REG_0, 42),
6924 BPF_EXIT_INSN(),
6925 },
6926 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6927 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006928 .retval = 42 /* ultimate return value */,
Daniel Borkmann87ab8192017-12-14 21:07:27 +01006929 },
6930 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006931 "ld_ind: check calling conv, r1",
6932 .insns = {
6933 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6934 BPF_MOV64_IMM(BPF_REG_1, 1),
6935 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
6936 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6937 BPF_EXIT_INSN(),
6938 },
6939 .errstr = "R1 !read_ok",
6940 .result = REJECT,
6941 },
6942 {
6943 "ld_ind: check calling conv, r2",
6944 .insns = {
6945 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6946 BPF_MOV64_IMM(BPF_REG_2, 1),
6947 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
6948 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6949 BPF_EXIT_INSN(),
6950 },
6951 .errstr = "R2 !read_ok",
6952 .result = REJECT,
6953 },
6954 {
6955 "ld_ind: check calling conv, r3",
6956 .insns = {
6957 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6958 BPF_MOV64_IMM(BPF_REG_3, 1),
6959 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
6960 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6961 BPF_EXIT_INSN(),
6962 },
6963 .errstr = "R3 !read_ok",
6964 .result = REJECT,
6965 },
6966 {
6967 "ld_ind: check calling conv, r4",
6968 .insns = {
6969 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6970 BPF_MOV64_IMM(BPF_REG_4, 1),
6971 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
6972 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6973 BPF_EXIT_INSN(),
6974 },
6975 .errstr = "R4 !read_ok",
6976 .result = REJECT,
6977 },
6978 {
6979 "ld_ind: check calling conv, r5",
6980 .insns = {
6981 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6982 BPF_MOV64_IMM(BPF_REG_5, 1),
6983 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
6984 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6985 BPF_EXIT_INSN(),
6986 },
6987 .errstr = "R5 !read_ok",
6988 .result = REJECT,
6989 },
6990 {
6991 "ld_ind: check calling conv, r7",
6992 .insns = {
6993 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6994 BPF_MOV64_IMM(BPF_REG_7, 1),
6995 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
6996 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6997 BPF_EXIT_INSN(),
6998 },
6999 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08007000 .retval = 1,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02007001 },
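	/* Context access width tests: bpf_perf_event_data->sample_period
	 * may be loaded as byte, half, word or dword (the #if picks the
	 * offset matching host endianness), whereas narrow loads from
	 * __sk_buff's data and tc_classid fields are rejected as invalid
	 * bpf_context access.
	 */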
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007002 {
7003 "check bpf_perf_event_data->sample_period byte load permitted",
7004 .insns = {
7005 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007006#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007007 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7008 offsetof(struct bpf_perf_event_data, sample_period)),
7009#else
7010 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7011 offsetof(struct bpf_perf_event_data, sample_period) + 7),
7012#endif
7013 BPF_EXIT_INSN(),
7014 },
7015 .result = ACCEPT,
7016 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7017 },
7018 {
7019 "check bpf_perf_event_data->sample_period half load permitted",
7020 .insns = {
7021 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007022#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007023 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7024 offsetof(struct bpf_perf_event_data, sample_period)),
7025#else
7026 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7027 offsetof(struct bpf_perf_event_data, sample_period) + 6),
7028#endif
7029 BPF_EXIT_INSN(),
7030 },
7031 .result = ACCEPT,
7032 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7033 },
7034 {
7035 "check bpf_perf_event_data->sample_period word load permitted",
7036 .insns = {
7037 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007038#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007039 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7040 offsetof(struct bpf_perf_event_data, sample_period)),
7041#else
7042 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7043 offsetof(struct bpf_perf_event_data, sample_period) + 4),
7044#endif
7045 BPF_EXIT_INSN(),
7046 },
7047 .result = ACCEPT,
7048 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7049 },
7050 {
7051 "check bpf_perf_event_data->sample_period dword load permitted",
7052 .insns = {
7053 BPF_MOV64_IMM(BPF_REG_0, 0),
7054 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
7055 offsetof(struct bpf_perf_event_data, sample_period)),
7056 BPF_EXIT_INSN(),
7057 },
7058 .result = ACCEPT,
7059 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7060 },
7061 {
7062 "check skb->data half load not permitted",
7063 .insns = {
7064 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007065#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007066 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7067 offsetof(struct __sk_buff, data)),
7068#else
7069 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7070 offsetof(struct __sk_buff, data) + 2),
7071#endif
7072 BPF_EXIT_INSN(),
7073 },
7074 .result = REJECT,
7075 .errstr = "invalid bpf_context access",
7076 },
7077 {
7078 "check skb->tc_classid half load not permitted for lwt prog",
7079 .insns = {
7080 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007081#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007082 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7083 offsetof(struct __sk_buff, tc_classid)),
7084#else
7085 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7086 offsetof(struct __sk_buff, tc_classid) + 2),
7087#endif
7088 BPF_EXIT_INSN(),
7089 },
7090 .result = REJECT,
7091 .errstr = "invalid bpf_context access",
7092 .prog_type = BPF_PROG_TYPE_LWT_IN,
7093 },
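	/* "bounds checks mixing signed and unsigned": a value read back
	 * from the stack is range-checked with a mixture of signed and
	 * unsigned jumps before being added to a map value pointer.
	 * Unless both bounds are really pinned down, the register keeps an
	 * unbounded min value and the access is rejected.
	 */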
Edward Creeb7122962017-07-21 00:00:24 +02007094 {
7095 "bounds checks mixing signed and unsigned, positive bounds",
7096 .insns = {
7097 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7098 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7099 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7100 BPF_LD_MAP_FD(BPF_REG_1, 0),
7101 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7102 BPF_FUNC_map_lookup_elem),
7103 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7104 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7105 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7106 BPF_MOV64_IMM(BPF_REG_2, 2),
7107 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
7108 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
7109 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7110 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7111 BPF_MOV64_IMM(BPF_REG_0, 0),
7112 BPF_EXIT_INSN(),
7113 },
7114 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007115 .errstr = "unbounded min value",
Edward Creeb7122962017-07-21 00:00:24 +02007116 .result = REJECT,
Edward Creeb7122962017-07-21 00:00:24 +02007117 },
7118 {
7119 "bounds checks mixing signed and unsigned",
7120 .insns = {
7121 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7122 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7124 BPF_LD_MAP_FD(BPF_REG_1, 0),
7125 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7126 BPF_FUNC_map_lookup_elem),
7127 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7128 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7129 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7130 BPF_MOV64_IMM(BPF_REG_2, -1),
7131 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7132 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7133 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7134 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7135 BPF_MOV64_IMM(BPF_REG_0, 0),
7136 BPF_EXIT_INSN(),
7137 },
7138 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007139 .errstr = "unbounded min value",
Edward Creeb7122962017-07-21 00:00:24 +02007140 .result = REJECT,
Edward Creeb7122962017-07-21 00:00:24 +02007141 },
Daniel Borkmann86412502017-07-21 00:00:25 +02007142 {
7143 "bounds checks mixing signed and unsigned, variant 2",
7144 .insns = {
7145 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7146 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7148 BPF_LD_MAP_FD(BPF_REG_1, 0),
7149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7150 BPF_FUNC_map_lookup_elem),
7151 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7152 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7153 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7154 BPF_MOV64_IMM(BPF_REG_2, -1),
7155 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7156 BPF_MOV64_IMM(BPF_REG_8, 0),
7157 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
7158 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7159 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7160 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7161 BPF_MOV64_IMM(BPF_REG_0, 0),
7162 BPF_EXIT_INSN(),
7163 },
7164 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007165 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007166 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007167 },
7168 {
7169 "bounds checks mixing signed and unsigned, variant 3",
7170 .insns = {
7171 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7172 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7174 BPF_LD_MAP_FD(BPF_REG_1, 0),
7175 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7176 BPF_FUNC_map_lookup_elem),
7177 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7178 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7179 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7180 BPF_MOV64_IMM(BPF_REG_2, -1),
7181 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
7182 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
7183 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7184 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7185 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7186 BPF_MOV64_IMM(BPF_REG_0, 0),
7187 BPF_EXIT_INSN(),
7188 },
7189 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007190 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007191 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007192 },
7193 {
7194 "bounds checks mixing signed and unsigned, variant 4",
7195 .insns = {
7196 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7197 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7198 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7199 BPF_LD_MAP_FD(BPF_REG_1, 0),
7200 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7201 BPF_FUNC_map_lookup_elem),
7202 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7203 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7204 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7205 BPF_MOV64_IMM(BPF_REG_2, 1),
7206 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
7207 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7208 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7209 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7210 BPF_MOV64_IMM(BPF_REG_0, 0),
7211 BPF_EXIT_INSN(),
7212 },
7213 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007214 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007215 },
7216 {
7217 "bounds checks mixing signed and unsigned, variant 5",
7218 .insns = {
7219 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7220 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7222 BPF_LD_MAP_FD(BPF_REG_1, 0),
7223 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7224 BPF_FUNC_map_lookup_elem),
7225 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7226 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7227 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7228 BPF_MOV64_IMM(BPF_REG_2, -1),
7229 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7230 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
7231 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
7232 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7233 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7234 BPF_MOV64_IMM(BPF_REG_0, 0),
7235 BPF_EXIT_INSN(),
7236 },
7237 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007238 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007239 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007240 },
7241 {
7242 "bounds checks mixing signed and unsigned, variant 6",
7243 .insns = {
7244 BPF_MOV64_IMM(BPF_REG_2, 0),
7245 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
7246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
7247 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7248 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
7249 BPF_MOV64_IMM(BPF_REG_6, -1),
7250 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
7251 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
7252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7253 BPF_MOV64_IMM(BPF_REG_5, 0),
7254 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
7255 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7256 BPF_FUNC_skb_load_bytes),
7257 BPF_MOV64_IMM(BPF_REG_0, 0),
7258 BPF_EXIT_INSN(),
7259 },
Daniel Borkmann86412502017-07-21 00:00:25 +02007260 .errstr = "R4 min value is negative, either use unsigned",
7261 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007262 },
7263 {
7264 "bounds checks mixing signed and unsigned, variant 7",
7265 .insns = {
7266 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7267 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7268 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7269 BPF_LD_MAP_FD(BPF_REG_1, 0),
7270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7271 BPF_FUNC_map_lookup_elem),
7272 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7273 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7274 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7275 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
7276 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7277 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7278 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7279 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7280 BPF_MOV64_IMM(BPF_REG_0, 0),
7281 BPF_EXIT_INSN(),
7282 },
7283 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007284 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007285 },
7286 {
7287 "bounds checks mixing signed and unsigned, variant 8",
7288 .insns = {
7289 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7290 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7292 BPF_LD_MAP_FD(BPF_REG_1, 0),
7293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7294 BPF_FUNC_map_lookup_elem),
Daniel Borkmann86412502017-07-21 00:00:25 +02007295 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7296 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7297 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7298 BPF_MOV64_IMM(BPF_REG_2, -1),
7299 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7300 BPF_MOV64_IMM(BPF_REG_0, 0),
7301 BPF_EXIT_INSN(),
7302 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7303 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7304 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7305 BPF_MOV64_IMM(BPF_REG_0, 0),
7306 BPF_EXIT_INSN(),
7307 },
7308 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007309 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007310 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007311 },
7312 {
Edward Creef65b1842017-08-07 15:27:12 +01007313 "bounds checks mixing signed and unsigned, variant 9",
Daniel Borkmann86412502017-07-21 00:00:25 +02007314 .insns = {
7315 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7316 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7318 BPF_LD_MAP_FD(BPF_REG_1, 0),
7319 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7320 BPF_FUNC_map_lookup_elem),
7321 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7322 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7323 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7324 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
7325 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7326 BPF_MOV64_IMM(BPF_REG_0, 0),
7327 BPF_EXIT_INSN(),
7328 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7329 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7330 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7331 BPF_MOV64_IMM(BPF_REG_0, 0),
7332 BPF_EXIT_INSN(),
7333 },
7334 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007335 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007336 },
7337 {
Edward Creef65b1842017-08-07 15:27:12 +01007338 "bounds checks mixing signed and unsigned, variant 10",
Daniel Borkmann86412502017-07-21 00:00:25 +02007339 .insns = {
7340 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7341 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7342 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7343 BPF_LD_MAP_FD(BPF_REG_1, 0),
7344 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7345 BPF_FUNC_map_lookup_elem),
7346 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7347 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7348 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7349 BPF_MOV64_IMM(BPF_REG_2, 0),
7350 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7351 BPF_MOV64_IMM(BPF_REG_0, 0),
7352 BPF_EXIT_INSN(),
7353 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7354 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7355 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7356 BPF_MOV64_IMM(BPF_REG_0, 0),
7357 BPF_EXIT_INSN(),
7358 },
7359 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007360 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007361 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007362 },
7363 {
Edward Creef65b1842017-08-07 15:27:12 +01007364 "bounds checks mixing signed and unsigned, variant 11",
Daniel Borkmann86412502017-07-21 00:00:25 +02007365 .insns = {
7366 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7367 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7369 BPF_LD_MAP_FD(BPF_REG_1, 0),
7370 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7371 BPF_FUNC_map_lookup_elem),
7372 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7373 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7374 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7375 BPF_MOV64_IMM(BPF_REG_2, -1),
7376 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7377 /* Dead branch. */
7378 BPF_MOV64_IMM(BPF_REG_0, 0),
7379 BPF_EXIT_INSN(),
7380 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7381 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7382 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7383 BPF_MOV64_IMM(BPF_REG_0, 0),
7384 BPF_EXIT_INSN(),
7385 },
7386 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007387 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007388 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007389 },
7390 {
Edward Creef65b1842017-08-07 15:27:12 +01007391 "bounds checks mixing signed and unsigned, variant 12",
Daniel Borkmann86412502017-07-21 00:00:25 +02007392 .insns = {
7393 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7394 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7396 BPF_LD_MAP_FD(BPF_REG_1, 0),
7397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7398 BPF_FUNC_map_lookup_elem),
7399 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7400 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7401 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7402 BPF_MOV64_IMM(BPF_REG_2, -6),
7403 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7404 BPF_MOV64_IMM(BPF_REG_0, 0),
7405 BPF_EXIT_INSN(),
7406 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7407 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7408 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7409 BPF_MOV64_IMM(BPF_REG_0, 0),
7410 BPF_EXIT_INSN(),
7411 },
7412 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007413 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007414 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007415 },
7416 {
Edward Creef65b1842017-08-07 15:27:12 +01007417 "bounds checks mixing signed and unsigned, variant 13",
Daniel Borkmann86412502017-07-21 00:00:25 +02007418 .insns = {
7419 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7420 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7422 BPF_LD_MAP_FD(BPF_REG_1, 0),
7423 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7424 BPF_FUNC_map_lookup_elem),
7425 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7426 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7427 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7428 BPF_MOV64_IMM(BPF_REG_2, 2),
7429 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7430 BPF_MOV64_IMM(BPF_REG_7, 1),
7431 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
7432 BPF_MOV64_IMM(BPF_REG_0, 0),
7433 BPF_EXIT_INSN(),
7434 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
7435 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
7436 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
7437 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7438 BPF_MOV64_IMM(BPF_REG_0, 0),
7439 BPF_EXIT_INSN(),
7440 },
7441 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007442 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007443 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007444 },
7445 {
Edward Creef65b1842017-08-07 15:27:12 +01007446 "bounds checks mixing signed and unsigned, variant 14",
Daniel Borkmann86412502017-07-21 00:00:25 +02007447 .insns = {
7448 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
7449 offsetof(struct __sk_buff, mark)),
7450 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7451 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7453 BPF_LD_MAP_FD(BPF_REG_1, 0),
7454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7455 BPF_FUNC_map_lookup_elem),
7456 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7457 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7458 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7459 BPF_MOV64_IMM(BPF_REG_2, -1),
7460 BPF_MOV64_IMM(BPF_REG_8, 2),
7461 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
7462 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
7463 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7464 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7465 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7466 BPF_MOV64_IMM(BPF_REG_0, 0),
7467 BPF_EXIT_INSN(),
7468 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
7469 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
7470 },
7471 .fixup_map1 = { 4 },
Daniel Borkmann6f161012018-01-18 01:15:21 +01007472 .errstr = "R0 invalid mem access 'inv'",
Daniel Borkmann86412502017-07-21 00:00:25 +02007473 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007474 },
7475 {
Edward Creef65b1842017-08-07 15:27:12 +01007476 "bounds checks mixing signed and unsigned, variant 15",
Daniel Borkmann86412502017-07-21 00:00:25 +02007477 .insns = {
7478 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7479 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7481 BPF_LD_MAP_FD(BPF_REG_1, 0),
7482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7483 BPF_FUNC_map_lookup_elem),
7484 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7485 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7486 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7487 BPF_MOV64_IMM(BPF_REG_2, -6),
7488 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7489 BPF_MOV64_IMM(BPF_REG_0, 0),
7490 BPF_EXIT_INSN(),
7491 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7492 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
7493 BPF_MOV64_IMM(BPF_REG_0, 0),
7494 BPF_EXIT_INSN(),
7495 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7496 BPF_MOV64_IMM(BPF_REG_0, 0),
7497 BPF_EXIT_INSN(),
7498 },
7499 .fixup_map1 = { 3 },
7500 .errstr = "unbounded min value",
7501 .result = REJECT,
7502 .result_unpriv = REJECT,
7503 },
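	/* The two "subtraction bounds" tests below load two map values that
	 * are each bounded to [0, 0xff], subtract one from the other and use
	 * the result as an offset into the map value. Since the difference
	 * can be negative and its bounds are not re-checked, the verifier is
	 * expected to reject the access (see the .errstr of each test).
	 */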
7504 {
7505 "subtraction bounds (map value) variant 1",
7506 .insns = {
7507 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7508 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7510 BPF_LD_MAP_FD(BPF_REG_1, 0),
7511 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7512 BPF_FUNC_map_lookup_elem),
7513 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7514 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7515 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
7516 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7517 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
7518 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7519 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
7520 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7521 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7522 BPF_EXIT_INSN(),
7523 BPF_MOV64_IMM(BPF_REG_0, 0),
7524 BPF_EXIT_INSN(),
7525 },
7526 .fixup_map1 = { 3 },
7527 .errstr = "R0 max value is outside of the array range",
7528 .result = REJECT,
7529 },
7530 {
7531 "subtraction bounds (map value) variant 2",
7532 .insns = {
7533 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7534 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7536 BPF_LD_MAP_FD(BPF_REG_1, 0),
7537 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7538 BPF_FUNC_map_lookup_elem),
7539 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7540 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7541 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
7542 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7543 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
7544 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7545 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7546 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7547 BPF_EXIT_INSN(),
7548 BPF_MOV64_IMM(BPF_REG_0, 0),
7549 BPF_EXIT_INSN(),
7550 },
7551 .fixup_map1 = { 3 },
7552 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
7553 .result = REJECT,
7554 },
7555 {
7556 "bounds check based on zero-extended MOV",
7557 .insns = {
7558 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7559 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7561 BPF_LD_MAP_FD(BPF_REG_1, 0),
7562 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7563 BPF_FUNC_map_lookup_elem),
7564 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7565 /* r2 = 0x0000'0000'ffff'ffff */
7566 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
7567 /* r2 = 0 */
7568 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7569 /* no-op */
7570 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7571 /* access at offset 0 */
7572 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7573 /* exit */
7574 BPF_MOV64_IMM(BPF_REG_0, 0),
7575 BPF_EXIT_INSN(),
7576 },
7577 .fixup_map1 = { 3 },
7578 .result = ACCEPT
7579 },
7580 {
7581 "bounds check based on sign-extended MOV. test1",
7582 .insns = {
7583 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7584 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7586 BPF_LD_MAP_FD(BPF_REG_1, 0),
7587 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7588 BPF_FUNC_map_lookup_elem),
7589 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7590 /* r2 = 0xffff'ffff'ffff'ffff */
7591 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7592 /* r2 = 0xffff'ffff */
7593 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7594 /* r0 = <oob pointer> */
7595 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7596 /* access to OOB pointer */
7597 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7598 /* exit */
7599 BPF_MOV64_IMM(BPF_REG_0, 0),
7600 BPF_EXIT_INSN(),
7601 },
7602 .fixup_map1 = { 3 },
7603 .errstr = "map_value pointer and 4294967295",
7604 .result = REJECT
7605 },
7606 {
7607 "bounds check based on sign-extended MOV. test2",
7608 .insns = {
7609 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7610 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7611 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7612 BPF_LD_MAP_FD(BPF_REG_1, 0),
7613 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7614 BPF_FUNC_map_lookup_elem),
7615 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7616 /* r2 = 0xffff'ffff'ffff'ffff */
7617 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7618 /* r2 = 0xfff'ffff */
7619 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
7620 /* r0 = <oob pointer> */
7621 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7622 /* access to OOB pointer */
7623 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7624 /* exit */
7625 BPF_MOV64_IMM(BPF_REG_0, 0),
7626 BPF_EXIT_INSN(),
7627 },
7628 .fixup_map1 = { 3 },
7629 .errstr = "R0 min value is outside of the array range",
7630 .result = REJECT
7631 },
7632 {
7633 "bounds check based on reg_off + var_off + insn_off. test1",
7634 .insns = {
7635 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7636 offsetof(struct __sk_buff, mark)),
7637 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7638 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7640 BPF_LD_MAP_FD(BPF_REG_1, 0),
7641 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7642 BPF_FUNC_map_lookup_elem),
7643 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7644 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
7646 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7648 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7649 BPF_MOV64_IMM(BPF_REG_0, 0),
7650 BPF_EXIT_INSN(),
7651 },
7652 .fixup_map1 = { 4 },
7653 .errstr = "value_size=8 off=1073741825",
7654 .result = REJECT,
7655 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7656 },
7657 {
7658 "bounds check based on reg_off + var_off + insn_off. test2",
7659 .insns = {
7660 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7661 offsetof(struct __sk_buff, mark)),
7662 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7663 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7665 BPF_LD_MAP_FD(BPF_REG_1, 0),
7666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7667 BPF_FUNC_map_lookup_elem),
7668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7669 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7670 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
7671 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7673 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7674 BPF_MOV64_IMM(BPF_REG_0, 0),
7675 BPF_EXIT_INSN(),
7676 },
7677 .fixup_map1 = { 4 },
7678 .errstr = "value 1073741823",
7679 .result = REJECT,
7680 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7681 },
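	/* The three "bounds check after truncation" tests below exercise how
	 * 32-bit truncation (ALU32 op or MOV32) interacts with the tracked
	 * bounds. When the 64-bit range does not cross a 32-bit boundary the
	 * truncated value falls back into [0x00, 0xff] and the access is
	 * accepted; when it does cross, the truncated range can be huge and
	 * the access must be rejected.
	 */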
7682 {
7683 "bounds check after truncation of non-boundary-crossing range",
7684 .insns = {
7685 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7686 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7688 BPF_LD_MAP_FD(BPF_REG_1, 0),
7689 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7690 BPF_FUNC_map_lookup_elem),
7691 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7692 /* r1 = [0x00, 0xff] */
7693 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7694 BPF_MOV64_IMM(BPF_REG_2, 1),
7695 /* r2 = 0x10'0000'0000 */
7696 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
7697 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
7698 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7699 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
7700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7701 /* r1 = [0x00, 0xff] */
7702 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
7703 /* r1 = 0 */
7704 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7705 /* no-op */
7706 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7707 /* access at offset 0 */
7708 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7709 /* exit */
7710 BPF_MOV64_IMM(BPF_REG_0, 0),
7711 BPF_EXIT_INSN(),
7712 },
7713 .fixup_map1 = { 3 },
7714 .result = ACCEPT
7715 },
7716 {
7717 "bounds check after truncation of boundary-crossing range (1)",
7718 .insns = {
7719 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7720 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7722 BPF_LD_MAP_FD(BPF_REG_1, 0),
7723 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7724 BPF_FUNC_map_lookup_elem),
7725 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7726 /* r1 = [0x00, 0xff] */
7727 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7729 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7731 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7732 * [0x0000'0000, 0x0000'007f]
7733 */
7734 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
7735 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7736 /* r1 = [0x00, 0xff] or
7737 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7738 */
7739 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7740 /* r1 = 0 or
7741 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7742 */
7743 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7744 /* no-op or OOB pointer computation */
7745 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7746 /* potentially OOB access */
7747 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7748 /* exit */
7749 BPF_MOV64_IMM(BPF_REG_0, 0),
7750 BPF_EXIT_INSN(),
7751 },
7752 .fixup_map1 = { 3 },
7753 /* not actually fully unbounded, but the bound is very high */
7754 .errstr = "R0 unbounded memory access",
7755 .result = REJECT
7756 },
7757 {
7758 "bounds check after truncation of boundary-crossing range (2)",
7759 .insns = {
7760 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7761 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7763 BPF_LD_MAP_FD(BPF_REG_1, 0),
7764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7765 BPF_FUNC_map_lookup_elem),
7766 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7767 /* r1 = [0x00, 0xff] */
7768 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7770 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7772 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7773 * [0x0000'0000, 0x0000'007f]
7774 * difference to previous test: truncation via MOV32
7775 * instead of ALU32.
7776 */
7777 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
7778 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7779 /* r1 = [0x00, 0xff] or
7780 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7781 */
7782 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7783 /* r1 = 0 or
7784 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7785 */
7786 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7787 /* no-op or OOB pointer computation */
7788 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7789 /* potentially OOB access */
7790 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7791 /* exit */
7792 BPF_MOV64_IMM(BPF_REG_0, 0),
7793 BPF_EXIT_INSN(),
7794 },
7795 .fixup_map1 = { 3 },
7796 /* not actually fully unbounded, but the bound is very high */
7797 .errstr = "R0 unbounded memory access",
7798 .result = REJECT
7799 },
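	/* In the next test the 32-bit addition wraps exactly to zero
	 * (0xffff'fffe + 2), so the resulting offset is known to be 0 and
	 * the access is accepted.
	 */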
7800 {
7801 "bounds check after wrapping 32-bit addition",
7802 .insns = {
7803 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7804 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7805 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7806 BPF_LD_MAP_FD(BPF_REG_1, 0),
7807 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7808 BPF_FUNC_map_lookup_elem),
7809 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7810 /* r1 = 0x7fff'ffff */
7811 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
7812 /* r1 = 0xffff'fffe */
7813 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7814 /* r1 = 0 */
7815 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
7816 /* no-op */
7817 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7818 /* access at offset 0 */
7819 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7820 /* exit */
7821 BPF_MOV64_IMM(BPF_REG_0, 0),
7822 BPF_EXIT_INSN(),
7823 },
7824 .fixup_map1 = { 3 },
7825 .result = ACCEPT
7826 },
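	/* A 32-bit shift by a count of 32 has no defined result (the test's
	 * own comment marks it as "?"), so the verifier must treat r1 as
	 * unknown afterwards; even masked to [0x0000, 0xffff] the offset can
	 * exceed the 8-byte map value, hence the reject below.
	 */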
7827 {
7828 "bounds check after shift with oversized count operand",
7829 .insns = {
7830 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7831 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7832 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7833 BPF_LD_MAP_FD(BPF_REG_1, 0),
7834 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7835 BPF_FUNC_map_lookup_elem),
7836 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7837 BPF_MOV64_IMM(BPF_REG_2, 32),
7838 BPF_MOV64_IMM(BPF_REG_1, 1),
7839 /* r1 = (u32)1 << (u32)32 = ? */
7840 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
7841 /* r1 = [0x0000, 0xffff] */
7842 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
7843 /* computes unknown pointer, potentially OOB */
7844 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7845 /* potentially OOB access */
7846 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7847 /* exit */
7848 BPF_MOV64_IMM(BPF_REG_0, 0),
7849 BPF_EXIT_INSN(),
7850 },
7851 .fixup_map1 = { 3 },
7852 .errstr = "R0 max value is outside of the array range",
7853 .result = REJECT
7854 },
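	/* Here r1 may be -1 before the shifts; a logical right shift of a
	 * possibly-negative 64-bit value leaves an enormous upper bound, so
	 * the computed pointer is unbounded and the access must be rejected.
	 */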
7855 {
7856 "bounds check after right shift of maybe-negative number",
7857 .insns = {
7858 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7859 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7861 BPF_LD_MAP_FD(BPF_REG_1, 0),
7862 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7863 BPF_FUNC_map_lookup_elem),
7864 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7865 /* r1 = [0x00, 0xff] */
7866 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7867 /* r1 = [-0x01, 0xfe] */
7868 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
7869 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7870 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7871 /* r1 = 0 or 0xffff'ffff'ffff */
7872 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7873 /* computes unknown pointer, potentially OOB */
7874 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7875 /* potentially OOB access */
7876 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7877 /* exit */
7878 BPF_MOV64_IMM(BPF_REG_0, 0),
7879 BPF_EXIT_INSN(),
7880 },
7881 .fixup_map1 = { 3 },
7882 .errstr = "R0 unbounded memory access",
7883 .result = REJECT
7884 },
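	/* The four "off+size signed 32bit overflow" tests below move the map
	 * value pointer by very large offsets (around 2^31, or 10^12 in
	 * test4) so that off+size no longer fits in a signed 32-bit value;
	 * the verifier must reject either the pointer arithmetic or the
	 * final access.
	 */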
7885 {
7886 "bounds check map access with off+size signed 32bit overflow. test1",
7887 .insns = {
7888 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7889 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7891 BPF_LD_MAP_FD(BPF_REG_1, 0),
7892 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7893 BPF_FUNC_map_lookup_elem),
7894 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7895 BPF_EXIT_INSN(),
7896 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7897 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7898 BPF_JMP_A(0),
7899 BPF_EXIT_INSN(),
7900 },
7901 .fixup_map1 = { 3 },
7902 .errstr = "map_value pointer and 2147483646",
7903 .result = REJECT
7904 },
7905 {
7906 "bounds check map access with off+size signed 32bit overflow. test2",
7907 .insns = {
7908 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7909 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7911 BPF_LD_MAP_FD(BPF_REG_1, 0),
7912 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7913 BPF_FUNC_map_lookup_elem),
7914 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7915 BPF_EXIT_INSN(),
7916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7918 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7919 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7920 BPF_JMP_A(0),
7921 BPF_EXIT_INSN(),
7922 },
7923 .fixup_map1 = { 3 },
7924 .errstr = "pointer offset 1073741822",
7925 .result = REJECT
7926 },
7927 {
7928 "bounds check map access with off+size signed 32bit overflow. test3",
7929 .insns = {
7930 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7931 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7933 BPF_LD_MAP_FD(BPF_REG_1, 0),
7934 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7935 BPF_FUNC_map_lookup_elem),
7936 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7937 BPF_EXIT_INSN(),
7938 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7939 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7940 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7941 BPF_JMP_A(0),
7942 BPF_EXIT_INSN(),
7943 },
7944 .fixup_map1 = { 3 },
7945 .errstr = "pointer offset -1073741822",
7946 .result = REJECT
7947 },
7948 {
7949 "bounds check map access with off+size signed 32bit overflow. test4",
7950 .insns = {
7951 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7952 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7954 BPF_LD_MAP_FD(BPF_REG_1, 0),
7955 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7956 BPF_FUNC_map_lookup_elem),
7957 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7958 BPF_EXIT_INSN(),
7959 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7960 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7961 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7962 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7963 BPF_JMP_A(0),
7964 BPF_EXIT_INSN(),
7965 },
7966 .fixup_map1 = { 3 },
7967 .errstr = "map_value pointer and 1000000000000",
7968 .result = REJECT
7969 },
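	/* In the two "pointer/scalar confusion" tests, r0 holds either a
	 * scalar loaded from the map value or the frame pointer, depending
	 * on which branch is taken; state pruning must not treat the two as
	 * equivalent. For privileged users the program is accepted and
	 * returns the pointer (POINTER_VALUE); unprivileged loading is
	 * rejected as an address leak.
	 */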
7970 {
7971 "pointer/scalar confusion in state equality check (way 1)",
7972 .insns = {
7973 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7974 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7976 BPF_LD_MAP_FD(BPF_REG_1, 0),
7977 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7978 BPF_FUNC_map_lookup_elem),
7979 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7980 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7981 BPF_JMP_A(1),
7982 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7983 BPF_JMP_A(0),
7984 BPF_EXIT_INSN(),
7985 },
7986 .fixup_map1 = { 3 },
7987 .result = ACCEPT,
7988 .retval = POINTER_VALUE,
7989 .result_unpriv = REJECT,
7990 .errstr_unpriv = "R0 leaks addr as return value"
7991 },
7992 {
7993 "pointer/scalar confusion in state equality check (way 2)",
7994 .insns = {
7995 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7996 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7998 BPF_LD_MAP_FD(BPF_REG_1, 0),
7999 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8000 BPF_FUNC_map_lookup_elem),
8001 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
8002 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8003 BPF_JMP_A(1),
8004 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8005 BPF_EXIT_INSN(),
8006 },
8007 .fixup_map1 = { 3 },
8008 .result = ACCEPT,
8009 .retval = POINTER_VALUE,
8010 .result_unpriv = REJECT,
8011 .errstr_unpriv = "R0 leaks addr as return value"
8012 },
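	/* The next three tests use a small, 4-byte-aligned but unknown value
	 * as a variable offset into the ctx or the stack. Direct ctx access,
	 * direct stack access and indirect (helper argument) stack access
	 * with such an offset are all expected to be rejected.
	 */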
8013 {
8014 "variable-offset ctx access",
8015 .insns = {
8016 /* Get an unknown value */
8017 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8018 /* Make it small and 4-byte aligned */
8019 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8020 /* add it to skb. We now have either &skb->len or
8021 * &skb->pkt_type, but we don't know which
8022 */
8023 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8024 /* dereference it */
8025 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8026 BPF_EXIT_INSN(),
8027 },
8028 .errstr = "variable ctx access var_off=(0x0; 0x4)",
8029 .result = REJECT,
8030 .prog_type = BPF_PROG_TYPE_LWT_IN,
8031 },
8032 {
8033 "variable-offset stack access",
8034 .insns = {
8035 /* Fill the top 8 bytes of the stack */
8036 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8037 /* Get an unknown value */
8038 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8039 /* Make it small and 4-byte aligned */
8040 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8041 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8042 /* add it to fp. We now have either fp-4 or fp-8, but
8043 * we don't know which
8044 */
8045 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8046 /* dereference it */
8047 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
8048 BPF_EXIT_INSN(),
8049 },
8050 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
8051 .result = REJECT,
8052 .prog_type = BPF_PROG_TYPE_LWT_IN,
8053 },
8054 {
8055 "indirect variable-offset stack access",
8056 .insns = {
8057 /* Fill the top 8 bytes of the stack */
8058 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8059 /* Get an unknown value */
8060 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8061 /* Make it small and 4-byte aligned */
8062 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8063 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8064 /* add it to fp. We now have either fp-4 or fp-8, but
8065 * we don't know which
8066 */
8067 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8068 /* dereference it indirectly */
8069 BPF_LD_MAP_FD(BPF_REG_1, 0),
8070 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8071 BPF_FUNC_map_lookup_elem),
8072 BPF_MOV64_IMM(BPF_REG_0, 0),
8073 BPF_EXIT_INSN(),
8074 },
8075 .fixup_map1 = { 5 },
8076 .errstr = "variable stack read R2",
8077 .result = REJECT,
8078 .prog_type = BPF_PROG_TYPE_LWT_IN,
8079 },
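	/* The three "direct stack access with 32-bit wraparound" tests add
	 * huge constants (up to 2 * 0x7fffffff) to the frame pointer; the
	 * verifier is expected to reject the pointer arithmetic before any
	 * access takes place.
	 */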
8080 {
8081 "direct stack access with 32-bit wraparound. test1",
8082 .insns = {
8083 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8084 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8086 BPF_MOV32_IMM(BPF_REG_0, 0),
8087 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8088 BPF_EXIT_INSN()
8089 },
8090 .errstr = "fp pointer and 2147483647",
8091 .result = REJECT
8092 },
8093 {
8094 "direct stack access with 32-bit wraparound. test2",
8095 .insns = {
8096 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8097 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8099 BPF_MOV32_IMM(BPF_REG_0, 0),
8100 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8101 BPF_EXIT_INSN()
8102 },
8103 .errstr = "fp pointer and 1073741823",
8104 .result = REJECT
8105 },
8106 {
8107 "direct stack access with 32-bit wraparound. test3",
8108 .insns = {
8109 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8110 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8112 BPF_MOV32_IMM(BPF_REG_0, 0),
8113 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8114 BPF_EXIT_INSN()
8115 },
8116 .errstr = "fp pointer offset 1073741822",
8117 .result = REJECT
8118 },
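	/* Both conditional jumps below can be taken, in which case neither
	 * write to r0 executes and the program reaches exit with r0 never
	 * written ("R0 !read_ok"). The test checks that liveness pruning
	 * with write screening does not prune that path away and accept the
	 * program.
	 */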
8119 {
8120 "liveness pruning and write screening",
8121 .insns = {
8122 /* Get an unknown value */
8123 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8124 /* branch conditions teach us nothing about R2 */
8125 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8126 BPF_MOV64_IMM(BPF_REG_0, 0),
8127 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8128 BPF_MOV64_IMM(BPF_REG_0, 0),
8129 BPF_EXIT_INSN(),
8130 },
8131 .errstr = "R0 !read_ok",
8132 .result = REJECT,
8133 .prog_type = BPF_PROG_TYPE_LWT_IN,
8134 },
8135 {
8136 "varlen_map_value_access pruning",
8137 .insns = {
8138 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8139 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8141 BPF_LD_MAP_FD(BPF_REG_1, 0),
8142 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8143 BPF_FUNC_map_lookup_elem),
8144 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8145 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
8146 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
8147 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
8148 BPF_MOV32_IMM(BPF_REG_1, 0),
8149 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
8150 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8151 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
8152 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
8153 offsetof(struct test_val, foo)),
8154 BPF_EXIT_INSN(),
8155 },
8156 .fixup_map2 = { 3 },
8157 .errstr_unpriv = "R0 leaks addr",
8158 .errstr = "R0 unbounded memory access",
8159 .result_unpriv = REJECT,
8160 .result = REJECT,
8161 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8162 },
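	/* BPF_END is only defined for the 32-bit BPF_ALU class; encoding it
	 * as BPF_ALU64 | BPF_END | BPF_TO_LE yields opcode 0xd7, which must
	 * be rejected as an unknown opcode.
	 */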
8163 {
8164 "invalid 64-bit BPF_END",
8165 .insns = {
8166 BPF_MOV32_IMM(BPF_REG_0, 0),
8167 {
8168 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
8169 .dst_reg = BPF_REG_0,
8170 .src_reg = 0,
8171 .off = 0,
8172 .imm = 32,
8173 },
8174 BPF_EXIT_INSN(),
8175 },
8176 .errstr = "unknown opcode d7",
8177 .result = REJECT,
8178 },
8179 {
8180 "XDP, using ifindex from netdev",
8181 .insns = {
8182 BPF_MOV64_IMM(BPF_REG_0, 0),
8183 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8184 offsetof(struct xdp_md, ingress_ifindex)),
8185 BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
8186 BPF_MOV64_IMM(BPF_REG_0, 1),
8187 BPF_EXIT_INSN(),
8188 },
8189 .result = ACCEPT,
8190 .prog_type = BPF_PROG_TYPE_XDP,
8191 .retval = 1,
8192 },
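	/* The "meta access" tests below read from the xdp_md data_meta area,
	 * which sits in front of the packet data. Such reads are only valid
	 * after a bounds check proving that the meta pointer plus the access
	 * length does not go past data; tests that skip or invert that check
	 * are rejected.
	 */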
8193 {
8194 "meta access, test1",
8195 .insns = {
8196 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8197 offsetof(struct xdp_md, data_meta)),
8198 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8199 offsetof(struct xdp_md, data)),
8200 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8201 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8202 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8203 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8204 BPF_MOV64_IMM(BPF_REG_0, 0),
8205 BPF_EXIT_INSN(),
8206 },
8207 .result = ACCEPT,
8208 .prog_type = BPF_PROG_TYPE_XDP,
8209 },
8210 {
8211 "meta access, test2",
8212 .insns = {
8213 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8214 offsetof(struct xdp_md, data_meta)),
8215 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8216 offsetof(struct xdp_md, data)),
8217 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8218 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
8219 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8221 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8222 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8223 BPF_MOV64_IMM(BPF_REG_0, 0),
8224 BPF_EXIT_INSN(),
8225 },
8226 .result = REJECT,
8227 .errstr = "invalid access to packet, off=-8",
8228 .prog_type = BPF_PROG_TYPE_XDP,
8229 },
8230 {
8231 "meta access, test3",
8232 .insns = {
8233 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8234 offsetof(struct xdp_md, data_meta)),
8235 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8236 offsetof(struct xdp_md, data_end)),
8237 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8239 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8240 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8241 BPF_MOV64_IMM(BPF_REG_0, 0),
8242 BPF_EXIT_INSN(),
8243 },
8244 .result = REJECT,
8245 .errstr = "invalid access to packet",
8246 .prog_type = BPF_PROG_TYPE_XDP,
8247 },
8248 {
8249 "meta access, test4",
8250 .insns = {
8251 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8252 offsetof(struct xdp_md, data_meta)),
8253 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8254 offsetof(struct xdp_md, data_end)),
8255 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8256 offsetof(struct xdp_md, data)),
8257 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8258 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8259 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8260 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8261 BPF_MOV64_IMM(BPF_REG_0, 0),
8262 BPF_EXIT_INSN(),
8263 },
8264 .result = REJECT,
8265 .errstr = "invalid access to packet",
8266 .prog_type = BPF_PROG_TYPE_XDP,
8267 },
8268 {
8269 "meta access, test5",
8270 .insns = {
8271 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8272 offsetof(struct xdp_md, data_meta)),
8273 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8274 offsetof(struct xdp_md, data)),
8275 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8277 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
8278 BPF_MOV64_IMM(BPF_REG_2, -8),
8279 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8280 BPF_FUNC_xdp_adjust_meta),
8281 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8282 BPF_MOV64_IMM(BPF_REG_0, 0),
8283 BPF_EXIT_INSN(),
8284 },
8285 .result = REJECT,
8286 .errstr = "R3 !read_ok",
8287 .prog_type = BPF_PROG_TYPE_XDP,
8288 },
8289 {
8290 "meta access, test6",
8291 .insns = {
8292 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8293 offsetof(struct xdp_md, data_meta)),
8294 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8295 offsetof(struct xdp_md, data)),
8296 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8298 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8300 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
8301 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8302 BPF_MOV64_IMM(BPF_REG_0, 0),
8303 BPF_EXIT_INSN(),
8304 },
8305 .result = REJECT,
8306 .errstr = "invalid access to packet",
8307 .prog_type = BPF_PROG_TYPE_XDP,
8308 },
8309 {
8310 "meta access, test7",
8311 .insns = {
8312 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8313 offsetof(struct xdp_md, data_meta)),
8314 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8315 offsetof(struct xdp_md, data)),
8316 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8318 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8319 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8320 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8321 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8322 BPF_MOV64_IMM(BPF_REG_0, 0),
8323 BPF_EXIT_INSN(),
8324 },
8325 .result = ACCEPT,
8326 .prog_type = BPF_PROG_TYPE_XDP,
8327 },
8328 {
8329 "meta access, test8",
8330 .insns = {
8331 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8332 offsetof(struct xdp_md, data_meta)),
8333 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8334 offsetof(struct xdp_md, data)),
8335 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8337 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8338 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8339 BPF_MOV64_IMM(BPF_REG_0, 0),
8340 BPF_EXIT_INSN(),
8341 },
8342 .result = ACCEPT,
8343 .prog_type = BPF_PROG_TYPE_XDP,
8344 },
8345 {
8346 "meta access, test9",
8347 .insns = {
8348 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8349 offsetof(struct xdp_md, data_meta)),
8350 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8351 offsetof(struct xdp_md, data)),
8352 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8353 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8354 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8355 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8356 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8357 BPF_MOV64_IMM(BPF_REG_0, 0),
8358 BPF_EXIT_INSN(),
8359 },
8360 .result = REJECT,
8361 .errstr = "invalid access to packet",
8362 .prog_type = BPF_PROG_TYPE_XDP,
8363 },
8364 {
8365 "meta access, test10",
8366 .insns = {
8367 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8368 offsetof(struct xdp_md, data_meta)),
8369 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8370 offsetof(struct xdp_md, data)),
8371 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8372 offsetof(struct xdp_md, data_end)),
8373 BPF_MOV64_IMM(BPF_REG_5, 42),
8374 BPF_MOV64_IMM(BPF_REG_6, 24),
8375 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8376 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8377 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8378 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8379 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
8380 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8381 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8383 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
8384 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
8385 BPF_MOV64_IMM(BPF_REG_0, 0),
8386 BPF_EXIT_INSN(),
8387 },
8388 .result = REJECT,
8389 .errstr = "invalid access to packet",
8390 .prog_type = BPF_PROG_TYPE_XDP,
8391 },
8392 {
8393 "meta access, test11",
8394 .insns = {
8395 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8396 offsetof(struct xdp_md, data_meta)),
8397 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8398 offsetof(struct xdp_md, data)),
8399 BPF_MOV64_IMM(BPF_REG_5, 42),
8400 BPF_MOV64_IMM(BPF_REG_6, 24),
8401 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8402 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8403 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8404 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8405 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
8406 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8407 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8409 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
8410 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
8411 BPF_MOV64_IMM(BPF_REG_0, 0),
8412 BPF_EXIT_INSN(),
8413 },
8414 .result = ACCEPT,
8415 .prog_type = BPF_PROG_TYPE_XDP,
8416 },
8417 {
8418 "meta access, test12",
8419 .insns = {
8420 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8421 offsetof(struct xdp_md, data_meta)),
8422 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8423 offsetof(struct xdp_md, data)),
8424 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8425 offsetof(struct xdp_md, data_end)),
8426 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8427 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8428 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
8429 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8430 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8431 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8432 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
8433 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8434 BPF_MOV64_IMM(BPF_REG_0, 0),
8435 BPF_EXIT_INSN(),
8436 },
8437 .result = ACCEPT,
8438 .prog_type = BPF_PROG_TYPE_XDP,
8439 },
8440 {
8441 "arithmetic ops make PTR_TO_CTX unusable",
8442 .insns = {
8443 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
8444 offsetof(struct __sk_buff, data) -
8445 offsetof(struct __sk_buff, mark)),
8446 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8447 offsetof(struct __sk_buff, mark)),
8448 BPF_EXIT_INSN(),
8449 },
8450 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
8451 .result = REJECT,
8452 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8453 },
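	/* Subtracting the packet start from pkt_end yields the packet length
	 * as a plain scalar, which is allowed; the program returns it
	 * (TEST_DATA_LEN for the test input).
	 */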
8454 {
8455 "pkt_end - pkt_start is allowed",
8456 .insns = {
8457 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8458 offsetof(struct __sk_buff, data_end)),
8459 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8460 offsetof(struct __sk_buff, data)),
8461 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
8462 BPF_EXIT_INSN(),
8463 },
8464 .result = ACCEPT,
8465 .retval = TEST_DATA_LEN,
8466 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8467 },
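	/* The long "XDP pkt read" series below walks through every
	 * comparison direction (>, <, >=, <=, in both operand orders)
	 * between a derived packet pointer and pkt_end, with a good and a
	 * bad offset on each side of the branch. Mangling pkt_end itself is
	 * rejected as pointer arithmetic on PTR_TO_PACKET_END.
	 */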
8468 {
8469 "XDP pkt read, pkt_end mangling, bad access 1",
8470 .insns = {
8471 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8472 offsetof(struct xdp_md, data)),
8473 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8474 offsetof(struct xdp_md, data_end)),
8475 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8476 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
8478 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8479 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8480 BPF_MOV64_IMM(BPF_REG_0, 0),
8481 BPF_EXIT_INSN(),
8482 },
8483 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8484 .result = REJECT,
8485 .prog_type = BPF_PROG_TYPE_XDP,
8486 },
8487 {
8488 "XDP pkt read, pkt_end mangling, bad access 2",
8489 .insns = {
8490 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8491 offsetof(struct xdp_md, data)),
8492 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8493 offsetof(struct xdp_md, data_end)),
8494 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8496 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
8497 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8498 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8499 BPF_MOV64_IMM(BPF_REG_0, 0),
8500 BPF_EXIT_INSN(),
8501 },
8502 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8503 .result = REJECT,
8504 .prog_type = BPF_PROG_TYPE_XDP,
8505 },
8506 {
8507 "XDP pkt read, pkt_data' > pkt_end, good access",
8508 .insns = {
8509 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8510 offsetof(struct xdp_md, data)),
8511 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8512 offsetof(struct xdp_md, data_end)),
8513 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8515 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8516 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8517 BPF_MOV64_IMM(BPF_REG_0, 0),
8518 BPF_EXIT_INSN(),
8519 },
8520 .result = ACCEPT,
8521 .prog_type = BPF_PROG_TYPE_XDP,
8522 },
8523 {
8524 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
8525 .insns = {
8526 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8527 offsetof(struct xdp_md, data)),
8528 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8529 offsetof(struct xdp_md, data_end)),
8530 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8532 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8533 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8534 BPF_MOV64_IMM(BPF_REG_0, 0),
8535 BPF_EXIT_INSN(),
8536 },
8537 .errstr = "R1 offset is outside of the packet",
8538 .result = REJECT,
8539 .prog_type = BPF_PROG_TYPE_XDP,
8540 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8541 },
8542 {
8543 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
8544 .insns = {
8545 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8546 offsetof(struct xdp_md, data)),
8547 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8548 offsetof(struct xdp_md, data_end)),
8549 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8551 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8552 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8553 BPF_MOV64_IMM(BPF_REG_0, 0),
8554 BPF_EXIT_INSN(),
8555 },
8556 .errstr = "R1 offset is outside of the packet",
8557 .result = REJECT,
8558 .prog_type = BPF_PROG_TYPE_XDP,
8559 },
8560 {
8561 "XDP pkt read, pkt_end > pkt_data', good access",
8562 .insns = {
8563 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8564 offsetof(struct xdp_md, data)),
8565 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8566 offsetof(struct xdp_md, data_end)),
8567 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8569 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8570 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8571 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8572 BPF_MOV64_IMM(BPF_REG_0, 0),
8573 BPF_EXIT_INSN(),
8574 },
8575 .result = ACCEPT,
8576 .prog_type = BPF_PROG_TYPE_XDP,
8577 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8578 },
8579 {
8580 "XDP pkt read, pkt_end > pkt_data', bad access 1",
8581 .insns = {
8582 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8583 offsetof(struct xdp_md, data)),
8584 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8585 offsetof(struct xdp_md, data_end)),
8586 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8588 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8589 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8590 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8591 BPF_MOV64_IMM(BPF_REG_0, 0),
8592 BPF_EXIT_INSN(),
8593 },
8594 .errstr = "R1 offset is outside of the packet",
8595 .result = REJECT,
8596 .prog_type = BPF_PROG_TYPE_XDP,
8597 },
8598 {
8599 "XDP pkt read, pkt_end > pkt_data', bad access 2",
8600 .insns = {
8601 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8602 offsetof(struct xdp_md, data)),
8603 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8604 offsetof(struct xdp_md, data_end)),
8605 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8607 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8608 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8609 BPF_MOV64_IMM(BPF_REG_0, 0),
8610 BPF_EXIT_INSN(),
8611 },
8612 .errstr = "R1 offset is outside of the packet",
8613 .result = REJECT,
8614 .prog_type = BPF_PROG_TYPE_XDP,
8615 },
8616 {
8617 "XDP pkt read, pkt_data' < pkt_end, good access",
8618 .insns = {
8619 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8620 offsetof(struct xdp_md, data)),
8621 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8622 offsetof(struct xdp_md, data_end)),
8623 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8625 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8626 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8627 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8628 BPF_MOV64_IMM(BPF_REG_0, 0),
8629 BPF_EXIT_INSN(),
8630 },
8631 .result = ACCEPT,
8632 .prog_type = BPF_PROG_TYPE_XDP,
8633 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8634 },
8635 {
8636 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
8637 .insns = {
8638 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8639 offsetof(struct xdp_md, data)),
8640 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8641 offsetof(struct xdp_md, data_end)),
8642 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8644 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8645 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8646 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8647 BPF_MOV64_IMM(BPF_REG_0, 0),
8648 BPF_EXIT_INSN(),
8649 },
8650 .errstr = "R1 offset is outside of the packet",
8651 .result = REJECT,
8652 .prog_type = BPF_PROG_TYPE_XDP,
8653 },
8654 {
8655 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
8656 .insns = {
8657 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8658 offsetof(struct xdp_md, data)),
8659 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8660 offsetof(struct xdp_md, data_end)),
8661 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8663 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8664 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8665 BPF_MOV64_IMM(BPF_REG_0, 0),
8666 BPF_EXIT_INSN(),
8667 },
8668 .errstr = "R1 offset is outside of the packet",
8669 .result = REJECT,
8670 .prog_type = BPF_PROG_TYPE_XDP,
8671 },
8672 {
8673 "XDP pkt read, pkt_end < pkt_data', good access",
8674 .insns = {
8675 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8676 offsetof(struct xdp_md, data)),
8677 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8678 offsetof(struct xdp_md, data_end)),
8679 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8681 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8682 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8683 BPF_MOV64_IMM(BPF_REG_0, 0),
8684 BPF_EXIT_INSN(),
8685 },
8686 .result = ACCEPT,
8687 .prog_type = BPF_PROG_TYPE_XDP,
8688 },
8689 {
8690 "XDP pkt read, pkt_end < pkt_data', bad access 1",
8691 .insns = {
8692 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8693 offsetof(struct xdp_md, data)),
8694 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8695 offsetof(struct xdp_md, data_end)),
8696 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8698 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8699 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8700 BPF_MOV64_IMM(BPF_REG_0, 0),
8701 BPF_EXIT_INSN(),
8702 },
8703 .errstr = "R1 offset is outside of the packet",
8704 .result = REJECT,
8705 .prog_type = BPF_PROG_TYPE_XDP,
8706 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8707 },
8708 {
8709 "XDP pkt read, pkt_end < pkt_data', bad access 2",
8710 .insns = {
8711 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8712 offsetof(struct xdp_md, data)),
8713 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8714 offsetof(struct xdp_md, data_end)),
8715 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8716 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8717 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8718 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8719 BPF_MOV64_IMM(BPF_REG_0, 0),
8720 BPF_EXIT_INSN(),
8721 },
8722 .errstr = "R1 offset is outside of the packet",
8723 .result = REJECT,
8724 .prog_type = BPF_PROG_TYPE_XDP,
8725 },
8726 {
8727 "XDP pkt read, pkt_data' >= pkt_end, good access",
8728 .insns = {
8729 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8730 offsetof(struct xdp_md, data)),
8731 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8732 offsetof(struct xdp_md, data_end)),
8733 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8735 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8736 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8737 BPF_MOV64_IMM(BPF_REG_0, 0),
8738 BPF_EXIT_INSN(),
8739 },
8740 .result = ACCEPT,
8741 .prog_type = BPF_PROG_TYPE_XDP,
8742 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8743 },
8744 {
8745 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
8746 .insns = {
8747 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8748 offsetof(struct xdp_md, data)),
8749 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8750 offsetof(struct xdp_md, data_end)),
8751 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8753 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8754 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8755 BPF_MOV64_IMM(BPF_REG_0, 0),
8756 BPF_EXIT_INSN(),
8757 },
8758 .errstr = "R1 offset is outside of the packet",
8759 .result = REJECT,
8760 .prog_type = BPF_PROG_TYPE_XDP,
8761 },
8762 {
8763 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
8764 .insns = {
8765 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8766 offsetof(struct xdp_md, data)),
8767 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8768 offsetof(struct xdp_md, data_end)),
8769 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8770 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8771 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8772 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8773 BPF_MOV64_IMM(BPF_REG_0, 0),
8774 BPF_EXIT_INSN(),
8775 },
8776 .errstr = "R1 offset is outside of the packet",
8777 .result = REJECT,
8778 .prog_type = BPF_PROG_TYPE_XDP,
8779 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8780 },
8781 {
8782 "XDP pkt read, pkt_end >= pkt_data', good access",
8783 .insns = {
8784 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8785 offsetof(struct xdp_md, data)),
8786 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8787 offsetof(struct xdp_md, data_end)),
8788 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8789 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8790 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8791 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8792 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8793 BPF_MOV64_IMM(BPF_REG_0, 0),
8794 BPF_EXIT_INSN(),
8795 },
8796 .result = ACCEPT,
8797 .prog_type = BPF_PROG_TYPE_XDP,
8798 },
8799 {
8800 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
8801 .insns = {
8802 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8803 offsetof(struct xdp_md, data)),
8804 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8805 offsetof(struct xdp_md, data_end)),
8806 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8807 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8808 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8809 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8810 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8811 BPF_MOV64_IMM(BPF_REG_0, 0),
8812 BPF_EXIT_INSN(),
8813 },
8814 .errstr = "R1 offset is outside of the packet",
8815 .result = REJECT,
8816 .prog_type = BPF_PROG_TYPE_XDP,
8817 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8818 },
8819 {
8820 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
8821 .insns = {
8822 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8823 offsetof(struct xdp_md, data)),
8824 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8825 offsetof(struct xdp_md, data_end)),
8826 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8828 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8829 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8830 BPF_MOV64_IMM(BPF_REG_0, 0),
8831 BPF_EXIT_INSN(),
8832 },
8833 .errstr = "R1 offset is outside of the packet",
8834 .result = REJECT,
8835 .prog_type = BPF_PROG_TYPE_XDP,
8836 },
8837 {
8838 "XDP pkt read, pkt_data' <= pkt_end, good access",
8839 .insns = {
8840 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8841 offsetof(struct xdp_md, data)),
8842 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8843 offsetof(struct xdp_md, data_end)),
8844 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8846 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8847 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8848 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8849 BPF_MOV64_IMM(BPF_REG_0, 0),
8850 BPF_EXIT_INSN(),
8851 },
8852 .result = ACCEPT,
8853 .prog_type = BPF_PROG_TYPE_XDP,
8854 },
8855 {
8856 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
8857 .insns = {
8858 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8859 offsetof(struct xdp_md, data)),
8860 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8861 offsetof(struct xdp_md, data_end)),
8862 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8863 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8864 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8865 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8866 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8867 BPF_MOV64_IMM(BPF_REG_0, 0),
8868 BPF_EXIT_INSN(),
8869 },
8870 .errstr = "R1 offset is outside of the packet",
8871 .result = REJECT,
8872 .prog_type = BPF_PROG_TYPE_XDP,
8873 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8874 },
8875 {
8876 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
8877 .insns = {
8878 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8879 offsetof(struct xdp_md, data)),
8880 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8881 offsetof(struct xdp_md, data_end)),
8882 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8884 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8885 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8886 BPF_MOV64_IMM(BPF_REG_0, 0),
8887 BPF_EXIT_INSN(),
8888 },
8889 .errstr = "R1 offset is outside of the packet",
8890 .result = REJECT,
8891 .prog_type = BPF_PROG_TYPE_XDP,
8892 },
8893 {
8894 "XDP pkt read, pkt_end <= pkt_data', good access",
8895 .insns = {
8896 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8897 offsetof(struct xdp_md, data)),
8898 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8899 offsetof(struct xdp_md, data_end)),
8900 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8902 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8903 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8904 BPF_MOV64_IMM(BPF_REG_0, 0),
8905 BPF_EXIT_INSN(),
8906 },
8907 .result = ACCEPT,
8908 .prog_type = BPF_PROG_TYPE_XDP,
8909 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8910 },
8911 {
8912 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
8913 .insns = {
8914 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8915 offsetof(struct xdp_md, data)),
8916 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8917 offsetof(struct xdp_md, data_end)),
8918 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8920 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8921 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8922 BPF_MOV64_IMM(BPF_REG_0, 0),
8923 BPF_EXIT_INSN(),
8924 },
8925 .errstr = "R1 offset is outside of the packet",
8926 .result = REJECT,
8927 .prog_type = BPF_PROG_TYPE_XDP,
8928 },
8929 {
8930 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
8931 .insns = {
8932 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8933 offsetof(struct xdp_md, data)),
8934 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8935 offsetof(struct xdp_md, data_end)),
8936 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8938 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8939 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8940 BPF_MOV64_IMM(BPF_REG_0, 0),
8941 BPF_EXIT_INSN(),
8942 },
8943 .errstr = "R1 offset is outside of the packet",
8944 .result = REJECT,
8945 .prog_type = BPF_PROG_TYPE_XDP,
8946 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8947 },
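	/* The same comparison matrix as above, repeated for pkt_meta'
	 * against pkt_data.
	 */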
8948 {
8949 "XDP pkt read, pkt_meta' > pkt_data, good access",
8950 .insns = {
8951 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8952 offsetof(struct xdp_md, data_meta)),
8953 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8954 offsetof(struct xdp_md, data)),
8955 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8956 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8957 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8958 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8959 BPF_MOV64_IMM(BPF_REG_0, 0),
8960 BPF_EXIT_INSN(),
8961 },
8962 .result = ACCEPT,
8963 .prog_type = BPF_PROG_TYPE_XDP,
8964 },
8965 {
8966 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
8967 .insns = {
8968 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8969 offsetof(struct xdp_md, data_meta)),
8970 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8971 offsetof(struct xdp_md, data)),
8972 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8973 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8974 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8975 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8976 BPF_MOV64_IMM(BPF_REG_0, 0),
8977 BPF_EXIT_INSN(),
8978 },
8979 .errstr = "R1 offset is outside of the packet",
8980 .result = REJECT,
8981 .prog_type = BPF_PROG_TYPE_XDP,
8982 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8983 },
8984 {
8985 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
8986 .insns = {
8987 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8988 offsetof(struct xdp_md, data_meta)),
8989 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8990 offsetof(struct xdp_md, data)),
8991 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8993 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8994 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8995 BPF_MOV64_IMM(BPF_REG_0, 0),
8996 BPF_EXIT_INSN(),
8997 },
8998 .errstr = "R1 offset is outside of the packet",
8999 .result = REJECT,
9000 .prog_type = BPF_PROG_TYPE_XDP,
9001 },
9002 {
9003 "XDP pkt read, pkt_data > pkt_meta', good access",
9004 .insns = {
9005 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9006 offsetof(struct xdp_md, data_meta)),
9007 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9008 offsetof(struct xdp_md, data)),
9009 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9010 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9011 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9012 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9013 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9014 BPF_MOV64_IMM(BPF_REG_0, 0),
9015 BPF_EXIT_INSN(),
9016 },
9017 .result = ACCEPT,
9018 .prog_type = BPF_PROG_TYPE_XDP,
9019 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9020 },
9021 {
9022 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
9023 .insns = {
9024 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9025 offsetof(struct xdp_md, data_meta)),
9026 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9027 offsetof(struct xdp_md, data)),
9028 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9029 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9030 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9031 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9032 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9033 BPF_MOV64_IMM(BPF_REG_0, 0),
9034 BPF_EXIT_INSN(),
9035 },
9036 .errstr = "R1 offset is outside of the packet",
9037 .result = REJECT,
9038 .prog_type = BPF_PROG_TYPE_XDP,
9039 },
9040 {
9041 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
9042 .insns = {
9043 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9044 offsetof(struct xdp_md, data_meta)),
9045 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9046 offsetof(struct xdp_md, data)),
9047 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9048 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9049 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9050 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9051 BPF_MOV64_IMM(BPF_REG_0, 0),
9052 BPF_EXIT_INSN(),
9053 },
9054 .errstr = "R1 offset is outside of the packet",
9055 .result = REJECT,
9056 .prog_type = BPF_PROG_TYPE_XDP,
9057 },
9058 {
9059 "XDP pkt read, pkt_meta' < pkt_data, good access",
9060 .insns = {
9061 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9062 offsetof(struct xdp_md, data_meta)),
9063 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9064 offsetof(struct xdp_md, data)),
9065 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9067 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9068 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9069 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9070 BPF_MOV64_IMM(BPF_REG_0, 0),
9071 BPF_EXIT_INSN(),
9072 },
9073 .result = ACCEPT,
9074 .prog_type = BPF_PROG_TYPE_XDP,
9075 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9076 },
9077 {
9078 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
9079 .insns = {
9080 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9081 offsetof(struct xdp_md, data_meta)),
9082 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9083 offsetof(struct xdp_md, data)),
9084 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9086 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9087 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9088 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9089 BPF_MOV64_IMM(BPF_REG_0, 0),
9090 BPF_EXIT_INSN(),
9091 },
9092 .errstr = "R1 offset is outside of the packet",
9093 .result = REJECT,
9094 .prog_type = BPF_PROG_TYPE_XDP,
9095 },
9096 {
9097 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
9098 .insns = {
9099 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9100 offsetof(struct xdp_md, data_meta)),
9101 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9102 offsetof(struct xdp_md, data)),
9103 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9105 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9106 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9107 BPF_MOV64_IMM(BPF_REG_0, 0),
9108 BPF_EXIT_INSN(),
9109 },
9110 .errstr = "R1 offset is outside of the packet",
9111 .result = REJECT,
9112 .prog_type = BPF_PROG_TYPE_XDP,
9113 },
9114 {
9115 "XDP pkt read, pkt_data < pkt_meta', good access",
9116 .insns = {
9117 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9118 offsetof(struct xdp_md, data_meta)),
9119 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9120 offsetof(struct xdp_md, data)),
9121 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9123 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9124 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9125 BPF_MOV64_IMM(BPF_REG_0, 0),
9126 BPF_EXIT_INSN(),
9127 },
9128 .result = ACCEPT,
9129 .prog_type = BPF_PROG_TYPE_XDP,
9130 },
9131 {
9132 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
9133 .insns = {
9134 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9135 offsetof(struct xdp_md, data_meta)),
9136 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9137 offsetof(struct xdp_md, data)),
9138 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9139 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9140 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9141 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9142 BPF_MOV64_IMM(BPF_REG_0, 0),
9143 BPF_EXIT_INSN(),
9144 },
9145 .errstr = "R1 offset is outside of the packet",
9146 .result = REJECT,
9147 .prog_type = BPF_PROG_TYPE_XDP,
9148 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9149 },
9150 {
9151 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
9152 .insns = {
9153 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9154 offsetof(struct xdp_md, data_meta)),
9155 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9156 offsetof(struct xdp_md, data)),
9157 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9158 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9159 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9160 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9161 BPF_MOV64_IMM(BPF_REG_0, 0),
9162 BPF_EXIT_INSN(),
9163 },
9164 .errstr = "R1 offset is outside of the packet",
9165 .result = REJECT,
9166 .prog_type = BPF_PROG_TYPE_XDP,
9167 },
9168 {
9169 "XDP pkt read, pkt_meta' >= pkt_data, good access",
9170 .insns = {
9171 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9172 offsetof(struct xdp_md, data_meta)),
9173 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9174 offsetof(struct xdp_md, data)),
9175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9177 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9178 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9179 BPF_MOV64_IMM(BPF_REG_0, 0),
9180 BPF_EXIT_INSN(),
9181 },
9182 .result = ACCEPT,
9183 .prog_type = BPF_PROG_TYPE_XDP,
9184 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9185 },
9186 {
9187 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
9188 .insns = {
9189 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9190 offsetof(struct xdp_md, data_meta)),
9191 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9192 offsetof(struct xdp_md, data)),
9193 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9195 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9196 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9197 BPF_MOV64_IMM(BPF_REG_0, 0),
9198 BPF_EXIT_INSN(),
9199 },
9200 .errstr = "R1 offset is outside of the packet",
9201 .result = REJECT,
9202 .prog_type = BPF_PROG_TYPE_XDP,
9203 },
9204 {
9205 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
9206 .insns = {
9207 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9208 offsetof(struct xdp_md, data_meta)),
9209 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9210 offsetof(struct xdp_md, data)),
9211 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9213 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9214 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9215 BPF_MOV64_IMM(BPF_REG_0, 0),
9216 BPF_EXIT_INSN(),
9217 },
9218 .errstr = "R1 offset is outside of the packet",
9219 .result = REJECT,
9220 .prog_type = BPF_PROG_TYPE_XDP,
9221 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9222 },
9223 {
9224 "XDP pkt read, pkt_data >= pkt_meta', good access",
9225 .insns = {
9226 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9227 offsetof(struct xdp_md, data_meta)),
9228 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9229 offsetof(struct xdp_md, data)),
9230 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9231 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9232 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9233 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9234 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9235 BPF_MOV64_IMM(BPF_REG_0, 0),
9236 BPF_EXIT_INSN(),
9237 },
9238 .result = ACCEPT,
9239 .prog_type = BPF_PROG_TYPE_XDP,
9240 },
9241 {
9242 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
9243 .insns = {
9244 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9245 offsetof(struct xdp_md, data_meta)),
9246 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9247 offsetof(struct xdp_md, data)),
9248 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9249 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9250 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9251 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9252 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9253 BPF_MOV64_IMM(BPF_REG_0, 0),
9254 BPF_EXIT_INSN(),
9255 },
9256 .errstr = "R1 offset is outside of the packet",
9257 .result = REJECT,
9258 .prog_type = BPF_PROG_TYPE_XDP,
9259 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9260 },
9261 {
9262 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
9263 .insns = {
9264 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9265 offsetof(struct xdp_md, data_meta)),
9266 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9267 offsetof(struct xdp_md, data)),
9268 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9270 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9271 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9272 BPF_MOV64_IMM(BPF_REG_0, 0),
9273 BPF_EXIT_INSN(),
9274 },
9275 .errstr = "R1 offset is outside of the packet",
9276 .result = REJECT,
9277 .prog_type = BPF_PROG_TYPE_XDP,
9278 },
9279 {
9280 "XDP pkt read, pkt_meta' <= pkt_data, good access",
9281 .insns = {
9282 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9283 offsetof(struct xdp_md, data_meta)),
9284 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9285 offsetof(struct xdp_md, data)),
9286 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9288 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9289 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9290 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9291 BPF_MOV64_IMM(BPF_REG_0, 0),
9292 BPF_EXIT_INSN(),
9293 },
9294 .result = ACCEPT,
9295 .prog_type = BPF_PROG_TYPE_XDP,
9296 },
9297 {
9298 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
9299 .insns = {
9300 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9301 offsetof(struct xdp_md, data_meta)),
9302 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9303 offsetof(struct xdp_md, data)),
9304 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9305 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9306 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9307 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9308 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9309 BPF_MOV64_IMM(BPF_REG_0, 0),
9310 BPF_EXIT_INSN(),
9311 },
9312 .errstr = "R1 offset is outside of the packet",
9313 .result = REJECT,
9314 .prog_type = BPF_PROG_TYPE_XDP,
9315 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9316 },
9317 {
9318 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
9319 .insns = {
9320 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9321 offsetof(struct xdp_md, data_meta)),
9322 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9323 offsetof(struct xdp_md, data)),
9324 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9325 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9326 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9327 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9328 BPF_MOV64_IMM(BPF_REG_0, 0),
9329 BPF_EXIT_INSN(),
9330 },
9331 .errstr = "R1 offset is outside of the packet",
9332 .result = REJECT,
9333 .prog_type = BPF_PROG_TYPE_XDP,
9334 },
9335 {
9336 "XDP pkt read, pkt_data <= pkt_meta', good access",
9337 .insns = {
9338 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9339 offsetof(struct xdp_md, data_meta)),
9340 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9341 offsetof(struct xdp_md, data)),
9342 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9344 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9345 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9346 BPF_MOV64_IMM(BPF_REG_0, 0),
9347 BPF_EXIT_INSN(),
9348 },
9349 .result = ACCEPT,
9350 .prog_type = BPF_PROG_TYPE_XDP,
9351 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9352 },
9353 {
9354 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
9355 .insns = {
9356 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9357 offsetof(struct xdp_md, data_meta)),
9358 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9359 offsetof(struct xdp_md, data)),
9360 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9362 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9363 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9364 BPF_MOV64_IMM(BPF_REG_0, 0),
9365 BPF_EXIT_INSN(),
9366 },
9367 .errstr = "R1 offset is outside of the packet",
9368 .result = REJECT,
9369 .prog_type = BPF_PROG_TYPE_XDP,
9370 },
9371 {
9372 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
9373 .insns = {
9374 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9375 offsetof(struct xdp_md, data_meta)),
9376 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9377 offsetof(struct xdp_md, data)),
9378 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9379 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9380 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9381 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9382 BPF_MOV64_IMM(BPF_REG_0, 0),
9383 BPF_EXIT_INSN(),
9384 },
9385 .errstr = "R1 offset is outside of the packet",
9386 .result = REJECT,
9387 .prog_type = BPF_PROG_TYPE_XDP,
9388 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9389 },
9390 {
Daniel Borkmann6f161012018-01-18 01:15:21 +01009391 "check deducing bounds from const, 1",
9392 .insns = {
9393 BPF_MOV64_IMM(BPF_REG_0, 1),
9394 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
9395 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9396 BPF_EXIT_INSN(),
9397 },
9398 .result = REJECT,
9399 .errstr = "R0 tried to subtract pointer from scalar",
9400 },
9401 {
9402 "check deducing bounds from const, 2",
9403 .insns = {
9404 BPF_MOV64_IMM(BPF_REG_0, 1),
9405 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
9406 BPF_EXIT_INSN(),
9407 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
9408 BPF_EXIT_INSN(),
9409 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9410 BPF_EXIT_INSN(),
9411 },
9412 .result = ACCEPT,
Yonghong Song35136922018-01-22 22:10:59 -08009413 .retval = 1,
Daniel Borkmann6f161012018-01-18 01:15:21 +01009414 },
9415 {
9416 "check deducing bounds from const, 3",
9417 .insns = {
9418 BPF_MOV64_IMM(BPF_REG_0, 0),
9419 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9420 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9421 BPF_EXIT_INSN(),
9422 },
9423 .result = REJECT,
9424 .errstr = "R0 tried to subtract pointer from scalar",
9425 },
9426 {
9427 "check deducing bounds from const, 4",
9428 .insns = {
9429 BPF_MOV64_IMM(BPF_REG_0, 0),
9430 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
9431 BPF_EXIT_INSN(),
9432 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9433 BPF_EXIT_INSN(),
9434 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9435 BPF_EXIT_INSN(),
9436 },
9437 .result = ACCEPT,
9438 },
9439 {
9440 "check deducing bounds from const, 5",
9441 .insns = {
9442 BPF_MOV64_IMM(BPF_REG_0, 0),
9443 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9444 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9445 BPF_EXIT_INSN(),
9446 },
9447 .result = REJECT,
9448 .errstr = "R0 tried to subtract pointer from scalar",
9449 },
9450 {
9451 "check deducing bounds from const, 6",
9452 .insns = {
9453 BPF_MOV64_IMM(BPF_REG_0, 0),
9454 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9455 BPF_EXIT_INSN(),
9456 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9457 BPF_EXIT_INSN(),
9458 },
9459 .result = REJECT,
9460 .errstr = "R0 tried to subtract pointer from scalar",
9461 },
9462 {
9463 "check deducing bounds from const, 7",
9464 .insns = {
9465 BPF_MOV64_IMM(BPF_REG_0, ~0),
9466 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9467 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9468 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9469 offsetof(struct __sk_buff, mark)),
9470 BPF_EXIT_INSN(),
9471 },
9472 .result = REJECT,
9473 .errstr = "dereference of modified ctx ptr",
9474 },
9475 {
9476 "check deducing bounds from const, 8",
9477 .insns = {
9478 BPF_MOV64_IMM(BPF_REG_0, ~0),
9479 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9480 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
9481 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9482 offsetof(struct __sk_buff, mark)),
9483 BPF_EXIT_INSN(),
9484 },
9485 .result = REJECT,
9486 .errstr = "dereference of modified ctx ptr",
9487 },
9488 {
9489 "check deducing bounds from const, 9",
9490 .insns = {
9491 BPF_MOV64_IMM(BPF_REG_0, 0),
9492 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9493 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9494 BPF_EXIT_INSN(),
9495 },
9496 .result = REJECT,
9497 .errstr = "R0 tried to subtract pointer from scalar",
9498 },
9499 {
9500 "check deducing bounds from const, 10",
9501 .insns = {
9502 BPF_MOV64_IMM(BPF_REG_0, 0),
9503 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9504 /* Marks reg as unknown. */
9505 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
9506 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9507 BPF_EXIT_INSN(),
9508 },
9509 .result = REJECT,
9510 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
9511 },
9512 {
Daniel Borkmannb06723d2017-11-01 23:58:09 +01009513 "bpf_exit with invalid return code. test1",
9514 .insns = {
9515 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9516 BPF_EXIT_INSN(),
9517 },
9518 .errstr = "R0 has value (0x0; 0xffffffff)",
9519 .result = REJECT,
9520 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9521 },
9522 {
9523 "bpf_exit with invalid return code. test2",
9524 .insns = {
9525 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9526 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
9527 BPF_EXIT_INSN(),
9528 },
9529 .result = ACCEPT,
9530 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9531 },
9532 {
9533 "bpf_exit with invalid return code. test3",
9534 .insns = {
9535 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9536 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
9537 BPF_EXIT_INSN(),
9538 },
9539 .errstr = "R0 has value (0x0; 0x3)",
9540 .result = REJECT,
9541 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9542 },
9543 {
9544 "bpf_exit with invalid return code. test4",
9545 .insns = {
9546 BPF_MOV64_IMM(BPF_REG_0, 1),
9547 BPF_EXIT_INSN(),
9548 },
9549 .result = ACCEPT,
9550 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9551 },
9552 {
9553 "bpf_exit with invalid return code. test5",
9554 .insns = {
9555 BPF_MOV64_IMM(BPF_REG_0, 2),
9556 BPF_EXIT_INSN(),
9557 },
9558 .errstr = "R0 has value (0x2; 0x0)",
9559 .result = REJECT,
9560 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9561 },
9562 {
9563 "bpf_exit with invalid return code. test6",
9564 .insns = {
9565 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9566 BPF_EXIT_INSN(),
9567 },
9568 .errstr = "R0 is not a known value (ctx)",
9569 .result = REJECT,
9570 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9571 },
9572 {
9573 "bpf_exit with invalid return code. test7",
9574 .insns = {
9575 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9576 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
9577 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
9578 BPF_EXIT_INSN(),
9579 },
9580 .errstr = "R0 has unknown scalar value",
9581 .result = REJECT,
9582 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9583 },
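	/* The "calls:" tests exercise bpf-to-bpf function calls: BPF_JMP|BPF_CALL
	 * with src_reg set to 1 (pseudo call) and imm holding the callee's
	 * instruction offset relative to the instruction after the call.
	 */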
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009584 {
9585 "calls: basic sanity",
9586 .insns = {
9587 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9588 BPF_MOV64_IMM(BPF_REG_0, 1),
9589 BPF_EXIT_INSN(),
9590 BPF_MOV64_IMM(BPF_REG_0, 2),
9591 BPF_EXIT_INSN(),
9592 },
9593 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9594 .result = ACCEPT,
9595 },
9596 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009597	"calls: not on unprivileged",
9598 .insns = {
9599 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9600 BPF_MOV64_IMM(BPF_REG_0, 1),
9601 BPF_EXIT_INSN(),
9602 BPF_MOV64_IMM(BPF_REG_0, 2),
9603 BPF_EXIT_INSN(),
9604 },
9605 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
9606 .result_unpriv = REJECT,
9607 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009608 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009609 },
9610 {
Daniel Borkmann21ccaf22018-01-26 23:33:48 +01009611 "calls: div by 0 in subprog",
9612 .insns = {
9613 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9614 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9615 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9616 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9617 offsetof(struct __sk_buff, data_end)),
9618 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9620 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9621 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9622 BPF_MOV64_IMM(BPF_REG_0, 1),
9623 BPF_EXIT_INSN(),
9624 BPF_MOV32_IMM(BPF_REG_2, 0),
9625 BPF_MOV32_IMM(BPF_REG_3, 1),
9626 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
9627 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9628 offsetof(struct __sk_buff, data)),
9629 BPF_EXIT_INSN(),
9630 },
9631 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9632 .result = ACCEPT,
9633 .retval = 1,
9634 },
9635 {
9636 "calls: multiple ret types in subprog 1",
9637 .insns = {
9638 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9639 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9640 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9641 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9642 offsetof(struct __sk_buff, data_end)),
9643 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9645 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9646 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9647 BPF_MOV64_IMM(BPF_REG_0, 1),
9648 BPF_EXIT_INSN(),
9649 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9650 offsetof(struct __sk_buff, data)),
9651 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9652 BPF_MOV32_IMM(BPF_REG_0, 42),
9653 BPF_EXIT_INSN(),
9654 },
9655 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9656 .result = REJECT,
9657 .errstr = "R0 invalid mem access 'inv'",
9658 },
9659 {
9660 "calls: multiple ret types in subprog 2",
9661 .insns = {
9662 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9663 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9664 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9665 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9666 offsetof(struct __sk_buff, data_end)),
9667 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9668 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9669 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9670 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9671 BPF_MOV64_IMM(BPF_REG_0, 1),
9672 BPF_EXIT_INSN(),
9673 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9674 offsetof(struct __sk_buff, data)),
9675 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9676 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
9677 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9678 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9680 BPF_LD_MAP_FD(BPF_REG_1, 0),
9681 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9682 BPF_FUNC_map_lookup_elem),
9683 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9684 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
9685 offsetof(struct __sk_buff, data)),
9686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
9687 BPF_EXIT_INSN(),
9688 },
9689 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9690 .fixup_map1 = { 16 },
9691 .result = REJECT,
9692 .errstr = "R0 min value is outside of the array range",
9693 },
9694 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009695 "calls: overlapping caller/callee",
9696 .insns = {
9697 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
9698 BPF_MOV64_IMM(BPF_REG_0, 1),
9699 BPF_EXIT_INSN(),
9700 },
9701 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9702 .errstr = "last insn is not an exit or jmp",
9703 .result = REJECT,
9704 },
9705 {
9706 "calls: wrong recursive calls",
9707 .insns = {
9708 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9709 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9710 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9711 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9712 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9713 BPF_MOV64_IMM(BPF_REG_0, 1),
9714 BPF_EXIT_INSN(),
9715 },
9716 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9717 .errstr = "jump out of range",
9718 .result = REJECT,
9719 },
9720 {
9721 "calls: wrong src reg",
9722 .insns = {
9723 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
9724 BPF_MOV64_IMM(BPF_REG_0, 1),
9725 BPF_EXIT_INSN(),
9726 },
9727 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9728 .errstr = "BPF_CALL uses reserved fields",
9729 .result = REJECT,
9730 },
9731 {
9732 "calls: wrong off value",
9733 .insns = {
9734 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
9735 BPF_MOV64_IMM(BPF_REG_0, 1),
9736 BPF_EXIT_INSN(),
9737 BPF_MOV64_IMM(BPF_REG_0, 2),
9738 BPF_EXIT_INSN(),
9739 },
9740 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9741 .errstr = "BPF_CALL uses reserved fields",
9742 .result = REJECT,
9743 },
9744 {
9745 "calls: jump back loop",
9746 .insns = {
9747 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9748 BPF_MOV64_IMM(BPF_REG_0, 1),
9749 BPF_EXIT_INSN(),
9750 },
9751 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9752 .errstr = "back-edge from insn 0 to 0",
9753 .result = REJECT,
9754 },
9755 {
9756 "calls: conditional call",
9757 .insns = {
9758 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9759 offsetof(struct __sk_buff, mark)),
9760 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9761 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9762 BPF_MOV64_IMM(BPF_REG_0, 1),
9763 BPF_EXIT_INSN(),
9764 BPF_MOV64_IMM(BPF_REG_0, 2),
9765 BPF_EXIT_INSN(),
9766 },
9767 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9768 .errstr = "jump out of range",
9769 .result = REJECT,
9770 },
9771 {
9772 "calls: conditional call 2",
9773 .insns = {
9774 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9775 offsetof(struct __sk_buff, mark)),
9776 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9777 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9778 BPF_MOV64_IMM(BPF_REG_0, 1),
9779 BPF_EXIT_INSN(),
9780 BPF_MOV64_IMM(BPF_REG_0, 2),
9781 BPF_EXIT_INSN(),
9782 BPF_MOV64_IMM(BPF_REG_0, 3),
9783 BPF_EXIT_INSN(),
9784 },
9785 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9786 .result = ACCEPT,
9787 },
9788 {
9789 "calls: conditional call 3",
9790 .insns = {
9791 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9792 offsetof(struct __sk_buff, mark)),
9793 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9794 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9795 BPF_MOV64_IMM(BPF_REG_0, 1),
9796 BPF_EXIT_INSN(),
9797 BPF_MOV64_IMM(BPF_REG_0, 1),
9798 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9799 BPF_MOV64_IMM(BPF_REG_0, 3),
9800 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9801 },
9802 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9803 .errstr = "back-edge from insn",
9804 .result = REJECT,
9805 },
9806 {
9807 "calls: conditional call 4",
9808 .insns = {
9809 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9810 offsetof(struct __sk_buff, mark)),
9811 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9812 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9813 BPF_MOV64_IMM(BPF_REG_0, 1),
9814 BPF_EXIT_INSN(),
9815 BPF_MOV64_IMM(BPF_REG_0, 1),
9816 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
9817 BPF_MOV64_IMM(BPF_REG_0, 3),
9818 BPF_EXIT_INSN(),
9819 },
9820 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9821 .result = ACCEPT,
9822 },
9823 {
9824 "calls: conditional call 5",
9825 .insns = {
9826 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9827 offsetof(struct __sk_buff, mark)),
9828 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9829 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9830 BPF_MOV64_IMM(BPF_REG_0, 1),
9831 BPF_EXIT_INSN(),
9832 BPF_MOV64_IMM(BPF_REG_0, 1),
9833 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9834 BPF_MOV64_IMM(BPF_REG_0, 3),
9835 BPF_EXIT_INSN(),
9836 },
9837 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9838 .errstr = "back-edge from insn",
9839 .result = REJECT,
9840 },
9841 {
9842 "calls: conditional call 6",
9843 .insns = {
9844 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9845 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
9846 BPF_EXIT_INSN(),
9847 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9848 offsetof(struct __sk_buff, mark)),
9849 BPF_EXIT_INSN(),
9850 },
9851 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9852 .errstr = "back-edge from insn",
9853 .result = REJECT,
9854 },
9855 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009856 "calls: using r0 returned by callee",
9857 .insns = {
9858 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9859 BPF_EXIT_INSN(),
9860 BPF_MOV64_IMM(BPF_REG_0, 2),
9861 BPF_EXIT_INSN(),
9862 },
9863 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9864 .result = ACCEPT,
9865 },
9866 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009867 "calls: using uninit r0 from callee",
9868 .insns = {
9869 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9870 BPF_EXIT_INSN(),
9871 BPF_EXIT_INSN(),
9872 },
9873 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9874 .errstr = "!read_ok",
9875 .result = REJECT,
9876 },
9877 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009878 "calls: callee is using r1",
9879 .insns = {
9880 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9881 BPF_EXIT_INSN(),
9882 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9883 offsetof(struct __sk_buff, len)),
9884 BPF_EXIT_INSN(),
9885 },
9886 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
9887 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009888 .retval = TEST_DATA_LEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009889 },
9890 {
9891 "calls: callee using args1",
9892 .insns = {
9893 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9894 BPF_EXIT_INSN(),
9895 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9896 BPF_EXIT_INSN(),
9897 },
9898 .errstr_unpriv = "allowed for root only",
9899 .result_unpriv = REJECT,
9900 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009901 .retval = POINTER_VALUE,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009902 },
9903 {
9904 "calls: callee using wrong args2",
9905 .insns = {
9906 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9907 BPF_EXIT_INSN(),
9908 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9909 BPF_EXIT_INSN(),
9910 },
9911 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9912 .errstr = "R2 !read_ok",
9913 .result = REJECT,
9914 },
9915 {
9916 "calls: callee using two args",
9917 .insns = {
9918 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9919 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
9920 offsetof(struct __sk_buff, len)),
9921 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
9922 offsetof(struct __sk_buff, len)),
9923 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9924 BPF_EXIT_INSN(),
9925 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9926 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9927 BPF_EXIT_INSN(),
9928 },
9929 .errstr_unpriv = "allowed for root only",
9930 .result_unpriv = REJECT,
9931 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009932 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009933 },
9934 {
9935 "calls: callee changing pkt pointers",
9936 .insns = {
9937 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
9938 offsetof(struct xdp_md, data)),
9939 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
9940 offsetof(struct xdp_md, data_end)),
9941 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
9942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
9943 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
9944 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9945 /* clear_all_pkt_pointers() has to walk all frames
9946 * to make sure that pkt pointers in the caller
9947			 * are cleared when the callee calls a helper that
9948 * adjusts packet size
9949 */
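			/* i.e. after the call R6 may no longer be a valid packet
			 * pointer, so the load below is expected to be rejected
			 * (see .errstr).
			 */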
9950 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
9951 BPF_MOV32_IMM(BPF_REG_0, 0),
9952 BPF_EXIT_INSN(),
9953 BPF_MOV64_IMM(BPF_REG_2, 0),
9954 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9955 BPF_FUNC_xdp_adjust_head),
9956 BPF_EXIT_INSN(),
9957 },
9958 .result = REJECT,
9959 .errstr = "R6 invalid mem access 'inv'",
9960 .prog_type = BPF_PROG_TYPE_XDP,
9961 },
9962 {
9963 "calls: two calls with args",
9964 .insns = {
9965 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9966 BPF_EXIT_INSN(),
9967 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9968 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9969 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9970 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9971 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9972 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9973 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9974 BPF_EXIT_INSN(),
9975 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9976 offsetof(struct __sk_buff, len)),
9977 BPF_EXIT_INSN(),
9978 },
9979 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9980 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009981 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009982 },
9983 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009984 "calls: calls with stack arith",
9985 .insns = {
9986 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9987 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9988 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9989 BPF_EXIT_INSN(),
9990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9991 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9992 BPF_EXIT_INSN(),
9993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9994 BPF_MOV64_IMM(BPF_REG_0, 42),
9995 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9996 BPF_EXIT_INSN(),
9997 },
9998 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9999 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010000 .retval = 42,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010001 },
10002 {
10003 "calls: calls with misaligned stack access",
10004 .insns = {
10005 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10006 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10007 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10008 BPF_EXIT_INSN(),
10009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
10010 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10011 BPF_EXIT_INSN(),
10012 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10013 BPF_MOV64_IMM(BPF_REG_0, 42),
10014 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10015 BPF_EXIT_INSN(),
10016 },
10017 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10018 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
10019 .errstr = "misaligned stack access",
10020 .result = REJECT,
10021 },
10022 {
10023 "calls: calls control flow, jump test",
10024 .insns = {
10025 BPF_MOV64_IMM(BPF_REG_0, 42),
10026 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10027 BPF_MOV64_IMM(BPF_REG_0, 43),
10028 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10029 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10030 BPF_EXIT_INSN(),
10031 },
10032 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10033 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010034 .retval = 43,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010035 },
10036 {
10037 "calls: calls control flow, jump test 2",
10038 .insns = {
10039 BPF_MOV64_IMM(BPF_REG_0, 42),
10040 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10041 BPF_MOV64_IMM(BPF_REG_0, 43),
10042 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10043 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10044 BPF_EXIT_INSN(),
10045 },
10046 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10047 .errstr = "jump out of range from insn 1 to 4",
10048 .result = REJECT,
10049 },
10050 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010051 "calls: two calls with bad jump",
10052 .insns = {
10053 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10054 BPF_EXIT_INSN(),
10055 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10056 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10057 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10058 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10059 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10060 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10061 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10062 BPF_EXIT_INSN(),
10063 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10064 offsetof(struct __sk_buff, len)),
10065 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
10066 BPF_EXIT_INSN(),
10067 },
10068 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10069 .errstr = "jump out of range from insn 11 to 9",
10070 .result = REJECT,
10071 },
10072 {
10073 "calls: recursive call. test1",
10074 .insns = {
10075 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10076 BPF_EXIT_INSN(),
10077 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10078 BPF_EXIT_INSN(),
10079 },
10080 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10081 .errstr = "back-edge",
10082 .result = REJECT,
10083 },
10084 {
10085 "calls: recursive call. test2",
10086 .insns = {
10087 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10088 BPF_EXIT_INSN(),
10089 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10090 BPF_EXIT_INSN(),
10091 },
10092 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10093 .errstr = "back-edge",
10094 .result = REJECT,
10095 },
10096 {
10097 "calls: unreachable code",
10098 .insns = {
10099 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10100 BPF_EXIT_INSN(),
10101 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10102 BPF_EXIT_INSN(),
10103 BPF_MOV64_IMM(BPF_REG_0, 0),
10104 BPF_EXIT_INSN(),
10105 BPF_MOV64_IMM(BPF_REG_0, 0),
10106 BPF_EXIT_INSN(),
10107 },
10108 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10109 .errstr = "unreachable insn 6",
10110 .result = REJECT,
10111 },
10112 {
10113 "calls: invalid call",
10114 .insns = {
10115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10116 BPF_EXIT_INSN(),
10117 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
10118 BPF_EXIT_INSN(),
10119 },
10120 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10121 .errstr = "invalid destination",
10122 .result = REJECT,
10123 },
10124 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010125 "calls: invalid call 2",
10126 .insns = {
10127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10128 BPF_EXIT_INSN(),
10129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
10130 BPF_EXIT_INSN(),
10131 },
10132 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10133 .errstr = "invalid destination",
10134 .result = REJECT,
10135 },
10136 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010137 "calls: jumping across function bodies. test1",
10138 .insns = {
10139 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10140 BPF_MOV64_IMM(BPF_REG_0, 0),
10141 BPF_EXIT_INSN(),
10142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
10143 BPF_EXIT_INSN(),
10144 },
10145 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10146 .errstr = "jump out of range",
10147 .result = REJECT,
10148 },
10149 {
10150 "calls: jumping across function bodies. test2",
10151 .insns = {
10152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
10153 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10154 BPF_MOV64_IMM(BPF_REG_0, 0),
10155 BPF_EXIT_INSN(),
10156 BPF_EXIT_INSN(),
10157 },
10158 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10159 .errstr = "jump out of range",
10160 .result = REJECT,
10161 },
10162 {
10163 "calls: call without exit",
10164 .insns = {
10165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10166 BPF_EXIT_INSN(),
10167 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10168 BPF_EXIT_INSN(),
10169 BPF_MOV64_IMM(BPF_REG_0, 0),
10170 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
10171 },
10172 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10173 .errstr = "not an exit",
10174 .result = REJECT,
10175 },
10176 {
10177 "calls: call into middle of ld_imm64",
10178 .insns = {
10179 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10180 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10181 BPF_MOV64_IMM(BPF_REG_0, 0),
10182 BPF_EXIT_INSN(),
10183 BPF_LD_IMM64(BPF_REG_0, 0),
10184 BPF_EXIT_INSN(),
10185 },
10186 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10187 .errstr = "last insn",
10188 .result = REJECT,
10189 },
10190 {
10191 "calls: call into middle of other call",
10192 .insns = {
10193 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10194 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10195 BPF_MOV64_IMM(BPF_REG_0, 0),
10196 BPF_EXIT_INSN(),
10197 BPF_MOV64_IMM(BPF_REG_0, 0),
10198 BPF_MOV64_IMM(BPF_REG_0, 0),
10199 BPF_EXIT_INSN(),
10200 },
10201 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10202 .errstr = "last insn",
10203 .result = REJECT,
10204 },
10205 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010206 "calls: ld_abs with changing ctx data in callee",
10207 .insns = {
10208 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10209 BPF_LD_ABS(BPF_B, 0),
10210 BPF_LD_ABS(BPF_H, 0),
10211 BPF_LD_ABS(BPF_W, 0),
10212 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
10213 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10214 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
10215 BPF_LD_ABS(BPF_B, 0),
10216 BPF_LD_ABS(BPF_H, 0),
10217 BPF_LD_ABS(BPF_W, 0),
10218 BPF_EXIT_INSN(),
10219 BPF_MOV64_IMM(BPF_REG_2, 1),
10220 BPF_MOV64_IMM(BPF_REG_3, 2),
10221 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10222 BPF_FUNC_skb_vlan_push),
10223 BPF_EXIT_INSN(),
10224 },
10225 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10226 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
10227 .result = REJECT,
10228 },
10229 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010230 "calls: two calls with bad fallthrough",
10231 .insns = {
10232 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10233 BPF_EXIT_INSN(),
10234 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10235 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10236 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10237 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10238 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10239 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10240 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10241 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
10242 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10243 offsetof(struct __sk_buff, len)),
10244 BPF_EXIT_INSN(),
10245 },
10246 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10247 .errstr = "not an exit",
10248 .result = REJECT,
10249 },
10250 {
10251 "calls: two calls with stack read",
10252 .insns = {
10253 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10254 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10255 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10256 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10257 BPF_EXIT_INSN(),
10258 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10259 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10260 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10261 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10262 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10263 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10264 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10265 BPF_EXIT_INSN(),
10266 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10267 BPF_EXIT_INSN(),
10268 },
10269 .prog_type = BPF_PROG_TYPE_XDP,
10270 .result = ACCEPT,
10271 },
10272 {
10273 "calls: two calls with stack write",
10274 .insns = {
10275 /* main prog */
10276 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10277 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10279 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10281 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10282 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10283 BPF_EXIT_INSN(),
10284
10285 /* subprog 1 */
10286 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10287 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10288 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
10289 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
10290 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10291 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10292 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
10293 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
10294 /* write into stack frame of main prog */
10295 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10296 BPF_EXIT_INSN(),
10297
10298 /* subprog 2 */
10299 /* read from stack frame of main prog */
10300 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10301 BPF_EXIT_INSN(),
10302 },
10303 .prog_type = BPF_PROG_TYPE_XDP,
10304 .result = ACCEPT,
10305 },
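	/* The next tests exercise the limit on combined stack usage across
	 * bpf-to-bpf call frames (512 bytes in total).
	 */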
10306 {
Jann Horn6b80ad22017-12-22 19:12:35 +010010307 "calls: stack overflow using two frames (pre-call access)",
10308 .insns = {
10309 /* prog 1 */
10310 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10311 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
10312 BPF_EXIT_INSN(),
10313
10314 /* prog 2 */
10315 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10316 BPF_MOV64_IMM(BPF_REG_0, 0),
10317 BPF_EXIT_INSN(),
10318 },
10319 .prog_type = BPF_PROG_TYPE_XDP,
10320 .errstr = "combined stack size",
10321 .result = REJECT,
10322 },
10323 {
10324 "calls: stack overflow using two frames (post-call access)",
10325 .insns = {
10326 /* prog 1 */
10327 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
10328 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10329 BPF_EXIT_INSN(),
10330
10331 /* prog 2 */
10332 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10333 BPF_MOV64_IMM(BPF_REG_0, 0),
10334 BPF_EXIT_INSN(),
10335 },
10336 .prog_type = BPF_PROG_TYPE_XDP,
10337 .errstr = "combined stack size",
10338 .result = REJECT,
10339 },
10340 {
Alexei Starovoitov6b86c422017-12-25 13:15:41 -080010341 "calls: stack depth check using three frames. test1",
10342 .insns = {
10343 /* main */
10344 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10345 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10346 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10347 BPF_MOV64_IMM(BPF_REG_0, 0),
10348 BPF_EXIT_INSN(),
10349 /* A */
10350 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10351 BPF_EXIT_INSN(),
10352 /* B */
10353 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10354 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10355 BPF_EXIT_INSN(),
10356 },
10357 .prog_type = BPF_PROG_TYPE_XDP,
10358 /* stack_main=32, stack_A=256, stack_B=64
10359 * and max(main+A, main+A+B) < 512
10360 */
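	/* worked out: 32+256 = 288 and 32+256+64 = 352, both below 512 */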
10361 .result = ACCEPT,
10362 },
10363 {
10364 "calls: stack depth check using three frames. test2",
10365 .insns = {
10366 /* main */
10367 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10368 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10369 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10370 BPF_MOV64_IMM(BPF_REG_0, 0),
10371 BPF_EXIT_INSN(),
10372 /* A */
10373 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10374 BPF_EXIT_INSN(),
10375 /* B */
10376 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10377 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10378 BPF_EXIT_INSN(),
10379 },
10380 .prog_type = BPF_PROG_TYPE_XDP,
10381 /* stack_main=32, stack_A=64, stack_B=256
10382 * and max(main+A, main+A+B) < 512
10383 */
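	/* worked out: 32+64 = 96 and 32+64+256 = 352, both below 512 */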
10384 .result = ACCEPT,
10385 },
10386 {
10387 "calls: stack depth check using three frames. test3",
10388 .insns = {
10389 /* main */
10390 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10391 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10392 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10393 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
10394 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
10395 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10396 BPF_MOV64_IMM(BPF_REG_0, 0),
10397 BPF_EXIT_INSN(),
10398 /* A */
10399 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
10400 BPF_EXIT_INSN(),
10401 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
10402 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10403 /* B */
10404 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
10405 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
10406 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10407 BPF_EXIT_INSN(),
10408 },
10409 .prog_type = BPF_PROG_TYPE_XDP,
10410 /* stack_main=64, stack_A=224, stack_B=256
10411 * and max(main+A, main+A+B) > 512
10412 */
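	/* worked out: 64+224+256 = 544, which exceeds the 512 byte limit */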
10413 .errstr = "combined stack",
10414 .result = REJECT,
10415 },
10416 {
10417 "calls: stack depth check using three frames. test4",
10418 /* void main(void) {
10419 * func1(0);
10420 * func1(1);
10421 * func2(1);
10422 * }
10423 * void func1(int alloc_or_recurse) {
10424 * if (alloc_or_recurse) {
10425 * frame_pointer[-300] = 1;
10426 * } else {
10427 * func2(alloc_or_recurse);
10428 * }
10429 * }
10430 * void func2(int alloc_or_recurse) {
10431 * if (alloc_or_recurse) {
10432 * frame_pointer[-300] = 1;
10433 * }
10434 * }
10435 */
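	/* The verifier sums each subprog's worst-case stack usage along the
	 * call chain, so main -> func1 -> func2 is accounted as 300+300 = 600
	 * bytes even though no single runtime path allocates in both frames.
	 */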
10436 .insns = {
10437 /* main */
10438 BPF_MOV64_IMM(BPF_REG_1, 0),
10439 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10440 BPF_MOV64_IMM(BPF_REG_1, 1),
10441 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10442 BPF_MOV64_IMM(BPF_REG_1, 1),
10443 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
10444 BPF_MOV64_IMM(BPF_REG_0, 0),
10445 BPF_EXIT_INSN(),
10446 /* A */
10447 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
10448 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10449 BPF_EXIT_INSN(),
10450 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10451 BPF_EXIT_INSN(),
10452 /* B */
10453 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10454 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10455 BPF_EXIT_INSN(),
10456 },
10457 .prog_type = BPF_PROG_TYPE_XDP,
10458 .result = REJECT,
10459 .errstr = "combined stack",
10460 },
10461 {
Alexei Starovoitovaada9ce2017-12-25 13:15:42 -080010462 "calls: stack depth check using three frames. test5",
10463 .insns = {
10464 /* main */
10465 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
10466 BPF_EXIT_INSN(),
10467 /* A */
10468 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10469 BPF_EXIT_INSN(),
10470 /* B */
10471 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
10472 BPF_EXIT_INSN(),
10473 /* C */
10474 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
10475 BPF_EXIT_INSN(),
10476 /* D */
10477 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
10478 BPF_EXIT_INSN(),
10479 /* E */
10480 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
10481 BPF_EXIT_INSN(),
10482 /* F */
10483 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
10484 BPF_EXIT_INSN(),
10485 /* G */
10486 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
10487 BPF_EXIT_INSN(),
10488 /* H */
10489 BPF_MOV64_IMM(BPF_REG_0, 0),
10490 BPF_EXIT_INSN(),
10491 },
10492 .prog_type = BPF_PROG_TYPE_XDP,
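	/* main plus subprogs A..H nest nine call frames; this trips the
	 * verifier's limit on call depth rather than the stack-size limit,
	 * matching the "call stack" error below.
	 */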
10493 .errstr = "call stack",
10494 .result = REJECT,
10495 },
10496 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010497 "calls: spill into caller stack frame",
10498 .insns = {
10499 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10500 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10502 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10503 BPF_EXIT_INSN(),
10504 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
10505 BPF_MOV64_IMM(BPF_REG_0, 0),
10506 BPF_EXIT_INSN(),
10507 },
10508 .prog_type = BPF_PROG_TYPE_XDP,
10509 .errstr = "cannot spill",
10510 .result = REJECT,
10511 },
10512 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010513 "calls: write into caller stack frame",
10514 .insns = {
10515 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10517 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10518 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10519 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10520 BPF_EXIT_INSN(),
10521 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
10522 BPF_MOV64_IMM(BPF_REG_0, 0),
10523 BPF_EXIT_INSN(),
10524 },
10525 .prog_type = BPF_PROG_TYPE_XDP,
10526 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010527 .retval = 42,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010528 },
10529 {
10530 "calls: write into callee stack frame",
10531 .insns = {
10532 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10533 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
10534 BPF_EXIT_INSN(),
10535 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
10536 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
10537 BPF_EXIT_INSN(),
10538 },
10539 .prog_type = BPF_PROG_TYPE_XDP,
10540 .errstr = "cannot return stack pointer",
10541 .result = REJECT,
10542 },
10543 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010544 "calls: two calls with stack write and void return",
10545 .insns = {
10546 /* main prog */
10547 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10548 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10550 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10552 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10553 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10554 BPF_EXIT_INSN(),
10555
10556 /* subprog 1 */
10557 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10558 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10559 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10560 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10561 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10562 BPF_EXIT_INSN(),
10563
10564 /* subprog 2 */
10565 /* write into stack frame of main prog */
10566 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
10567 BPF_EXIT_INSN(), /* void return */
10568 },
10569 .prog_type = BPF_PROG_TYPE_XDP,
10570 .result = ACCEPT,
10571 },
10572 {
10573 "calls: ambiguous return value",
10574 .insns = {
10575 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10576 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10577 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10578 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10579 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10580 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10581 BPF_EXIT_INSN(),
10582 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10583 BPF_MOV64_IMM(BPF_REG_0, 0),
10584 BPF_EXIT_INSN(),
10585 },
10586 .errstr_unpriv = "allowed for root only",
10587 .result_unpriv = REJECT,
10588 .errstr = "R0 !read_ok",
10589 .result = REJECT,
10590 },
10591 {
10592 "calls: two calls that return map_value",
10593 .insns = {
10594 /* main prog */
10595 /* pass fp-16, fp-8 into a function */
10596 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10598 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10599 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10600 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10601
10602 /* fetch map_value_ptr from the stack of this function */
10603 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
10604 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10605 /* write into map value */
10606 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10607		/* fetch second map_value_ptr from the stack */
10608 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10609 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10610 /* write into map value */
10611 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10612 BPF_MOV64_IMM(BPF_REG_0, 0),
10613 BPF_EXIT_INSN(),
10614
10615 /* subprog 1 */
10616 /* call 3rd function twice */
10617 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10618 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10619 /* first time with fp-8 */
10620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10621 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10622 /* second time with fp-16 */
10623 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10624 BPF_EXIT_INSN(),
10625
10626 /* subprog 2 */
10627 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10628 /* lookup from map */
10629 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10630 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10632 BPF_LD_MAP_FD(BPF_REG_1, 0),
10633 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10634 BPF_FUNC_map_lookup_elem),
10635 /* write map_value_ptr into stack frame of main prog */
10636 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10637 BPF_MOV64_IMM(BPF_REG_0, 0),
10638 BPF_EXIT_INSN(), /* return 0 */
10639 },
10640 .prog_type = BPF_PROG_TYPE_XDP,
10641 .fixup_map1 = { 23 },
10642 .result = ACCEPT,
10643 },
10644 {
10645 "calls: two calls that return map_value with bool condition",
10646 .insns = {
10647 /* main prog */
10648 /* pass fp-16, fp-8 into a function */
10649 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10650 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10651 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10652 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10653 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10654 BPF_MOV64_IMM(BPF_REG_0, 0),
10655 BPF_EXIT_INSN(),
10656
10657 /* subprog 1 */
10658 /* call 3rd function twice */
10659 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10660 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10661 /* first time with fp-8 */
10662 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10663 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10664 /* fetch map_value_ptr from the stack of this function */
10665 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10666 /* write into map value */
10667 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10668 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10669 /* second time with fp-16 */
10670 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10671 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10672		/* fetch second map_value_ptr from the stack */
10673 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10674 /* write into map value */
10675 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10676 BPF_EXIT_INSN(),
10677
10678 /* subprog 2 */
10679 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10680 /* lookup from map */
10681 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10682 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10684 BPF_LD_MAP_FD(BPF_REG_1, 0),
10685 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10686 BPF_FUNC_map_lookup_elem),
10687 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10688 BPF_MOV64_IMM(BPF_REG_0, 0),
10689 BPF_EXIT_INSN(), /* return 0 */
10690 /* write map_value_ptr into stack frame of main prog */
10691 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10692 BPF_MOV64_IMM(BPF_REG_0, 1),
10693 BPF_EXIT_INSN(), /* return 1 */
10694 },
10695 .prog_type = BPF_PROG_TYPE_XDP,
10696 .fixup_map1 = { 23 },
10697 .result = ACCEPT,
10698 },
10699 {
10700 "calls: two calls that return map_value with incorrect bool check",
10701 .insns = {
10702 /* main prog */
10703 /* pass fp-16, fp-8 into a function */
10704 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10705 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10706 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10707 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10708 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10709 BPF_MOV64_IMM(BPF_REG_0, 0),
10710 BPF_EXIT_INSN(),
10711
10712 /* subprog 1 */
10713 /* call 3rd function twice */
10714 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10715 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10716 /* first time with fp-8 */
10717 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10718 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10719 /* fetch map_value_ptr from the stack of this function */
10720 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10721 /* write into map value */
10722 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10723 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10724 /* second time with fp-16 */
10725 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10726 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
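			/* bug under test: the check is against 0 instead of 1,
			 * so fp-16 below may be read even when subprog 2
			 * returned 0 without spilling a map_value_ptr there.
			 */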
10727		/* fetch second map_value_ptr from the stack */
10728 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10729 /* write into map value */
10730 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10731 BPF_EXIT_INSN(),
10732
10733 /* subprog 2 */
10734 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10735 /* lookup from map */
10736 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10737 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10739 BPF_LD_MAP_FD(BPF_REG_1, 0),
10740 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10741 BPF_FUNC_map_lookup_elem),
10742 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10743 BPF_MOV64_IMM(BPF_REG_0, 0),
10744 BPF_EXIT_INSN(), /* return 0 */
10745 /* write map_value_ptr into stack frame of main prog */
10746 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10747 BPF_MOV64_IMM(BPF_REG_0, 1),
10748 BPF_EXIT_INSN(), /* return 1 */
10749 },
10750 .prog_type = BPF_PROG_TYPE_XDP,
10751 .fixup_map1 = { 23 },
10752 .result = REJECT,
10753 .errstr = "invalid read from stack off -16+0 size 8",
10754 },
10755 {
10756 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
10757 .insns = {
10758 /* main prog */
10759 /* pass fp-16, fp-8 into a function */
10760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10762 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10763 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10765 BPF_MOV64_IMM(BPF_REG_0, 0),
10766 BPF_EXIT_INSN(),
10767
10768 /* subprog 1 */
10769 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10770 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10771 /* 1st lookup from map */
10772 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10773 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10775 BPF_LD_MAP_FD(BPF_REG_1, 0),
10776 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10777 BPF_FUNC_map_lookup_elem),
10778 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10779 BPF_MOV64_IMM(BPF_REG_8, 0),
10780 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10781 /* write map_value_ptr into stack frame of main prog at fp-8 */
10782 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10783 BPF_MOV64_IMM(BPF_REG_8, 1),
10784
10785 /* 2nd lookup from map */
10786 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10788 BPF_LD_MAP_FD(BPF_REG_1, 0),
10789 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10790 BPF_FUNC_map_lookup_elem),
10791 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10792 BPF_MOV64_IMM(BPF_REG_9, 0),
10793 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10794 /* write map_value_ptr into stack frame of main prog at fp-16 */
10795 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10796 BPF_MOV64_IMM(BPF_REG_9, 1),
10797
10798 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10799 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10801 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10802 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10804 BPF_EXIT_INSN(),
10805
10806 /* subprog 2 */
10807 /* if arg2 == 1 do *arg1 = 0 */
10808 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10809 /* fetch map_value_ptr from the stack of this function */
10810 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10811 /* write into map value */
10812 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10813
10814 /* if arg4 == 1 do *arg3 = 0 */
10815 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10816 /* fetch map_value_ptr from the stack of this function */
10817 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10818 /* write into map value */
10819 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10820 BPF_EXIT_INSN(),
10821 },
10822 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10823 .fixup_map1 = { 12, 22 },
10824 .result = REJECT,
10825 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10826 },
10827 {
10828 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
10829 .insns = {
10830 /* main prog */
10831 /* pass fp-16, fp-8 into a function */
10832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10833 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10834 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10835 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10836 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10837 BPF_MOV64_IMM(BPF_REG_0, 0),
10838 BPF_EXIT_INSN(),
10839
10840 /* subprog 1 */
10841 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10842 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10843 /* 1st lookup from map */
10844 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10845 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10846 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10847 BPF_LD_MAP_FD(BPF_REG_1, 0),
10848 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10849 BPF_FUNC_map_lookup_elem),
10850 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10851 BPF_MOV64_IMM(BPF_REG_8, 0),
10852 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10853 /* write map_value_ptr into stack frame of main prog at fp-8 */
10854 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10855 BPF_MOV64_IMM(BPF_REG_8, 1),
10856
10857 /* 2nd lookup from map */
10858 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10860 BPF_LD_MAP_FD(BPF_REG_1, 0),
10861 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10862 BPF_FUNC_map_lookup_elem),
10863 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10864 BPF_MOV64_IMM(BPF_REG_9, 0),
10865 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10866 /* write map_value_ptr into stack frame of main prog at fp-16 */
10867 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10868 BPF_MOV64_IMM(BPF_REG_9, 1),
10869
10870 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10871 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10872 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10873 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10874 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10875 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10876 BPF_EXIT_INSN(),
10877
10878 /* subprog 2 */
10879 /* if arg2 == 1 do *arg1 = 0 */
10880 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10881 /* fetch map_value_ptr from the stack of this function */
10882 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10883 /* write into map value */
10884 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10885
10886 /* if arg4 == 1 do *arg3 = 0 */
10887 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10888 /* fetch map_value_ptr from the stack of this function */
10889 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10890 /* write into map value */
10891 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10892 BPF_EXIT_INSN(),
10893 },
10894 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10895 .fixup_map1 = { 12, 22 },
10896 .result = ACCEPT,
10897 },
10898 {
10899 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
10900 .insns = {
10901 /* main prog */
10902 /* pass fp-16, fp-8 into a function */
10903 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10904 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10905 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10906 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10907 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
10908 BPF_MOV64_IMM(BPF_REG_0, 0),
10909 BPF_EXIT_INSN(),
10910
10911 /* subprog 1 */
10912 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10913 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10914 /* 1st lookup from map */
10915 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
10916 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10918 BPF_LD_MAP_FD(BPF_REG_1, 0),
10919 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10920 BPF_FUNC_map_lookup_elem),
10921 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10922 BPF_MOV64_IMM(BPF_REG_8, 0),
10923 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10924 /* write map_value_ptr into stack frame of main prog at fp-8 */
10925 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10926 BPF_MOV64_IMM(BPF_REG_8, 1),
10927
10928 /* 2nd lookup from map */
10929 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10931 BPF_LD_MAP_FD(BPF_REG_1, 0),
10932 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10933 BPF_FUNC_map_lookup_elem),
10934 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10935		BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
10936 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10937 /* write map_value_ptr into stack frame of main prog at fp-16 */
10938 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10939 BPF_MOV64_IMM(BPF_REG_9, 1),
10940
10941 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10942		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10943 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10944 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10945 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10946		BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
10947 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
10948
10949 /* subprog 2 */
10950 /* if arg2 == 1 do *arg1 = 0 */
10951 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10952 /* fetch map_value_ptr from the stack of this function */
10953 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10954 /* write into map value */
10955 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10956
10957 /* if arg4 == 1 do *arg3 = 0 */
10958 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10959 /* fetch map_value_ptr from the stack of this function */
10960 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10961 /* write into map value */
10962 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10963 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
10964 },
10965 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10966 .fixup_map1 = { 12, 22 },
10967 .result = REJECT,
10968 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10969 },
10970 {
10971 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
10972 .insns = {
10973 /* main prog */
10974 /* pass fp-16, fp-8 into a function */
10975 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10977 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10979 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10980 BPF_MOV64_IMM(BPF_REG_0, 0),
10981 BPF_EXIT_INSN(),
10982
10983 /* subprog 1 */
10984 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10985 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10986 /* 1st lookup from map */
10987 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10988 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10989 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10990 BPF_LD_MAP_FD(BPF_REG_1, 0),
10991 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10992 BPF_FUNC_map_lookup_elem),
10993 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10994 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10995 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10996 BPF_MOV64_IMM(BPF_REG_8, 0),
10997 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10998 BPF_MOV64_IMM(BPF_REG_8, 1),
10999
11000 /* 2nd lookup from map */
11001 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11002 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11003 BPF_LD_MAP_FD(BPF_REG_1, 0),
11004 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11005 BPF_FUNC_map_lookup_elem),
11006 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11007 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11008 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11009 BPF_MOV64_IMM(BPF_REG_9, 0),
11010 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11011 BPF_MOV64_IMM(BPF_REG_9, 1),
11012
11013 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11014 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11015 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11016 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11017 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11018 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11019 BPF_EXIT_INSN(),
11020
11021 /* subprog 2 */
11022 /* if arg2 == 1 do *arg1 = 0 */
11023 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11024 /* fetch map_value_ptr from the stack of this function */
11025 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11026 /* write into map value */
11027 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11028
11029 /* if arg4 == 1 do *arg3 = 0 */
11030 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11031 /* fetch map_value_ptr from the stack of this function */
11032 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11033 /* write into map value */
11034 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11035 BPF_EXIT_INSN(),
11036 },
11037 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11038 .fixup_map1 = { 12, 22 },
11039 .result = ACCEPT,
11040 },
11041 {
11042 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
11043 .insns = {
11044 /* main prog */
11045 /* pass fp-16, fp-8 into a function */
11046 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11048 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11050 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11051 BPF_MOV64_IMM(BPF_REG_0, 0),
11052 BPF_EXIT_INSN(),
11053
11054 /* subprog 1 */
11055 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11056 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11057 /* 1st lookup from map */
11058 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11059 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11061 BPF_LD_MAP_FD(BPF_REG_1, 0),
11062 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11063 BPF_FUNC_map_lookup_elem),
11064 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11065 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11066 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11067 BPF_MOV64_IMM(BPF_REG_8, 0),
11068 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11069 BPF_MOV64_IMM(BPF_REG_8, 1),
11070
11071 /* 2nd lookup from map */
11072 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11074 BPF_LD_MAP_FD(BPF_REG_1, 0),
11075 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11076 BPF_FUNC_map_lookup_elem),
11077 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11078 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11079 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11080 BPF_MOV64_IMM(BPF_REG_9, 0),
11081 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11082 BPF_MOV64_IMM(BPF_REG_9, 1),
11083
11084 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11085 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11086 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11087 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11088 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11089 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11090 BPF_EXIT_INSN(),
11091
11092 /* subprog 2 */
11093 /* if arg2 == 1 do *arg1 = 0 */
11094 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11095 /* fetch map_value_ptr from the stack of this function */
11096 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11097 /* write into map value */
11098 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11099
11100 /* if arg4 == 0 do *arg3 = 0 */
11101 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
11102 /* fetch map_value_ptr from the stack of this function */
11103 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11104 /* write into map value */
11105 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11106 BPF_EXIT_INSN(),
11107 },
11108 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11109 .fixup_map1 = { 12, 22 },
11110 .result = REJECT,
11111 .errstr = "R0 invalid mem access 'inv'",
11112 },
11113 {
11114 "calls: pkt_ptr spill into caller stack",
11115 .insns = {
11116 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11118 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11119 BPF_EXIT_INSN(),
11120
11121 /* subprog 1 */
11122 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11123 offsetof(struct __sk_buff, data)),
11124 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11125 offsetof(struct __sk_buff, data_end)),
11126 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11128 /* spill unchecked pkt_ptr into stack of caller */
11129 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11130 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11131 /* now the pkt range is verified, read pkt_ptr from stack */
11132 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11133 /* write 4 bytes into packet */
11134 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11135 BPF_EXIT_INSN(),
11136 },
11137 .result = ACCEPT,
11138 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011139 .retval = POINTER_VALUE,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011140 },
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080011141 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080011142 "calls: pkt_ptr spill into caller stack 2",
11143 .insns = {
11144 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11146 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11147		/* Marking is still kept, but not safe in all cases. */
11148 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11149 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11150 BPF_EXIT_INSN(),
11151
11152 /* subprog 1 */
11153 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11154 offsetof(struct __sk_buff, data)),
11155 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11156 offsetof(struct __sk_buff, data_end)),
11157 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11158 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11159 /* spill unchecked pkt_ptr into stack of caller */
11160 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11161 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11162 /* now the pkt range is verified, read pkt_ptr from stack */
11163 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11164 /* write 4 bytes into packet */
11165 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11166 BPF_EXIT_INSN(),
11167 },
11168 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11169 .errstr = "invalid access to packet",
11170 .result = REJECT,
11171 },
11172 {
11173 "calls: pkt_ptr spill into caller stack 3",
11174 .insns = {
11175 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11177 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11178 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11179 /* Marking is still kept and safe here. */
11180 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11181 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11182 BPF_EXIT_INSN(),
11183
11184 /* subprog 1 */
11185 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11186 offsetof(struct __sk_buff, data)),
11187 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11188 offsetof(struct __sk_buff, data_end)),
11189 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11191 /* spill unchecked pkt_ptr into stack of caller */
11192 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11193 BPF_MOV64_IMM(BPF_REG_5, 0),
11194 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11195 BPF_MOV64_IMM(BPF_REG_5, 1),
11196 /* now the pkt range is verified, read pkt_ptr from stack */
11197 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11198 /* write 4 bytes into packet */
11199 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11200 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11201 BPF_EXIT_INSN(),
11202 },
11203 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11204 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011205 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080011206 },
11207 {
11208 "calls: pkt_ptr spill into caller stack 4",
11209 .insns = {
11210 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11211 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11212 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11213 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11214 /* Check marking propagated. */
11215 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11216 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11217 BPF_EXIT_INSN(),
11218
11219 /* subprog 1 */
11220 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11221 offsetof(struct __sk_buff, data)),
11222 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11223 offsetof(struct __sk_buff, data_end)),
11224 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11226 /* spill unchecked pkt_ptr into stack of caller */
11227 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11228 BPF_MOV64_IMM(BPF_REG_5, 0),
11229 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11230 BPF_MOV64_IMM(BPF_REG_5, 1),
11231 /* don't read back pkt_ptr from stack here */
11232 /* write 4 bytes into packet */
11233 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11234 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11235 BPF_EXIT_INSN(),
11236 },
11237 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11238 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011239 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080011240 },
11241 {
11242 "calls: pkt_ptr spill into caller stack 5",
11243 .insns = {
11244 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11245 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11246 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
11247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11248 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11249 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11250 BPF_EXIT_INSN(),
11251
11252 /* subprog 1 */
11253 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11254 offsetof(struct __sk_buff, data)),
11255 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11256 offsetof(struct __sk_buff, data_end)),
11257 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11258 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11259 BPF_MOV64_IMM(BPF_REG_5, 0),
11260 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11261 /* spill checked pkt_ptr into stack of caller */
11262 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11263 BPF_MOV64_IMM(BPF_REG_5, 1),
11264 /* don't read back pkt_ptr from stack here */
11265 /* write 4 bytes into packet */
11266 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11267 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11268 BPF_EXIT_INSN(),
11269 },
11270 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11271 .errstr = "same insn cannot be used with different",
11272 .result = REJECT,
11273 },
11274 {
11275 "calls: pkt_ptr spill into caller stack 6",
11276 .insns = {
11277 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11278 offsetof(struct __sk_buff, data_end)),
11279 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11281 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11282 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11283 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11284 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11285 BPF_EXIT_INSN(),
11286
11287 /* subprog 1 */
11288 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11289 offsetof(struct __sk_buff, data)),
11290 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11291 offsetof(struct __sk_buff, data_end)),
11292 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11294 BPF_MOV64_IMM(BPF_REG_5, 0),
11295 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11296 /* spill checked pkt_ptr into stack of caller */
11297 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11298 BPF_MOV64_IMM(BPF_REG_5, 1),
11299 /* don't read back pkt_ptr from stack here */
11300 /* write 4 bytes into packet */
11301 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11302 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11303 BPF_EXIT_INSN(),
11304 },
11305 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11306 .errstr = "R4 invalid mem access",
11307 .result = REJECT,
11308 },
11309 {
11310 "calls: pkt_ptr spill into caller stack 7",
11311 .insns = {
11312 BPF_MOV64_IMM(BPF_REG_2, 0),
11313 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11315 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11316 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11317 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11318 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11319 BPF_EXIT_INSN(),
11320
11321 /* subprog 1 */
11322 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11323 offsetof(struct __sk_buff, data)),
11324 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11325 offsetof(struct __sk_buff, data_end)),
11326 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11328 BPF_MOV64_IMM(BPF_REG_5, 0),
11329 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11330 /* spill checked pkt_ptr into stack of caller */
11331 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11332 BPF_MOV64_IMM(BPF_REG_5, 1),
11333 /* don't read back pkt_ptr from stack here */
11334 /* write 4 bytes into packet */
11335 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11336 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11337 BPF_EXIT_INSN(),
11338 },
11339 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11340 .errstr = "R4 invalid mem access",
11341 .result = REJECT,
11342 },
11343 {
11344 "calls: pkt_ptr spill into caller stack 8",
11345 .insns = {
11346 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11347 offsetof(struct __sk_buff, data)),
11348 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11349 offsetof(struct __sk_buff, data_end)),
11350 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11351 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11352 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11353 BPF_EXIT_INSN(),
11354 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11356 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11357 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11358 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11359 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11360 BPF_EXIT_INSN(),
11361
11362 /* subprog 1 */
11363 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11364 offsetof(struct __sk_buff, data)),
11365 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11366 offsetof(struct __sk_buff, data_end)),
11367 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11369 BPF_MOV64_IMM(BPF_REG_5, 0),
11370 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11371 /* spill checked pkt_ptr into stack of caller */
11372 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11373 BPF_MOV64_IMM(BPF_REG_5, 1),
11374 /* don't read back pkt_ptr from stack here */
11375 /* write 4 bytes into packet */
11376 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11377 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11378 BPF_EXIT_INSN(),
11379 },
11380 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11381 .result = ACCEPT,
11382 },
11383 {
11384 "calls: pkt_ptr spill into caller stack 9",
11385 .insns = {
11386 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11387 offsetof(struct __sk_buff, data)),
11388 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11389 offsetof(struct __sk_buff, data_end)),
11390 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11391 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11392 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11393 BPF_EXIT_INSN(),
11394 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11396 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11398 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11399 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11400 BPF_EXIT_INSN(),
11401
11402 /* subprog 1 */
11403 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11404 offsetof(struct __sk_buff, data)),
11405 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11406 offsetof(struct __sk_buff, data_end)),
11407 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11409 BPF_MOV64_IMM(BPF_REG_5, 0),
11410 /* spill unchecked pkt_ptr into stack of caller */
11411 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11412 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11413 BPF_MOV64_IMM(BPF_REG_5, 1),
11414 /* don't read back pkt_ptr from stack here */
11415 /* write 4 bytes into packet */
11416 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11417 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11418 BPF_EXIT_INSN(),
11419 },
11420 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11421 .errstr = "invalid access to packet",
11422 .result = REJECT,
11423 },
11424 {
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080011425 "calls: caller stack init to zero or map_value_or_null",
11426 .insns = {
11427 BPF_MOV64_IMM(BPF_REG_0, 0),
11428 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
11429 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11432 /* fetch map_value_or_null or const_zero from stack */
11433 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11434 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11435 /* store into map_value */
11436 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
11437 BPF_EXIT_INSN(),
11438
11439 /* subprog 1 */
11440 /* if (ctx == 0) return; */
11441 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
11442 /* else bpf_map_lookup() and *(fp - 8) = r0 */
11443 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
11444 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11445 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11446 BPF_LD_MAP_FD(BPF_REG_1, 0),
11447 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11448 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11449 BPF_FUNC_map_lookup_elem),
11450 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11451 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11452 BPF_EXIT_INSN(),
11453 },
11454 .fixup_map1 = { 13 },
11455 .result = ACCEPT,
11456 .prog_type = BPF_PROG_TYPE_XDP,
11457 },
11458 {
11459 "calls: stack init to zero and pruning",
11460 .insns = {
11461 /* first make allocated_stack 16 byte */
11462 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
11463		/* now fork the execution such that the false branch
11464		 * of the JGT insn is verified second and skips the zero
11465		 * init of the fp-8 stack slot. If stack liveness marking
11466		 * is missing live_read marks from the map_lookup call
11467		 * processing, then pruning will incorrectly assume that
11468		 * the fp-8 stack slot was unused in the fall-through
11469		 * branch and will accept the program incorrectly.
11470		 */
11471 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
11472 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11473 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
11474 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11476 BPF_LD_MAP_FD(BPF_REG_1, 0),
11477 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11478 BPF_FUNC_map_lookup_elem),
11479 BPF_EXIT_INSN(),
11480 },
11481 .fixup_map2 = { 6 },
11482 .errstr = "invalid indirect read from stack off -8+0 size 8",
11483 .result = REJECT,
11484 .prog_type = BPF_PROG_TYPE_XDP,
11485 },
Gianluca Borellofd05e572017-12-23 10:09:55 +000011486 {
11487 "search pruning: all branches should be verified (nop operation)",
11488 .insns = {
11489 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11491 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
11492 BPF_LD_MAP_FD(BPF_REG_1, 0),
11493 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
11494 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
11495 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
11496 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
11497 BPF_MOV64_IMM(BPF_REG_4, 0),
11498 BPF_JMP_A(1),
11499 BPF_MOV64_IMM(BPF_REG_4, 1),
11500 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
11501 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
11502 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
11503 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
11504 BPF_MOV64_IMM(BPF_REG_6, 0),
11505 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
11506 BPF_EXIT_INSN(),
11507 },
11508 .fixup_map1 = { 3 },
11509 .errstr = "R6 invalid mem access 'inv'",
11510 .result = REJECT,
11511 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11512 },
11513 {
11514 "search pruning: all branches should be verified (invalid stack access)",
11515 .insns = {
11516 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11518 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
11519 BPF_LD_MAP_FD(BPF_REG_1, 0),
11520 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
11521 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
11522 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
11523 BPF_MOV64_IMM(BPF_REG_4, 0),
11524 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
11525 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
11526 BPF_JMP_A(1),
11527 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
11528 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
11529 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
11530 BPF_EXIT_INSN(),
11531 },
11532 .fixup_map1 = { 3 },
11533 .errstr = "invalid read from stack off -16+0 size 8",
11534 .result = REJECT,
11535 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11536 },
Daniel Borkmann23d191a2018-02-24 01:08:03 +010011537 {
11538 "jit: lsh, rsh, arsh by 1",
11539 .insns = {
11540 BPF_MOV64_IMM(BPF_REG_0, 1),
11541 BPF_MOV64_IMM(BPF_REG_1, 0xff),
11542 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
11543 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
11544 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
11545 BPF_EXIT_INSN(),
11546 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
11547 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
11548 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
11549 BPF_EXIT_INSN(),
11550 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
11551 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
11552 BPF_EXIT_INSN(),
11553 BPF_MOV64_IMM(BPF_REG_0, 2),
11554 BPF_EXIT_INSN(),
11555 },
11556 .result = ACCEPT,
11557 .retval = 2,
11558 },
11559 {
11560 "jit: mov32 for ldimm64, 1",
11561 .insns = {
11562 BPF_MOV64_IMM(BPF_REG_0, 2),
11563 BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
11564 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
11565 BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
11566 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
11567 BPF_MOV64_IMM(BPF_REG_0, 1),
11568 BPF_EXIT_INSN(),
11569 },
11570 .result = ACCEPT,
11571 .retval = 2,
11572 },
11573 {
11574 "jit: mov32 for ldimm64, 2",
11575 .insns = {
11576 BPF_MOV64_IMM(BPF_REG_0, 1),
11577 BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
11578 BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
11579 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
11580 BPF_MOV64_IMM(BPF_REG_0, 2),
11581 BPF_EXIT_INSN(),
11582 },
11583 .result = ACCEPT,
11584 .retval = 2,
11585 },
11586 {
11587 "jit: various mul tests",
11588 .insns = {
11589 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
11590 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
11591 BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
11592 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
11593 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
11594 BPF_MOV64_IMM(BPF_REG_0, 1),
11595 BPF_EXIT_INSN(),
11596 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
11597 BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
11598 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
11599 BPF_MOV64_IMM(BPF_REG_0, 1),
11600 BPF_EXIT_INSN(),
11601 BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
11602 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
11603 BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
11604 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
11605 BPF_MOV64_IMM(BPF_REG_0, 1),
11606 BPF_EXIT_INSN(),
11607 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
11608 BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
11609 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
11610 BPF_MOV64_IMM(BPF_REG_0, 1),
11611 BPF_EXIT_INSN(),
11612 BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
11613 BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
11614 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
11615 BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
11616 BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
11617 BPF_MOV64_IMM(BPF_REG_0, 1),
11618 BPF_EXIT_INSN(),
11619 BPF_MOV64_IMM(BPF_REG_0, 2),
11620 BPF_EXIT_INSN(),
11621 },
11622 .result = ACCEPT,
11623 .retval = 2,
11624 },
David S. Miller0f3e9c92018-03-06 00:53:44 -050011625 {
Daniel Borkmannca369602018-02-23 22:29:05 +010011626 "xadd/w check unaligned stack",
11627 .insns = {
11628 BPF_MOV64_IMM(BPF_REG_0, 1),
11629 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
11630 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
11631 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11632 BPF_EXIT_INSN(),
11633 },
11634 .result = REJECT,
11635 .errstr = "misaligned stack access off",
11636 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11637 },
11638 {
11639 "xadd/w check unaligned map",
11640 .insns = {
11641 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11642 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11644 BPF_LD_MAP_FD(BPF_REG_1, 0),
11645 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11646 BPF_FUNC_map_lookup_elem),
11647 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
11648 BPF_EXIT_INSN(),
11649 BPF_MOV64_IMM(BPF_REG_1, 1),
11650 BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
11651 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
11652 BPF_EXIT_INSN(),
11653 },
11654 .fixup_map1 = { 3 },
11655 .result = REJECT,
11656 .errstr = "misaligned value access off",
11657 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11658 },
11659 {
11660 "xadd/w check unaligned pkt",
11661 .insns = {
11662 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11663 offsetof(struct xdp_md, data)),
11664 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11665 offsetof(struct xdp_md, data_end)),
11666 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11667 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11668 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
11669 BPF_MOV64_IMM(BPF_REG_0, 99),
11670 BPF_JMP_IMM(BPF_JA, 0, 0, 6),
11671 BPF_MOV64_IMM(BPF_REG_0, 1),
11672 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11673 BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
11674 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
11675 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
11676 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
11677 BPF_EXIT_INSN(),
11678 },
11679 .result = REJECT,
11680 .errstr = "BPF_XADD stores into R2 packet",
11681 .prog_type = BPF_PROG_TYPE_XDP,
11682 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011683};
11684
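/* Scan backwards from the end of the fixed-size insns array to find the
 * actual length of a test program, i.e. up to the last non-zero instruction.
 */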
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011685static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011686{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011687 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011688
11689 for (len = MAX_INSNS - 1; len > 0; --len)
11690 if (fp[len].code != 0 || fp[len].imm != 0)
11691 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011692 return len + 1;
11693}
11694
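/* Create a hash map with an 8-byte key; value size and maximum number of
 * elements are supplied by the caller. This is the generic lookup target
 * patched into the test programs by the map fixups below.
 */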
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011695static int create_map(uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011696{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011697 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011698
Mickaël Salaünf4874d02017-02-10 00:21:43 +010011699 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011700 size_value, max_elem, BPF_F_NO_PREALLOC);
11701 if (fd < 0)
11702 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011703
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011704 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011705}
11706
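/* Two dummy socket filter programs used to populate the prog array for the
 * tail-call tests: the first simply returns 42, the second tail-calls into
 * slot 'idx' of the given prog array and returns 41 if the tail call falls
 * through.
 */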
Daniel Borkmannb33eb732018-02-26 22:34:33 +010011707static int create_prog_dummy1(void)
11708{
11709 struct bpf_insn prog[] = {
11710 BPF_MOV64_IMM(BPF_REG_0, 42),
11711 BPF_EXIT_INSN(),
11712 };
11713
11714 return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
11715 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
11716}
11717
11718static int create_prog_dummy2(int mfd, int idx)
11719{
11720 struct bpf_insn prog[] = {
11721 BPF_MOV64_IMM(BPF_REG_3, idx),
11722 BPF_LD_MAP_FD(BPF_REG_2, mfd),
11723 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11724 BPF_FUNC_tail_call),
11725 BPF_MOV64_IMM(BPF_REG_0, 41),
11726 BPF_EXIT_INSN(),
11727 };
11728
11729 return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
11730 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
11731}
11732
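/* Build a 4-slot BPF_MAP_TYPE_PROG_ARRAY and install the two dummy programs
 * above into slots 0 and 1; tests reference this map via fixup_prog.
 */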
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011733static int create_prog_array(void)
11734{
Daniel Borkmannb33eb732018-02-26 22:34:33 +010011735 int p1key = 0, p2key = 1;
11736 int mfd, p1fd, p2fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011737
Daniel Borkmannb33eb732018-02-26 22:34:33 +010011738 mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
11739 sizeof(int), 4, 0);
11740 if (mfd < 0) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011741 printf("Failed to create prog array '%s'!\n", strerror(errno));
Daniel Borkmannb33eb732018-02-26 22:34:33 +010011742 return -1;
11743 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011744
Daniel Borkmannb33eb732018-02-26 22:34:33 +010011745 p1fd = create_prog_dummy1();
11746 p2fd = create_prog_dummy2(mfd, p2key);
11747 if (p1fd < 0 || p2fd < 0)
11748 goto out;
11749 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
11750 goto out;
11751 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
11752 goto out;
11753 close(p2fd);
11754 close(p1fd);
11755
11756 return mfd;
11757out:
11758 close(p2fd);
11759 close(p1fd);
11760 close(mfd);
11761 return -1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011762}
11763
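/* Create an ARRAY_OF_MAPS outer map with a single slot whose inner map is a
 * one-element array of int; tests reference it via fixup_map_in_map.
 */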
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011764static int create_map_in_map(void)
11765{
11766 int inner_map_fd, outer_map_fd;
11767
11768 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
11769 sizeof(int), 1, 0);
11770 if (inner_map_fd < 0) {
11771 printf("Failed to create array '%s'!\n", strerror(errno));
11772 return inner_map_fd;
11773 }
11774
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070011775 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011776 sizeof(int), inner_map_fd, 1, 0);
11777 if (outer_map_fd < 0)
11778 printf("Failed to create array of maps '%s'!\n",
11779 strerror(errno));
11780
11781 close(inner_map_fd);
11782
11783 return outer_map_fd;
11784}
11785
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011786static char bpf_vlog[32768];
11787
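/* Patch the BPF_LD_MAP_FD placeholders in the test program with freshly
 * created map, prog-array and map-in-map fds at the instruction indices
 * listed in the per-test fixup arrays.
 */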
11788static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011789 int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011790{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011791 int *fixup_map1 = test->fixup_map1;
11792 int *fixup_map2 = test->fixup_map2;
Paul Chaignon5f90dd62018-04-24 15:08:19 +020011793 int *fixup_map3 = test->fixup_map3;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011794 int *fixup_prog = test->fixup_prog;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011795 int *fixup_map_in_map = test->fixup_map_in_map;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011796
11797 /* Allocating HTs with 1 elem is fine here, since we only test
11798 * for verifier and not do a runtime lookup, so the only thing
11799 * that really matters is value size in this case.
11800 */
11801 if (*fixup_map1) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011802 map_fds[0] = create_map(sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011803 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011804 prog[*fixup_map1].imm = map_fds[0];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011805 fixup_map1++;
11806 } while (*fixup_map1);
11807 }
11808
11809 if (*fixup_map2) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011810 map_fds[1] = create_map(sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011811 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011812 prog[*fixup_map2].imm = map_fds[1];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011813 fixup_map2++;
11814 } while (*fixup_map2);
11815 }
11816
Paul Chaignon5f90dd62018-04-24 15:08:19 +020011817 if (*fixup_map3) {
11818 map_fds[1] = create_map(sizeof(struct other_val), 1);
11819 do {
11820 prog[*fixup_map3].imm = map_fds[1];
11821 fixup_map3++;
11822 } while (*fixup_map3);
11823 }
11824
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011825 if (*fixup_prog) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011826 map_fds[2] = create_prog_array();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011827 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011828 prog[*fixup_prog].imm = map_fds[2];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011829 fixup_prog++;
11830 } while (*fixup_prog);
11831 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011832
11833 if (*fixup_map_in_map) {
11834 map_fds[3] = create_map_in_map();
11835 do {
11836 prog[*fixup_map_in_map].imm = map_fds[3];
11837 fixup_map_in_map++;
11838 } while (*fixup_map_in_map);
11839 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011840}
11841
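/* Load one test program and compare the verifier verdict and error string
 * against the expected (un)privileged result; if the program loads, run it
 * once on zeroed test data and check the return value as well.
 */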
11842static void do_test_single(struct bpf_test *test, bool unpriv,
11843 int *passes, int *errors)
11844{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011845 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011846 struct bpf_insn *prog = test->insns;
11847 int prog_len = probe_filter_length(prog);
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011848 char data_in[TEST_DATA_LEN] = {};
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011849 int prog_type = test->prog_type;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011850 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011851 const char *expected_err;
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011852 uint32_t retval;
11853 int i, err;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011854
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011855 for (i = 0; i < MAX_NR_MAPS; i++)
11856 map_fds[i] = -1;
11857
11858 do_test_fixup(test, prog, map_fds);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011859
Daniel Borkmann614d0d72017-05-25 01:05:09 +020011860 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
11861 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmannd6554902017-07-21 00:00:22 +020011862 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011863
11864 expected_ret = unpriv && test->result_unpriv != UNDEF ?
11865 test->result_unpriv : test->result;
11866 expected_err = unpriv && test->errstr_unpriv ?
11867 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011868
11869 reject_from_alignment = fd_prog < 0 &&
11870 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
11871 strstr(bpf_vlog, "Unknown alignment.");
11872#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
11873 if (reject_from_alignment) {
11874 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
11875 strerror(errno));
11876 goto fail_log;
11877 }
11878#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011879 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011880 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011881 printf("FAIL\nFailed to load prog '%s'!\n",
11882 strerror(errno));
11883 goto fail_log;
11884 }
11885 } else {
11886 if (fd_prog >= 0) {
11887 printf("FAIL\nUnexpected success to load!\n");
11888 goto fail_log;
11889 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011890 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Joe Stringer95f87a92018-02-14 13:50:34 -080011891 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
11892 expected_err, bpf_vlog);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011893 goto fail_log;
11894 }
11895 }
11896
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011897 if (fd_prog >= 0) {
11898 err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
11899 NULL, NULL, &retval, NULL);
11900		if (err && errno != 524 /* ENOTSUPP */ && errno != EPERM) {
11901 printf("Unexpected bpf_prog_test_run error\n");
11902 goto fail_log;
11903 }
11904 if (!err && retval != test->retval &&
11905 test->retval != POINTER_VALUE) {
11906 printf("FAIL retval %d != %d\n", retval, test->retval);
11907 goto fail_log;
11908 }
11909 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011910 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011911 printf("OK%s\n", reject_from_alignment ?
11912 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011913close_fds:
11914 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011915 for (i = 0; i < MAX_NR_MAPS; i++)
11916 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011917 sched_yield();
11918 return;
11919fail_log:
11920 (*errors)++;
11921 printf("%s", bpf_vlog);
11922 goto close_fds;
11923}
11924
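/* Capability helpers: is_admin() checks for an effective CAP_SYS_ADMIN and
 * set_admin() toggles it, so a privileged run can also exercise each test
 * as unprivileged.
 */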
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011925static bool is_admin(void)
11926{
11927 cap_t caps;
11928 cap_flag_value_t sysadmin = CAP_CLEAR;
11929 const cap_value_t cap_val = CAP_SYS_ADMIN;
11930
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080011931#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011932 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
11933 perror("cap_get_flag");
11934 return false;
11935 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080011936#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011937 caps = cap_get_proc();
11938 if (!caps) {
11939 perror("cap_get_proc");
11940 return false;
11941 }
11942 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
11943 perror("cap_get_flag");
11944 if (cap_free(caps))
11945 perror("cap_free");
11946 return (sysadmin == CAP_SET);
11947}
11948
11949static int set_admin(bool admin)
11950{
11951 cap_t caps;
11952 const cap_value_t cap_val = CAP_SYS_ADMIN;
11953 int ret = -1;
11954
11955 caps = cap_get_proc();
11956 if (!caps) {
11957 perror("cap_get_proc");
11958 return -1;
11959 }
11960 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
11961 admin ? CAP_SET : CAP_CLEAR)) {
11962 perror("cap_set_flag");
11963 goto out;
11964 }
11965 if (cap_set_proc(caps)) {
11966 perror("cap_set_proc");
11967 goto out;
11968 }
11969 ret = 0;
11970out:
11971 if (cap_free(caps))
11972 perror("cap_free");
11973 return ret;
11974}
11975
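/* Read the kernel.unprivileged_bpf_disabled sysctl; when it is set, the
 * unprivileged test passes are skipped (or the run is refused outright for
 * a non-root user).
 */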
Joe Stringer0a6748742018-02-14 13:50:36 -080011976static void get_unpriv_disabled(void)
11977{
11978 char buf[2];
11979 FILE *fd;
11980
11981	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) /* if the sysctl is absent, assume unprivileged bpf is allowed */
		return;
11982	if (fgets(buf, 2, fd) == buf && atoi(buf))
11983		unpriv_disabled = true;
11984	fclose(fd);
11985}
11986
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011987static int do_test(bool unpriv, unsigned int from, unsigned int to)
11988{
Joe Stringerd0a0e492018-02-14 13:50:35 -080011989 int i, passes = 0, errors = 0, skips = 0;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011990
11991 for (i = from; i < to; i++) {
11992 struct bpf_test *test = &tests[i];
11993
11994		/* Program types that are not supported by non-root users
11995		 * are skipped right away.
11996		 */
Joe Stringer0a6748742018-02-14 13:50:36 -080011997 if (!test->prog_type && unpriv_disabled) {
11998 printf("#%d/u %s SKIP\n", i, test->descr);
11999 skips++;
12000 } else if (!test->prog_type) {
Mickaël Salaünd02d8982017-02-10 00:21:37 +010012001 if (!unpriv)
12002 set_admin(false);
12003 printf("#%d/u %s ", i, test->descr);
12004 do_test_single(test, true, &passes, &errors);
12005 if (!unpriv)
12006 set_admin(true);
12007 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020012008
Joe Stringerd0a0e492018-02-14 13:50:35 -080012009 if (unpriv) {
12010 printf("#%d/p %s SKIP\n", i, test->descr);
12011 skips++;
12012 } else {
Mickaël Salaünd02d8982017-02-10 00:21:37 +010012013 printf("#%d/p %s ", i, test->descr);
12014 do_test_single(test, false, &passes, &errors);
12015 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020012016 }
12017
Joe Stringerd0a0e492018-02-14 13:50:35 -080012018 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
12019 skips, errors);
Jesper Dangaard Brouerefe5f9c2017-06-13 15:17:19 +020012020 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020012021}
12022
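/* Usage sketch (assuming the built selftest binary is named test_verifier):
 *
 *   ./test_verifier              run all tests
 *   ./test_verifier 42           run only test #42
 *   ./test_verifier 100 200      run tests #100 through #200 inclusive
 */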
12023int main(int argc, char **argv)
12024{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020012025 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +010012026 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070012027
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020012028 if (argc == 3) {
12029 unsigned int l = atoi(argv[argc - 2]);
12030 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070012031
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020012032 if (l < to && u < to) {
12033 from = l;
12034 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070012035 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020012036 } else if (argc == 2) {
12037 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -070012038
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020012039 if (t < to) {
12040 from = t;
12041 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070012042 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070012043 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070012044
Joe Stringer0a6748742018-02-14 13:50:36 -080012045 get_unpriv_disabled();
12046 if (unpriv && unpriv_disabled) {
12047 printf("Cannot run as unprivileged user with sysctl %s.\n",
12048 UNPRIV_SYSCTL);
12049 return EXIT_FAILURE;
12050 }
12051
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020012052 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070012053}