blob: 697bd83de295524d3f1c1f423a7ba503c9e2fcd4 [file] [log] [blame]
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001/*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08005 * Copyright (c) 2017 Facebook
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of version 2 of the GNU General Public
9 * License as published by the Free Software Foundation.
10 */
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011
Daniel Borkmann2c460622017-08-04 22:24:41 +020012#include <endian.h>
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080013#include <asm/types.h>
14#include <linux/types.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010015#include <stdint.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070016#include <stdio.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010017#include <stdlib.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070018#include <unistd.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070019#include <errno.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070020#include <string.h>
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -070021#include <stddef.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070022#include <stdbool.h>
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020023#include <sched.h>
Daniel Borkmann21ccaf22018-01-26 23:33:48 +010024#include <limits.h>
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020025
Mickaël Salaünd02d8982017-02-10 00:21:37 +010026#include <sys/capability.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070027#include <sys/resource.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070028
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020029#include <linux/unistd.h>
30#include <linux/filter.h>
31#include <linux/bpf_perf_event.h>
32#include <linux/bpf.h>
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080033#include <linux/if_ether.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070034
Mickaël Salaün2ee89fb2017-02-10 00:21:38 +010035#include <bpf/bpf.h>
36
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020037#ifdef HAVE_GENHDR
38# include "autoconf.h"
39#else
40# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42# endif
43#endif
44
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020045#include "../../../include/linux/filter.h"
46
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020047#ifndef ARRAY_SIZE
48# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
49#endif
50
51#define MAX_INSNS 512
52#define MAX_FIXUPS 8
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070053#define MAX_NR_MAPS 4
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080054#define POINTER_VALUE 0xcafe4all
55#define TEST_DATA_LEN 64
Alexei Starovoitovbf508872015-10-07 22:23:23 -070056
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020057#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
Daniel Borkmann614d0d72017-05-25 01:05:09 +020058#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020059
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070060struct bpf_test {
61 const char *descr;
62 struct bpf_insn insns[MAX_INSNS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020063 int fixup_map1[MAX_FIXUPS];
64 int fixup_map2[MAX_FIXUPS];
65 int fixup_prog[MAX_FIXUPS];
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070066 int fixup_map_in_map[MAX_FIXUPS];
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070067 const char *errstr;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070068 const char *errstr_unpriv;
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080069 uint32_t retval;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070070 enum {
Alexei Starovoitovbf508872015-10-07 22:23:23 -070071 UNDEF,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070072 ACCEPT,
73 REJECT
Alexei Starovoitovbf508872015-10-07 22:23:23 -070074 } result, result_unpriv;
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070075 enum bpf_prog_type prog_type;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020076 uint8_t flags;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070077};
78
Josef Bacik48461132016-09-28 10:54:32 -040079/* Note we want this to be 64 bit aligned so that the end of our array is
80 * actually the end of the structure.
81 */
82#define MAX_ENTRIES 11
Josef Bacik48461132016-09-28 10:54:32 -040083
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020084struct test_val {
85 unsigned int index;
86 int foo[MAX_ENTRIES];
Josef Bacik48461132016-09-28 10:54:32 -040087};
88
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070089static struct bpf_test tests[] = {
90 {
91 "add+sub+mul",
92 .insns = {
93 BPF_MOV64_IMM(BPF_REG_1, 1),
94 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
95 BPF_MOV64_IMM(BPF_REG_2, 3),
96 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
97 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
98 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
99 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
100 BPF_EXIT_INSN(),
101 },
102 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800103 .retval = -3,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700104 },
105 {
Daniel Borkmann87c17932018-01-20 01:24:32 +0100106 "DIV32 by 0, zero check 1",
107 .insns = {
108 BPF_MOV32_IMM(BPF_REG_0, 42),
109 BPF_MOV32_IMM(BPF_REG_1, 0),
110 BPF_MOV32_IMM(BPF_REG_2, 1),
111 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
112 BPF_EXIT_INSN(),
113 },
114 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100115 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100116 },
117 {
118 "DIV32 by 0, zero check 2",
119 .insns = {
120 BPF_MOV32_IMM(BPF_REG_0, 42),
121 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
122 BPF_MOV32_IMM(BPF_REG_2, 1),
123 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
124 BPF_EXIT_INSN(),
125 },
126 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100127 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100128 },
129 {
130 "DIV64 by 0, zero check",
131 .insns = {
132 BPF_MOV32_IMM(BPF_REG_0, 42),
133 BPF_MOV32_IMM(BPF_REG_1, 0),
134 BPF_MOV32_IMM(BPF_REG_2, 1),
135 BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
136 BPF_EXIT_INSN(),
137 },
138 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100139 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100140 },
141 {
142 "MOD32 by 0, zero check 1",
143 .insns = {
144 BPF_MOV32_IMM(BPF_REG_0, 42),
145 BPF_MOV32_IMM(BPF_REG_1, 0),
146 BPF_MOV32_IMM(BPF_REG_2, 1),
147 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
148 BPF_EXIT_INSN(),
149 },
150 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100151 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100152 },
153 {
154 "MOD32 by 0, zero check 2",
155 .insns = {
156 BPF_MOV32_IMM(BPF_REG_0, 42),
157 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
158 BPF_MOV32_IMM(BPF_REG_2, 1),
159 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
160 BPF_EXIT_INSN(),
161 },
162 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100163 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100164 },
165 {
166 "MOD64 by 0, zero check",
167 .insns = {
168 BPF_MOV32_IMM(BPF_REG_0, 42),
169 BPF_MOV32_IMM(BPF_REG_1, 0),
170 BPF_MOV32_IMM(BPF_REG_2, 1),
171 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
172 BPF_EXIT_INSN(),
173 },
174 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100175 .retval = 42,
176 },
177 {
178 "DIV32 by 0, zero check ok, cls",
179 .insns = {
180 BPF_MOV32_IMM(BPF_REG_0, 42),
181 BPF_MOV32_IMM(BPF_REG_1, 2),
182 BPF_MOV32_IMM(BPF_REG_2, 16),
183 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
184 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
185 BPF_EXIT_INSN(),
186 },
187 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
188 .result = ACCEPT,
189 .retval = 8,
190 },
191 {
192 "DIV32 by 0, zero check 1, cls",
193 .insns = {
194 BPF_MOV32_IMM(BPF_REG_1, 0),
195 BPF_MOV32_IMM(BPF_REG_0, 1),
196 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
197 BPF_EXIT_INSN(),
198 },
199 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
200 .result = ACCEPT,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100201 .retval = 0,
202 },
203 {
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100204 "DIV32 by 0, zero check 2, cls",
205 .insns = {
206 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
207 BPF_MOV32_IMM(BPF_REG_0, 1),
208 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
209 BPF_EXIT_INSN(),
210 },
211 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
212 .result = ACCEPT,
213 .retval = 0,
214 },
215 {
216 "DIV64 by 0, zero check, cls",
217 .insns = {
218 BPF_MOV32_IMM(BPF_REG_1, 0),
219 BPF_MOV32_IMM(BPF_REG_0, 1),
220 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
221 BPF_EXIT_INSN(),
222 },
223 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
224 .result = ACCEPT,
225 .retval = 0,
226 },
227 {
228 "MOD32 by 0, zero check ok, cls",
229 .insns = {
230 BPF_MOV32_IMM(BPF_REG_0, 42),
231 BPF_MOV32_IMM(BPF_REG_1, 3),
232 BPF_MOV32_IMM(BPF_REG_2, 5),
233 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
234 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
235 BPF_EXIT_INSN(),
236 },
237 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
238 .result = ACCEPT,
239 .retval = 2,
240 },
241 {
242 "MOD32 by 0, zero check 1, cls",
243 .insns = {
244 BPF_MOV32_IMM(BPF_REG_1, 0),
245 BPF_MOV32_IMM(BPF_REG_0, 1),
246 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
247 BPF_EXIT_INSN(),
248 },
249 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
250 .result = ACCEPT,
251 .retval = 1,
252 },
253 {
254 "MOD32 by 0, zero check 2, cls",
255 .insns = {
256 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
257 BPF_MOV32_IMM(BPF_REG_0, 1),
258 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
259 BPF_EXIT_INSN(),
260 },
261 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
262 .result = ACCEPT,
263 .retval = 1,
264 },
265 {
266 "MOD64 by 0, zero check 1, cls",
267 .insns = {
268 BPF_MOV32_IMM(BPF_REG_1, 0),
269 BPF_MOV32_IMM(BPF_REG_0, 2),
270 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
271 BPF_EXIT_INSN(),
272 },
273 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
274 .result = ACCEPT,
275 .retval = 2,
276 },
277 {
278 "MOD64 by 0, zero check 2, cls",
279 .insns = {
280 BPF_MOV32_IMM(BPF_REG_1, 0),
281 BPF_MOV32_IMM(BPF_REG_0, -1),
282 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
283 BPF_EXIT_INSN(),
284 },
285 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
286 .result = ACCEPT,
287 .retval = -1,
288 },
289 /* Just make sure that JITs used udiv/umod as otherwise we get
290 * an exception from INT_MIN/-1 overflow similarly as with div
291 * by zero.
292 */
293 {
294 "DIV32 overflow, check 1",
295 .insns = {
296 BPF_MOV32_IMM(BPF_REG_1, -1),
297 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
298 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
299 BPF_EXIT_INSN(),
300 },
301 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
302 .result = ACCEPT,
303 .retval = 0,
304 },
305 {
306 "DIV32 overflow, check 2",
307 .insns = {
308 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
309 BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
310 BPF_EXIT_INSN(),
311 },
312 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
313 .result = ACCEPT,
314 .retval = 0,
315 },
316 {
317 "DIV64 overflow, check 1",
318 .insns = {
319 BPF_MOV64_IMM(BPF_REG_1, -1),
320 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
321 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
322 BPF_EXIT_INSN(),
323 },
324 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
325 .result = ACCEPT,
326 .retval = 0,
327 },
328 {
329 "DIV64 overflow, check 2",
330 .insns = {
331 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
332 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
333 BPF_EXIT_INSN(),
334 },
335 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
336 .result = ACCEPT,
337 .retval = 0,
338 },
339 {
340 "MOD32 overflow, check 1",
341 .insns = {
342 BPF_MOV32_IMM(BPF_REG_1, -1),
343 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
344 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
345 BPF_EXIT_INSN(),
346 },
347 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
348 .result = ACCEPT,
349 .retval = INT_MIN,
350 },
351 {
352 "MOD32 overflow, check 2",
353 .insns = {
354 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
355 BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
356 BPF_EXIT_INSN(),
357 },
358 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
359 .result = ACCEPT,
360 .retval = INT_MIN,
361 },
362 {
363 "MOD64 overflow, check 1",
364 .insns = {
365 BPF_MOV64_IMM(BPF_REG_1, -1),
366 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
367 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
368 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
369 BPF_MOV32_IMM(BPF_REG_0, 0),
370 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
371 BPF_MOV32_IMM(BPF_REG_0, 1),
372 BPF_EXIT_INSN(),
373 },
374 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
375 .result = ACCEPT,
376 .retval = 1,
377 },
378 {
379 "MOD64 overflow, check 2",
380 .insns = {
381 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
382 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
383 BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
384 BPF_MOV32_IMM(BPF_REG_0, 0),
385 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
386 BPF_MOV32_IMM(BPF_REG_0, 1),
387 BPF_EXIT_INSN(),
388 },
389 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
390 .result = ACCEPT,
391 .retval = 1,
392 },
393 {
394 "xor32 zero extend check",
395 .insns = {
396 BPF_MOV32_IMM(BPF_REG_2, -1),
397 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
398 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
399 BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
400 BPF_MOV32_IMM(BPF_REG_0, 2),
401 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
402 BPF_MOV32_IMM(BPF_REG_0, 1),
403 BPF_EXIT_INSN(),
404 },
405 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
406 .result = ACCEPT,
407 .retval = 1,
408 },
409 {
Daniel Borkmann87c17932018-01-20 01:24:32 +0100410 "empty prog",
411 .insns = {
412 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100413 .errstr = "unknown opcode 00",
Daniel Borkmann87c17932018-01-20 01:24:32 +0100414 .result = REJECT,
415 },
416 {
417 "only exit insn",
418 .insns = {
419 BPF_EXIT_INSN(),
420 },
421 .errstr = "R0 !read_ok",
422 .result = REJECT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700423 },
424 {
425 "unreachable",
426 .insns = {
427 BPF_EXIT_INSN(),
428 BPF_EXIT_INSN(),
429 },
430 .errstr = "unreachable",
431 .result = REJECT,
432 },
433 {
434 "unreachable2",
435 .insns = {
436 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
437 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
438 BPF_EXIT_INSN(),
439 },
440 .errstr = "unreachable",
441 .result = REJECT,
442 },
443 {
444 "out of range jump",
445 .insns = {
446 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
447 BPF_EXIT_INSN(),
448 },
449 .errstr = "jump out of range",
450 .result = REJECT,
451 },
452 {
453 "out of range jump2",
454 .insns = {
455 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
456 BPF_EXIT_INSN(),
457 },
458 .errstr = "jump out of range",
459 .result = REJECT,
460 },
461 {
462 "test1 ld_imm64",
463 .insns = {
464 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
465 BPF_LD_IMM64(BPF_REG_0, 0),
466 BPF_LD_IMM64(BPF_REG_0, 0),
467 BPF_LD_IMM64(BPF_REG_0, 1),
468 BPF_LD_IMM64(BPF_REG_0, 1),
469 BPF_MOV64_IMM(BPF_REG_0, 2),
470 BPF_EXIT_INSN(),
471 },
472 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700473 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700474 .result = REJECT,
475 },
476 {
477 "test2 ld_imm64",
478 .insns = {
479 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
480 BPF_LD_IMM64(BPF_REG_0, 0),
481 BPF_LD_IMM64(BPF_REG_0, 0),
482 BPF_LD_IMM64(BPF_REG_0, 1),
483 BPF_LD_IMM64(BPF_REG_0, 1),
484 BPF_EXIT_INSN(),
485 },
486 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700487 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700488 .result = REJECT,
489 },
490 {
491 "test3 ld_imm64",
492 .insns = {
493 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
494 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
495 BPF_LD_IMM64(BPF_REG_0, 0),
496 BPF_LD_IMM64(BPF_REG_0, 0),
497 BPF_LD_IMM64(BPF_REG_0, 1),
498 BPF_LD_IMM64(BPF_REG_0, 1),
499 BPF_EXIT_INSN(),
500 },
501 .errstr = "invalid bpf_ld_imm64 insn",
502 .result = REJECT,
503 },
504 {
505 "test4 ld_imm64",
506 .insns = {
507 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
508 BPF_EXIT_INSN(),
509 },
510 .errstr = "invalid bpf_ld_imm64 insn",
511 .result = REJECT,
512 },
513 {
514 "test5 ld_imm64",
515 .insns = {
516 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
517 },
518 .errstr = "invalid bpf_ld_imm64 insn",
519 .result = REJECT,
520 },
521 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200522 "test6 ld_imm64",
523 .insns = {
524 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
525 BPF_RAW_INSN(0, 0, 0, 0, 0),
526 BPF_EXIT_INSN(),
527 },
528 .result = ACCEPT,
529 },
530 {
531 "test7 ld_imm64",
532 .insns = {
533 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
534 BPF_RAW_INSN(0, 0, 0, 0, 1),
535 BPF_EXIT_INSN(),
536 },
537 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800538 .retval = 1,
Daniel Borkmann728a8532017-04-27 01:39:32 +0200539 },
540 {
541 "test8 ld_imm64",
542 .insns = {
543 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
544 BPF_RAW_INSN(0, 0, 0, 0, 1),
545 BPF_EXIT_INSN(),
546 },
547 .errstr = "uses reserved fields",
548 .result = REJECT,
549 },
550 {
551 "test9 ld_imm64",
552 .insns = {
553 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
554 BPF_RAW_INSN(0, 0, 0, 1, 1),
555 BPF_EXIT_INSN(),
556 },
557 .errstr = "invalid bpf_ld_imm64 insn",
558 .result = REJECT,
559 },
560 {
561 "test10 ld_imm64",
562 .insns = {
563 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
564 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
565 BPF_EXIT_INSN(),
566 },
567 .errstr = "invalid bpf_ld_imm64 insn",
568 .result = REJECT,
569 },
570 {
571 "test11 ld_imm64",
572 .insns = {
573 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
574 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
575 BPF_EXIT_INSN(),
576 },
577 .errstr = "invalid bpf_ld_imm64 insn",
578 .result = REJECT,
579 },
580 {
581 "test12 ld_imm64",
582 .insns = {
583 BPF_MOV64_IMM(BPF_REG_1, 0),
584 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
585 BPF_RAW_INSN(0, 0, 0, 0, 1),
586 BPF_EXIT_INSN(),
587 },
588 .errstr = "not pointing to valid bpf_map",
589 .result = REJECT,
590 },
591 {
592 "test13 ld_imm64",
593 .insns = {
594 BPF_MOV64_IMM(BPF_REG_1, 0),
595 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
596 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
597 BPF_EXIT_INSN(),
598 },
599 .errstr = "invalid bpf_ld_imm64 insn",
600 .result = REJECT,
601 },
602 {
Daniel Borkmann7891a872018-01-10 20:04:37 +0100603 "arsh32 on imm",
604 .insns = {
605 BPF_MOV64_IMM(BPF_REG_0, 1),
606 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
607 BPF_EXIT_INSN(),
608 },
609 .result = REJECT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100610 .errstr = "unknown opcode c4",
Daniel Borkmann7891a872018-01-10 20:04:37 +0100611 },
612 {
613 "arsh32 on reg",
614 .insns = {
615 BPF_MOV64_IMM(BPF_REG_0, 1),
616 BPF_MOV64_IMM(BPF_REG_1, 5),
617 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
618 BPF_EXIT_INSN(),
619 },
620 .result = REJECT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100621 .errstr = "unknown opcode cc",
Daniel Borkmann7891a872018-01-10 20:04:37 +0100622 },
623 {
624 "arsh64 on imm",
625 .insns = {
626 BPF_MOV64_IMM(BPF_REG_0, 1),
627 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
628 BPF_EXIT_INSN(),
629 },
630 .result = ACCEPT,
631 },
632 {
633 "arsh64 on reg",
634 .insns = {
635 BPF_MOV64_IMM(BPF_REG_0, 1),
636 BPF_MOV64_IMM(BPF_REG_1, 5),
637 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
638 BPF_EXIT_INSN(),
639 },
640 .result = ACCEPT,
641 },
642 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700643 "no bpf_exit",
644 .insns = {
645 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
646 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -0800647 .errstr = "not an exit",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700648 .result = REJECT,
649 },
650 {
651 "loop (back-edge)",
652 .insns = {
653 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
654 BPF_EXIT_INSN(),
655 },
656 .errstr = "back-edge",
657 .result = REJECT,
658 },
659 {
660 "loop2 (back-edge)",
661 .insns = {
662 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
663 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
664 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
665 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
666 BPF_EXIT_INSN(),
667 },
668 .errstr = "back-edge",
669 .result = REJECT,
670 },
671 {
672 "conditional loop",
673 .insns = {
674 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
675 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
676 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
677 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
678 BPF_EXIT_INSN(),
679 },
680 .errstr = "back-edge",
681 .result = REJECT,
682 },
683 {
684 "read uninitialized register",
685 .insns = {
686 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
687 BPF_EXIT_INSN(),
688 },
689 .errstr = "R2 !read_ok",
690 .result = REJECT,
691 },
692 {
693 "read invalid register",
694 .insns = {
695 BPF_MOV64_REG(BPF_REG_0, -1),
696 BPF_EXIT_INSN(),
697 },
698 .errstr = "R15 is invalid",
699 .result = REJECT,
700 },
701 {
702 "program doesn't init R0 before exit",
703 .insns = {
704 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
705 BPF_EXIT_INSN(),
706 },
707 .errstr = "R0 !read_ok",
708 .result = REJECT,
709 },
710 {
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700711 "program doesn't init R0 before exit in all branches",
712 .insns = {
713 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
714 BPF_MOV64_IMM(BPF_REG_0, 1),
715 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
716 BPF_EXIT_INSN(),
717 },
718 .errstr = "R0 !read_ok",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700719 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700720 .result = REJECT,
721 },
722 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700723 "stack out of bounds",
724 .insns = {
725 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
726 BPF_EXIT_INSN(),
727 },
728 .errstr = "invalid stack",
729 .result = REJECT,
730 },
731 {
732 "invalid call insn1",
733 .insns = {
734 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
735 BPF_EXIT_INSN(),
736 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100737 .errstr = "unknown opcode 8d",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700738 .result = REJECT,
739 },
740 {
741 "invalid call insn2",
742 .insns = {
743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
744 BPF_EXIT_INSN(),
745 },
746 .errstr = "BPF_CALL uses reserved",
747 .result = REJECT,
748 },
749 {
750 "invalid function call",
751 .insns = {
752 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
753 BPF_EXIT_INSN(),
754 },
Daniel Borkmanne00c7b22016-11-26 01:28:09 +0100755 .errstr = "invalid func unknown#1234567",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700756 .result = REJECT,
757 },
758 {
759 "uninitialized stack1",
760 .insns = {
761 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
763 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
765 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700766 BPF_EXIT_INSN(),
767 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200768 .fixup_map1 = { 2 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700769 .errstr = "invalid indirect read from stack",
770 .result = REJECT,
771 },
772 {
773 "uninitialized stack2",
774 .insns = {
775 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
776 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
777 BPF_EXIT_INSN(),
778 },
779 .errstr = "invalid read from stack",
780 .result = REJECT,
781 },
782 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200783 "invalid fp arithmetic",
784 /* If this gets ever changed, make sure JITs can deal with it. */
785 .insns = {
786 BPF_MOV64_IMM(BPF_REG_0, 0),
787 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
788 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
789 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
790 BPF_EXIT_INSN(),
791 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -0800792 .errstr = "R1 subtraction from stack pointer",
Daniel Borkmann728a8532017-04-27 01:39:32 +0200793 .result = REJECT,
794 },
795 {
796 "non-invalid fp arithmetic",
797 .insns = {
798 BPF_MOV64_IMM(BPF_REG_0, 0),
799 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
800 BPF_EXIT_INSN(),
801 },
802 .result = ACCEPT,
803 },
804 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200805 "invalid argument register",
806 .insns = {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200807 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
808 BPF_FUNC_get_cgroup_classid),
809 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
810 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200811 BPF_EXIT_INSN(),
812 },
813 .errstr = "R1 !read_ok",
814 .result = REJECT,
815 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
816 },
817 {
818 "non-invalid argument register",
819 .insns = {
820 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200821 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
822 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200823 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
825 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200826 BPF_EXIT_INSN(),
827 },
828 .result = ACCEPT,
829 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
830 },
831 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700832 "check valid spill/fill",
833 .insns = {
834 /* spill R1(ctx) into stack */
835 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700836 /* fill it back into R2 */
837 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700838 /* should be able to access R0 = *(R2 + 8) */
Daniel Borkmannf91fe172015-03-01 12:31:41 +0100839 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
840 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700841 BPF_EXIT_INSN(),
842 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700843 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700844 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700845 .result_unpriv = REJECT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800846 .retval = POINTER_VALUE,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700847 },
848 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +0200849 "check valid spill/fill, skb mark",
850 .insns = {
851 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
852 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
853 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
854 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
855 offsetof(struct __sk_buff, mark)),
856 BPF_EXIT_INSN(),
857 },
858 .result = ACCEPT,
859 .result_unpriv = ACCEPT,
860 },
861 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700862 "check corrupted spill/fill",
863 .insns = {
864 /* spill R1(ctx) into stack */
865 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700866 /* mess up with R1 pointer on stack */
867 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700868 /* fill back into R0 should fail */
869 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700870 BPF_EXIT_INSN(),
871 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700872 .errstr_unpriv = "attempt to corrupt spilled",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700873 .errstr = "corrupted spill",
874 .result = REJECT,
875 },
876 {
877 "invalid src register in STX",
878 .insns = {
879 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
880 BPF_EXIT_INSN(),
881 },
882 .errstr = "R15 is invalid",
883 .result = REJECT,
884 },
885 {
886 "invalid dst register in STX",
887 .insns = {
888 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
889 BPF_EXIT_INSN(),
890 },
891 .errstr = "R14 is invalid",
892 .result = REJECT,
893 },
894 {
895 "invalid dst register in ST",
896 .insns = {
897 BPF_ST_MEM(BPF_B, 14, -1, -1),
898 BPF_EXIT_INSN(),
899 },
900 .errstr = "R14 is invalid",
901 .result = REJECT,
902 },
903 {
904 "invalid src register in LDX",
905 .insns = {
906 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
907 BPF_EXIT_INSN(),
908 },
909 .errstr = "R12 is invalid",
910 .result = REJECT,
911 },
912 {
913 "invalid dst register in LDX",
914 .insns = {
915 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
916 BPF_EXIT_INSN(),
917 },
918 .errstr = "R11 is invalid",
919 .result = REJECT,
920 },
921 {
922 "junk insn",
923 .insns = {
924 BPF_RAW_INSN(0, 0, 0, 0, 0),
925 BPF_EXIT_INSN(),
926 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100927 .errstr = "unknown opcode 00",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700928 .result = REJECT,
929 },
930 {
931 "junk insn2",
932 .insns = {
933 BPF_RAW_INSN(1, 0, 0, 0, 0),
934 BPF_EXIT_INSN(),
935 },
936 .errstr = "BPF_LDX uses reserved fields",
937 .result = REJECT,
938 },
939 {
940 "junk insn3",
941 .insns = {
942 BPF_RAW_INSN(-1, 0, 0, 0, 0),
943 BPF_EXIT_INSN(),
944 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100945 .errstr = "unknown opcode ff",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700946 .result = REJECT,
947 },
948 {
949 "junk insn4",
950 .insns = {
951 BPF_RAW_INSN(-1, -1, -1, -1, -1),
952 BPF_EXIT_INSN(),
953 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100954 .errstr = "unknown opcode ff",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700955 .result = REJECT,
956 },
957 {
958 "junk insn5",
959 .insns = {
960 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
961 BPF_EXIT_INSN(),
962 },
963 .errstr = "BPF_ALU uses reserved fields",
964 .result = REJECT,
965 },
966 {
967 "misaligned read from stack",
968 .insns = {
969 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
970 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
971 BPF_EXIT_INSN(),
972 },
Edward Creef65b1842017-08-07 15:27:12 +0100973 .errstr = "misaligned stack access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700974 .result = REJECT,
975 },
976 {
977 "invalid map_fd for function call",
978 .insns = {
979 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
980 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
982 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200983 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
984 BPF_FUNC_map_delete_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700985 BPF_EXIT_INSN(),
986 },
987 .errstr = "fd 0 is not pointing to valid bpf_map",
988 .result = REJECT,
989 },
990 {
991 "don't check return value before access",
992 .insns = {
993 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
994 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
995 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
996 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200997 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
998 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700999 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1000 BPF_EXIT_INSN(),
1001 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001002 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001003 .errstr = "R0 invalid mem access 'map_value_or_null'",
1004 .result = REJECT,
1005 },
1006 {
1007 "access memory with incorrect alignment",
1008 .insns = {
1009 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1010 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1012 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001013 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1014 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001015 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1016 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1017 BPF_EXIT_INSN(),
1018 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001019 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01001020 .errstr = "misaligned value access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001021 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001022 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001023 },
1024 {
1025 "sometimes access memory with incorrect alignment",
1026 .insns = {
1027 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1028 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1029 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1030 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1032 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001033 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1034 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1035 BPF_EXIT_INSN(),
1036 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1037 BPF_EXIT_INSN(),
1038 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001039 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001040 .errstr = "R0 invalid mem access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001041 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001042 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001043 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001044 },
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001045 {
1046 "jump test 1",
1047 .insns = {
1048 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1049 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1050 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1051 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1052 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1053 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1054 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1055 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1057 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1058 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1059 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1060 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1061 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1062 BPF_MOV64_IMM(BPF_REG_0, 0),
1063 BPF_EXIT_INSN(),
1064 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001065 .errstr_unpriv = "R1 pointer comparison",
1066 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001067 .result = ACCEPT,
1068 },
1069 {
1070 "jump test 2",
1071 .insns = {
1072 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1073 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1074 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1075 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1076 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1077 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1078 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1079 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1080 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1081 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1082 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1083 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1084 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1085 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1086 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1087 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1088 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1089 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1090 BPF_MOV64_IMM(BPF_REG_0, 0),
1091 BPF_EXIT_INSN(),
1092 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001093 .errstr_unpriv = "R1 pointer comparison",
1094 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001095 .result = ACCEPT,
1096 },
1097 {
1098 "jump test 3",
1099 .insns = {
1100 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1101 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1102 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1104 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1105 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1106 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1108 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1109 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1110 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1112 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1114 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1116 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1117 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1118 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1120 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1121 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1122 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1124 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001125 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1126 BPF_FUNC_map_delete_elem),
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001127 BPF_EXIT_INSN(),
1128 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001129 .fixup_map1 = { 24 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001130 .errstr_unpriv = "R1 pointer comparison",
1131 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001132 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08001133 .retval = -ENOENT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001134 },
1135 {
1136 "jump test 4",
1137 .insns = {
1138 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1139 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1140 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1141 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1143 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1144 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1145 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1146 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1147 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1148 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1149 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1150 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1151 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1154 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1156 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1157 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1158 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1159 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1160 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1161 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1162 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1163 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1164 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1168 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1169 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1170 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1171 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1172 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1173 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1175 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1177 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1178 BPF_MOV64_IMM(BPF_REG_0, 0),
1179 BPF_EXIT_INSN(),
1180 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001181 .errstr_unpriv = "R1 pointer comparison",
1182 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001183 .result = ACCEPT,
1184 },
Alexei Starovoitov342ded42014-10-28 15:11:42 -07001185 {
1186 "jump test 5",
1187 .insns = {
1188 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1189 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1190 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1191 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1192 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1193 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1194 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1195 BPF_MOV64_IMM(BPF_REG_0, 0),
1196 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1197 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1198 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1199 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1200 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1201 BPF_MOV64_IMM(BPF_REG_0, 0),
1202 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1203 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1204 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1205 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1206 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1207 BPF_MOV64_IMM(BPF_REG_0, 0),
1208 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1209 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1210 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1211 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1212 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1213 BPF_MOV64_IMM(BPF_REG_0, 0),
1214 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1215 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1216 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1217 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1218 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1219 BPF_MOV64_IMM(BPF_REG_0, 0),
1220 BPF_EXIT_INSN(),
1221 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001222 .errstr_unpriv = "R1 pointer comparison",
1223 .result_unpriv = REJECT,
Alexei Starovoitov342ded42014-10-28 15:11:42 -07001224 .result = ACCEPT,
1225 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001226 {
1227 "access skb fields ok",
1228 .insns = {
1229 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1230 offsetof(struct __sk_buff, len)),
1231 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1232 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1233 offsetof(struct __sk_buff, mark)),
1234 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1235 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1236 offsetof(struct __sk_buff, pkt_type)),
1237 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1238 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1239 offsetof(struct __sk_buff, queue_mapping)),
1240 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitovc2497392015-03-16 18:06:02 -07001241 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1242 offsetof(struct __sk_buff, protocol)),
1243 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1244 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1245 offsetof(struct __sk_buff, vlan_present)),
1246 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1247 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1248 offsetof(struct __sk_buff, vlan_tci)),
1249 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Daniel Borkmannb1d9fc42017-04-19 23:01:17 +02001250 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1251 offsetof(struct __sk_buff, napi_id)),
1252 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001253 BPF_EXIT_INSN(),
1254 },
1255 .result = ACCEPT,
1256 },
1257 {
1258 "access skb fields bad1",
1259 .insns = {
1260 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1261 BPF_EXIT_INSN(),
1262 },
1263 .errstr = "invalid bpf_context access",
1264 .result = REJECT,
1265 },
1266 {
1267 "access skb fields bad2",
1268 .insns = {
1269 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1270 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1273 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1275 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001276 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1277 BPF_EXIT_INSN(),
1278 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1279 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1280 offsetof(struct __sk_buff, pkt_type)),
1281 BPF_EXIT_INSN(),
1282 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001283 .fixup_map1 = { 4 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001284 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001285 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001286 .result = REJECT,
1287 },
1288 {
1289 "access skb fields bad3",
1290 .insns = {
1291 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1292 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1293 offsetof(struct __sk_buff, pkt_type)),
1294 BPF_EXIT_INSN(),
1295 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1296 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1298 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001299 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1300 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001301 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1302 BPF_EXIT_INSN(),
1303 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1304 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1305 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001306 .fixup_map1 = { 6 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001307 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001308 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001309 .result = REJECT,
1310 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001311 {
1312 "access skb fields bad4",
1313 .insns = {
1314 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1315 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1316 offsetof(struct __sk_buff, len)),
1317 BPF_MOV64_IMM(BPF_REG_0, 0),
1318 BPF_EXIT_INSN(),
1319 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1320 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1321 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1322 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001323 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1324 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001325 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1326 BPF_EXIT_INSN(),
1327 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1328 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1329 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001330 .fixup_map1 = { 7 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001331 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001332 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001333 .result = REJECT,
1334 },
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001335 {
John Fastabend41bc94f2017-08-15 22:33:56 -07001336 "invalid access __sk_buff family",
1337 .insns = {
1338 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1339 offsetof(struct __sk_buff, family)),
1340 BPF_EXIT_INSN(),
1341 },
1342 .errstr = "invalid bpf_context access",
1343 .result = REJECT,
1344 },
1345 {
1346 "invalid access __sk_buff remote_ip4",
1347 .insns = {
1348 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1349 offsetof(struct __sk_buff, remote_ip4)),
1350 BPF_EXIT_INSN(),
1351 },
1352 .errstr = "invalid bpf_context access",
1353 .result = REJECT,
1354 },
1355 {
1356 "invalid access __sk_buff local_ip4",
1357 .insns = {
1358 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1359 offsetof(struct __sk_buff, local_ip4)),
1360 BPF_EXIT_INSN(),
1361 },
1362 .errstr = "invalid bpf_context access",
1363 .result = REJECT,
1364 },
1365 {
1366 "invalid access __sk_buff remote_ip6",
1367 .insns = {
1368 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1369 offsetof(struct __sk_buff, remote_ip6)),
1370 BPF_EXIT_INSN(),
1371 },
1372 .errstr = "invalid bpf_context access",
1373 .result = REJECT,
1374 },
1375 {
1376 "invalid access __sk_buff local_ip6",
1377 .insns = {
1378 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1379 offsetof(struct __sk_buff, local_ip6)),
1380 BPF_EXIT_INSN(),
1381 },
1382 .errstr = "invalid bpf_context access",
1383 .result = REJECT,
1384 },
1385 {
1386 "invalid access __sk_buff remote_port",
1387 .insns = {
1388 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1389 offsetof(struct __sk_buff, remote_port)),
1390 BPF_EXIT_INSN(),
1391 },
1392 .errstr = "invalid bpf_context access",
1393 .result = REJECT,
1394 },
1395 {
1396 "invalid access __sk_buff remote_port",
1397 .insns = {
1398 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1399 offsetof(struct __sk_buff, local_port)),
1400 BPF_EXIT_INSN(),
1401 },
1402 .errstr = "invalid bpf_context access",
1403 .result = REJECT,
1404 },
1405 {
1406 "valid access __sk_buff family",
1407 .insns = {
1408 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1409 offsetof(struct __sk_buff, family)),
1410 BPF_EXIT_INSN(),
1411 },
1412 .result = ACCEPT,
1413 .prog_type = BPF_PROG_TYPE_SK_SKB,
1414 },
1415 {
1416 "valid access __sk_buff remote_ip4",
1417 .insns = {
1418 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1419 offsetof(struct __sk_buff, remote_ip4)),
1420 BPF_EXIT_INSN(),
1421 },
1422 .result = ACCEPT,
1423 .prog_type = BPF_PROG_TYPE_SK_SKB,
1424 },
1425 {
1426 "valid access __sk_buff local_ip4",
1427 .insns = {
1428 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1429 offsetof(struct __sk_buff, local_ip4)),
1430 BPF_EXIT_INSN(),
1431 },
1432 .result = ACCEPT,
1433 .prog_type = BPF_PROG_TYPE_SK_SKB,
1434 },
1435 {
1436 "valid access __sk_buff remote_ip6",
1437 .insns = {
1438 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1439 offsetof(struct __sk_buff, remote_ip6[0])),
1440 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1441 offsetof(struct __sk_buff, remote_ip6[1])),
1442 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1443 offsetof(struct __sk_buff, remote_ip6[2])),
1444 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1445 offsetof(struct __sk_buff, remote_ip6[3])),
1446 BPF_EXIT_INSN(),
1447 },
1448 .result = ACCEPT,
1449 .prog_type = BPF_PROG_TYPE_SK_SKB,
1450 },
1451 {
1452 "valid access __sk_buff local_ip6",
1453 .insns = {
1454 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1455 offsetof(struct __sk_buff, local_ip6[0])),
1456 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1457 offsetof(struct __sk_buff, local_ip6[1])),
1458 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1459 offsetof(struct __sk_buff, local_ip6[2])),
1460 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1461 offsetof(struct __sk_buff, local_ip6[3])),
1462 BPF_EXIT_INSN(),
1463 },
1464 .result = ACCEPT,
1465 .prog_type = BPF_PROG_TYPE_SK_SKB,
1466 },
1467 {
1468 "valid access __sk_buff remote_port",
1469 .insns = {
1470 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1471 offsetof(struct __sk_buff, remote_port)),
1472 BPF_EXIT_INSN(),
1473 },
1474 .result = ACCEPT,
1475 .prog_type = BPF_PROG_TYPE_SK_SKB,
1476 },
1477 {
1478 "valid access __sk_buff remote_port",
1479 .insns = {
1480 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1481 offsetof(struct __sk_buff, local_port)),
1482 BPF_EXIT_INSN(),
1483 },
1484 .result = ACCEPT,
1485 .prog_type = BPF_PROG_TYPE_SK_SKB,
1486 },
1487 {
John Fastabended850542017-08-28 07:11:24 -07001488 "invalid access of tc_classid for SK_SKB",
1489 .insns = {
1490 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1491 offsetof(struct __sk_buff, tc_classid)),
1492 BPF_EXIT_INSN(),
1493 },
1494 .result = REJECT,
1495 .prog_type = BPF_PROG_TYPE_SK_SKB,
1496 .errstr = "invalid bpf_context access",
1497 },
1498 {
John Fastabendf7e9cb12017-10-18 07:10:58 -07001499 "invalid access of skb->mark for SK_SKB",
1500 .insns = {
1501 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1502 offsetof(struct __sk_buff, mark)),
1503 BPF_EXIT_INSN(),
1504 },
1505 .result = REJECT,
1506 .prog_type = BPF_PROG_TYPE_SK_SKB,
1507 .errstr = "invalid bpf_context access",
1508 },
1509 {
1510 "check skb->mark is not writeable by SK_SKB",
John Fastabended850542017-08-28 07:11:24 -07001511 .insns = {
1512 BPF_MOV64_IMM(BPF_REG_0, 0),
1513 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1514 offsetof(struct __sk_buff, mark)),
1515 BPF_EXIT_INSN(),
1516 },
John Fastabendf7e9cb12017-10-18 07:10:58 -07001517 .result = REJECT,
John Fastabended850542017-08-28 07:11:24 -07001518 .prog_type = BPF_PROG_TYPE_SK_SKB,
John Fastabendf7e9cb12017-10-18 07:10:58 -07001519 .errstr = "invalid bpf_context access",
John Fastabended850542017-08-28 07:11:24 -07001520 },
1521 {
1522 "check skb->tc_index is writeable by SK_SKB",
1523 .insns = {
1524 BPF_MOV64_IMM(BPF_REG_0, 0),
1525 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1526 offsetof(struct __sk_buff, tc_index)),
1527 BPF_EXIT_INSN(),
1528 },
1529 .result = ACCEPT,
1530 .prog_type = BPF_PROG_TYPE_SK_SKB,
1531 },
1532 {
1533 "check skb->priority is writeable by SK_SKB",
1534 .insns = {
1535 BPF_MOV64_IMM(BPF_REG_0, 0),
1536 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1537 offsetof(struct __sk_buff, priority)),
1538 BPF_EXIT_INSN(),
1539 },
1540 .result = ACCEPT,
1541 .prog_type = BPF_PROG_TYPE_SK_SKB,
1542 },
1543 {
1544 "direct packet read for SK_SKB",
1545 .insns = {
1546 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1547 offsetof(struct __sk_buff, data)),
1548 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1549 offsetof(struct __sk_buff, data_end)),
1550 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1552 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1553 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1554 BPF_MOV64_IMM(BPF_REG_0, 0),
1555 BPF_EXIT_INSN(),
1556 },
1557 .result = ACCEPT,
1558 .prog_type = BPF_PROG_TYPE_SK_SKB,
1559 },
1560 {
1561 "direct packet write for SK_SKB",
1562 .insns = {
1563 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1564 offsetof(struct __sk_buff, data)),
1565 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1566 offsetof(struct __sk_buff, data_end)),
1567 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1569 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1570 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1571 BPF_MOV64_IMM(BPF_REG_0, 0),
1572 BPF_EXIT_INSN(),
1573 },
1574 .result = ACCEPT,
1575 .prog_type = BPF_PROG_TYPE_SK_SKB,
1576 },
1577 {
1578 "overlapping checks for direct packet access SK_SKB",
1579 .insns = {
1580 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1581 offsetof(struct __sk_buff, data)),
1582 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1583 offsetof(struct __sk_buff, data_end)),
1584 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1586 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1587 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1589 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1590 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1591 BPF_MOV64_IMM(BPF_REG_0, 0),
1592 BPF_EXIT_INSN(),
1593 },
1594 .result = ACCEPT,
1595 .prog_type = BPF_PROG_TYPE_SK_SKB,
1596 },
1597 {
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001598 "check skb->mark is not writeable by sockets",
1599 .insns = {
1600 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1601 offsetof(struct __sk_buff, mark)),
1602 BPF_EXIT_INSN(),
1603 },
1604 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001605 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001606 .result = REJECT,
1607 },
1608 {
1609 "check skb->tc_index is not writeable by sockets",
1610 .insns = {
1611 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1612 offsetof(struct __sk_buff, tc_index)),
1613 BPF_EXIT_INSN(),
1614 },
1615 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001616 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001617 .result = REJECT,
1618 },
1619 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001620 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001621 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001622 BPF_MOV64_IMM(BPF_REG_0, 0),
1623 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1624 offsetof(struct __sk_buff, cb[0])),
1625 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1626 offsetof(struct __sk_buff, cb[0]) + 1),
1627 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1628 offsetof(struct __sk_buff, cb[0]) + 2),
1629 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1630 offsetof(struct __sk_buff, cb[0]) + 3),
1631 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1632 offsetof(struct __sk_buff, cb[1])),
1633 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1634 offsetof(struct __sk_buff, cb[1]) + 1),
1635 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1636 offsetof(struct __sk_buff, cb[1]) + 2),
1637 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1638 offsetof(struct __sk_buff, cb[1]) + 3),
1639 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1640 offsetof(struct __sk_buff, cb[2])),
1641 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1642 offsetof(struct __sk_buff, cb[2]) + 1),
1643 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1644 offsetof(struct __sk_buff, cb[2]) + 2),
1645 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1646 offsetof(struct __sk_buff, cb[2]) + 3),
1647 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1648 offsetof(struct __sk_buff, cb[3])),
1649 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1650 offsetof(struct __sk_buff, cb[3]) + 1),
1651 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1652 offsetof(struct __sk_buff, cb[3]) + 2),
1653 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1654 offsetof(struct __sk_buff, cb[3]) + 3),
1655 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1656 offsetof(struct __sk_buff, cb[4])),
1657 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1658 offsetof(struct __sk_buff, cb[4]) + 1),
1659 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1660 offsetof(struct __sk_buff, cb[4]) + 2),
1661 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1662 offsetof(struct __sk_buff, cb[4]) + 3),
1663 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1664 offsetof(struct __sk_buff, cb[0])),
1665 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1666 offsetof(struct __sk_buff, cb[0]) + 1),
1667 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1668 offsetof(struct __sk_buff, cb[0]) + 2),
1669 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1670 offsetof(struct __sk_buff, cb[0]) + 3),
1671 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1672 offsetof(struct __sk_buff, cb[1])),
1673 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1674 offsetof(struct __sk_buff, cb[1]) + 1),
1675 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1676 offsetof(struct __sk_buff, cb[1]) + 2),
1677 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1678 offsetof(struct __sk_buff, cb[1]) + 3),
1679 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1680 offsetof(struct __sk_buff, cb[2])),
1681 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1682 offsetof(struct __sk_buff, cb[2]) + 1),
1683 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1684 offsetof(struct __sk_buff, cb[2]) + 2),
1685 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1686 offsetof(struct __sk_buff, cb[2]) + 3),
1687 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1688 offsetof(struct __sk_buff, cb[3])),
1689 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1690 offsetof(struct __sk_buff, cb[3]) + 1),
1691 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1692 offsetof(struct __sk_buff, cb[3]) + 2),
1693 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1694 offsetof(struct __sk_buff, cb[3]) + 3),
1695 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1696 offsetof(struct __sk_buff, cb[4])),
1697 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1698 offsetof(struct __sk_buff, cb[4]) + 1),
1699 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1700 offsetof(struct __sk_buff, cb[4]) + 2),
1701 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1702 offsetof(struct __sk_buff, cb[4]) + 3),
1703 BPF_EXIT_INSN(),
1704 },
1705 .result = ACCEPT,
1706 },
1707 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001708 "__sk_buff->hash, offset 0, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001709 .insns = {
1710 BPF_MOV64_IMM(BPF_REG_0, 0),
1711 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001712 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001713 BPF_EXIT_INSN(),
1714 },
1715 .errstr = "invalid bpf_context access",
1716 .result = REJECT,
1717 },
1718 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001719 "__sk_buff->tc_index, offset 3, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001720 .insns = {
1721 BPF_MOV64_IMM(BPF_REG_0, 0),
1722 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001723 offsetof(struct __sk_buff, tc_index) + 3),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001724 BPF_EXIT_INSN(),
1725 },
1726 .errstr = "invalid bpf_context access",
1727 .result = REJECT,
1728 },
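	/*
	 * The hash load tests below expect a byte load of the 32-bit
	 * skb->hash field to be allowed only for its least significant
	 * byte, whose offset depends on host endianness -- hence the
	 * __BYTE_ORDER conditionals.
	 */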
1729 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001730 "check skb->hash byte load permitted",
1731 .insns = {
1732 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001733#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001734 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1735 offsetof(struct __sk_buff, hash)),
1736#else
1737 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1738 offsetof(struct __sk_buff, hash) + 3),
1739#endif
1740 BPF_EXIT_INSN(),
1741 },
1742 .result = ACCEPT,
1743 },
1744 {
1745 "check skb->hash byte load not permitted 1",
1746 .insns = {
1747 BPF_MOV64_IMM(BPF_REG_0, 0),
1748 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1749 offsetof(struct __sk_buff, hash) + 1),
1750 BPF_EXIT_INSN(),
1751 },
1752 .errstr = "invalid bpf_context access",
1753 .result = REJECT,
1754 },
1755 {
1756 "check skb->hash byte load not permitted 2",
1757 .insns = {
1758 BPF_MOV64_IMM(BPF_REG_0, 0),
1759 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1760 offsetof(struct __sk_buff, hash) + 2),
1761 BPF_EXIT_INSN(),
1762 },
1763 .errstr = "invalid bpf_context access",
1764 .result = REJECT,
1765 },
1766 {
1767 "check skb->hash byte load not permitted 3",
1768 .insns = {
1769 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001770#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001771 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1772 offsetof(struct __sk_buff, hash) + 3),
1773#else
1774 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1775 offsetof(struct __sk_buff, hash)),
1776#endif
1777 BPF_EXIT_INSN(),
1778 },
1779 .errstr = "invalid bpf_context access",
1780 .result = REJECT,
1781 },
1782 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001783 "check cb access: byte, wrong type",
1784 .insns = {
1785 BPF_MOV64_IMM(BPF_REG_0, 0),
1786 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001787 offsetof(struct __sk_buff, cb[0])),
1788 BPF_EXIT_INSN(),
1789 },
1790 .errstr = "invalid bpf_context access",
1791 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001792 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1793 },
1794 {
1795 "check cb access: half",
1796 .insns = {
1797 BPF_MOV64_IMM(BPF_REG_0, 0),
1798 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1799 offsetof(struct __sk_buff, cb[0])),
1800 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1801 offsetof(struct __sk_buff, cb[0]) + 2),
1802 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1803 offsetof(struct __sk_buff, cb[1])),
1804 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1805 offsetof(struct __sk_buff, cb[1]) + 2),
1806 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1807 offsetof(struct __sk_buff, cb[2])),
1808 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1809 offsetof(struct __sk_buff, cb[2]) + 2),
1810 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1811 offsetof(struct __sk_buff, cb[3])),
1812 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1813 offsetof(struct __sk_buff, cb[3]) + 2),
1814 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1815 offsetof(struct __sk_buff, cb[4])),
1816 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1817 offsetof(struct __sk_buff, cb[4]) + 2),
1818 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1819 offsetof(struct __sk_buff, cb[0])),
1820 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1821 offsetof(struct __sk_buff, cb[0]) + 2),
1822 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1823 offsetof(struct __sk_buff, cb[1])),
1824 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1825 offsetof(struct __sk_buff, cb[1]) + 2),
1826 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1827 offsetof(struct __sk_buff, cb[2])),
1828 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1829 offsetof(struct __sk_buff, cb[2]) + 2),
1830 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1831 offsetof(struct __sk_buff, cb[3])),
1832 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1833 offsetof(struct __sk_buff, cb[3]) + 2),
1834 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1835 offsetof(struct __sk_buff, cb[4])),
1836 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1837 offsetof(struct __sk_buff, cb[4]) + 2),
1838 BPF_EXIT_INSN(),
1839 },
1840 .result = ACCEPT,
1841 },
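	/*
	 * Unaligned cb accesses: tests flagged F_LOAD_WITH_STRICT_ALIGNMENT
	 * request strict alignment checking at load time (presumably via
	 * BPF_F_STRICT_ALIGNMENT), so misaligned context accesses must be
	 * rejected even on architectures with efficient unaligned access.
	 */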
1842 {
1843 "check cb access: half, unaligned",
1844 .insns = {
1845 BPF_MOV64_IMM(BPF_REG_0, 0),
1846 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1847 offsetof(struct __sk_buff, cb[0]) + 1),
1848 BPF_EXIT_INSN(),
1849 },
Edward Creef65b1842017-08-07 15:27:12 +01001850 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001851 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001852 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001853 },
1854 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001855 "check __sk_buff->hash, offset 0, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001856 .insns = {
1857 BPF_MOV64_IMM(BPF_REG_0, 0),
1858 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001859 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001860 BPF_EXIT_INSN(),
1861 },
1862 .errstr = "invalid bpf_context access",
1863 .result = REJECT,
1864 },
1865 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001866 "check __sk_buff->tc_index, offset 2, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001867 .insns = {
1868 BPF_MOV64_IMM(BPF_REG_0, 0),
1869 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001870 offsetof(struct __sk_buff, tc_index) + 2),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001871 BPF_EXIT_INSN(),
1872 },
1873 .errstr = "invalid bpf_context access",
1874 .result = REJECT,
1875 },
1876 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001877 "check skb->hash half load permitted",
1878 .insns = {
1879 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001880#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001881 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1882 offsetof(struct __sk_buff, hash)),
1883#else
1884 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1885 offsetof(struct __sk_buff, hash) + 2),
1886#endif
1887 BPF_EXIT_INSN(),
1888 },
1889 .result = ACCEPT,
1890 },
1891 {
1892 "check skb->hash half load not permitted",
1893 .insns = {
1894 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001895#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001896 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1897 offsetof(struct __sk_buff, hash) + 2),
1898#else
1899 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1900 offsetof(struct __sk_buff, hash)),
1901#endif
1902 BPF_EXIT_INSN(),
1903 },
1904 .errstr = "invalid bpf_context access",
1905 .result = REJECT,
1906 },
1907 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001908 "check cb access: half, wrong type",
1909 .insns = {
1910 BPF_MOV64_IMM(BPF_REG_0, 0),
1911 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1912 offsetof(struct __sk_buff, cb[0])),
1913 BPF_EXIT_INSN(),
1914 },
1915 .errstr = "invalid bpf_context access",
1916 .result = REJECT,
1917 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1918 },
1919 {
1920 "check cb access: word",
1921 .insns = {
1922 BPF_MOV64_IMM(BPF_REG_0, 0),
1923 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1924 offsetof(struct __sk_buff, cb[0])),
1925 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1926 offsetof(struct __sk_buff, cb[1])),
1927 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1928 offsetof(struct __sk_buff, cb[2])),
1929 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1930 offsetof(struct __sk_buff, cb[3])),
1931 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1932 offsetof(struct __sk_buff, cb[4])),
1933 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1934 offsetof(struct __sk_buff, cb[0])),
1935 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1936 offsetof(struct __sk_buff, cb[1])),
1937 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1938 offsetof(struct __sk_buff, cb[2])),
1939 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1940 offsetof(struct __sk_buff, cb[3])),
1941 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1942 offsetof(struct __sk_buff, cb[4])),
1943 BPF_EXIT_INSN(),
1944 },
1945 .result = ACCEPT,
1946 },
1947 {
1948 "check cb access: word, unaligned 1",
1949 .insns = {
1950 BPF_MOV64_IMM(BPF_REG_0, 0),
1951 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1952 offsetof(struct __sk_buff, cb[0]) + 2),
1953 BPF_EXIT_INSN(),
1954 },
Edward Creef65b1842017-08-07 15:27:12 +01001955 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001956 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001957 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001958 },
1959 {
1960 "check cb access: word, unaligned 2",
1961 .insns = {
1962 BPF_MOV64_IMM(BPF_REG_0, 0),
1963 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1964 offsetof(struct __sk_buff, cb[4]) + 1),
1965 BPF_EXIT_INSN(),
1966 },
Edward Creef65b1842017-08-07 15:27:12 +01001967 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001968 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001969 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001970 },
1971 {
1972 "check cb access: word, unaligned 3",
1973 .insns = {
1974 BPF_MOV64_IMM(BPF_REG_0, 0),
1975 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1976 offsetof(struct __sk_buff, cb[4]) + 2),
1977 BPF_EXIT_INSN(),
1978 },
Edward Creef65b1842017-08-07 15:27:12 +01001979 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001980 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001981 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001982 },
1983 {
1984 "check cb access: word, unaligned 4",
1985 .insns = {
1986 BPF_MOV64_IMM(BPF_REG_0, 0),
1987 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1988 offsetof(struct __sk_buff, cb[4]) + 3),
1989 BPF_EXIT_INSN(),
1990 },
Edward Creef65b1842017-08-07 15:27:12 +01001991 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001992 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001993 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001994 },
1995 {
1996 "check cb access: double",
1997 .insns = {
1998 BPF_MOV64_IMM(BPF_REG_0, 0),
1999 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2000 offsetof(struct __sk_buff, cb[0])),
2001 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2002 offsetof(struct __sk_buff, cb[2])),
2003 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2004 offsetof(struct __sk_buff, cb[0])),
2005 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2006 offsetof(struct __sk_buff, cb[2])),
2007 BPF_EXIT_INSN(),
2008 },
2009 .result = ACCEPT,
2010 },
2011 {
2012 "check cb access: double, unaligned 1",
2013 .insns = {
2014 BPF_MOV64_IMM(BPF_REG_0, 0),
2015 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2016 offsetof(struct __sk_buff, cb[1])),
2017 BPF_EXIT_INSN(),
2018 },
Edward Creef65b1842017-08-07 15:27:12 +01002019 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002020 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002021 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002022 },
2023 {
2024 "check cb access: double, unaligned 2",
2025 .insns = {
2026 BPF_MOV64_IMM(BPF_REG_0, 0),
2027 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2028 offsetof(struct __sk_buff, cb[3])),
2029 BPF_EXIT_INSN(),
2030 },
Edward Creef65b1842017-08-07 15:27:12 +01002031 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002032 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002033 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002034 },
2035 {
2036 "check cb access: double, oob 1",
2037 .insns = {
2038 BPF_MOV64_IMM(BPF_REG_0, 0),
2039 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2040 offsetof(struct __sk_buff, cb[4])),
2041 BPF_EXIT_INSN(),
2042 },
2043 .errstr = "invalid bpf_context access",
2044 .result = REJECT,
2045 },
2046 {
2047 "check cb access: double, oob 2",
2048 .insns = {
2049 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002050 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2051 offsetof(struct __sk_buff, cb[4])),
2052 BPF_EXIT_INSN(),
2053 },
2054 .errstr = "invalid bpf_context access",
2055 .result = REJECT,
2056 },
2057 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002058 "check __sk_buff->ifindex dw store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002059 .insns = {
2060 BPF_MOV64_IMM(BPF_REG_0, 0),
Yonghong Song31fd8582017-06-13 15:52:13 -07002061 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2062 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002063 BPF_EXIT_INSN(),
2064 },
2065 .errstr = "invalid bpf_context access",
2066 .result = REJECT,
2067 },
2068 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002069 "check __sk_buff->ifindex dw load not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002070 .insns = {
2071 BPF_MOV64_IMM(BPF_REG_0, 0),
2072 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
Yonghong Song31fd8582017-06-13 15:52:13 -07002073 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002074 BPF_EXIT_INSN(),
2075 },
2076 .errstr = "invalid bpf_context access",
2077 .result = REJECT,
2078 },
2079 {
2080 "check cb access: double, wrong type",
2081 .insns = {
2082 BPF_MOV64_IMM(BPF_REG_0, 0),
2083 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2084 offsetof(struct __sk_buff, cb[0])),
2085 BPF_EXIT_INSN(),
2086 },
2087 .errstr = "invalid bpf_context access",
2088 .result = REJECT,
2089 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002090 },
2091 {
2092 "check out of range skb->cb access",
2093 .insns = {
2094 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002095 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002096 BPF_EXIT_INSN(),
2097 },
2098 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002099 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002100 .result = REJECT,
2101 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2102 },
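	/*
	 * Storing the ctx pointer itself into cb[] is accepted when
	 * privileged, but rejected as an address leak ("R1 leaks addr")
	 * for unprivileged loads.
	 */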
2103 {
2104 "write skb fields from socket prog",
2105 .insns = {
2106 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2107 offsetof(struct __sk_buff, cb[4])),
2108 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2109 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2110 offsetof(struct __sk_buff, mark)),
2111 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2112 offsetof(struct __sk_buff, tc_index)),
2113 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2114 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2115 offsetof(struct __sk_buff, cb[0])),
2116 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2117 offsetof(struct __sk_buff, cb[2])),
2118 BPF_EXIT_INSN(),
2119 },
2120 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002121 .errstr_unpriv = "R1 leaks addr",
2122 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002123 },
2124 {
2125 "write skb fields from tc_cls_act prog",
2126 .insns = {
2127 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2128 offsetof(struct __sk_buff, cb[0])),
2129 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2130 offsetof(struct __sk_buff, mark)),
2131 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2132 offsetof(struct __sk_buff, tc_index)),
2133 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2134 offsetof(struct __sk_buff, tc_index)),
2135 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2136 offsetof(struct __sk_buff, cb[3])),
2137 BPF_EXIT_INSN(),
2138 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002139 .errstr_unpriv = "",
2140 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002141 .result = ACCEPT,
2142 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2143 },
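	/*
	 * PTR_TO_STACK tests: loads and stores through a copy of the frame
	 * pointer are allowed only while the resulting offset is properly
	 * aligned and stays within the stack; offsets below or above it
	 * are rejected.
	 */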
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002144 {
2145 "PTR_TO_STACK store/load",
2146 .insns = {
2147 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2149 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2150 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2151 BPF_EXIT_INSN(),
2152 },
2153 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002154 .retval = 0xfaceb00c,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002155 },
2156 {
2157 "PTR_TO_STACK store/load - bad alignment on off",
2158 .insns = {
2159 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2160 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2161 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2162 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2163 BPF_EXIT_INSN(),
2164 },
2165 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002166 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002167 },
2168 {
2169 "PTR_TO_STACK store/load - bad alignment on reg",
2170 .insns = {
2171 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2173 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2174 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2175 BPF_EXIT_INSN(),
2176 },
2177 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002178 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002179 },
2180 {
2181 "PTR_TO_STACK store/load - out of bounds low",
2182 .insns = {
2183 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2185 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2186 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2187 BPF_EXIT_INSN(),
2188 },
2189 .result = REJECT,
2190 .errstr = "invalid stack off=-79992 size=8",
2191 },
2192 {
2193 "PTR_TO_STACK store/load - out of bounds high",
2194 .insns = {
2195 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2197 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2198 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2199 BPF_EXIT_INSN(),
2200 },
2201 .result = REJECT,
2202 .errstr = "invalid stack off=0 size=8",
2203 },
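	/*
	 * The "unpriv:" group exercises checks specific to unprivileged
	 * loads: pointer comparisons, most pointer arithmetic and anything
	 * that could leak a kernel address are rejected there, while most
	 * of these programs are accepted for privileged loads.
	 */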
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002204 {
2205 "unpriv: return pointer",
2206 .insns = {
2207 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2208 BPF_EXIT_INSN(),
2209 },
2210 .result = ACCEPT,
2211 .result_unpriv = REJECT,
2212 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002213 .retval = POINTER_VALUE,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002214 },
2215 {
2216 "unpriv: add const to pointer",
2217 .insns = {
2218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2219 BPF_MOV64_IMM(BPF_REG_0, 0),
2220 BPF_EXIT_INSN(),
2221 },
2222 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002223 },
2224 {
2225 "unpriv: add pointer to pointer",
2226 .insns = {
2227 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2228 BPF_MOV64_IMM(BPF_REG_0, 0),
2229 BPF_EXIT_INSN(),
2230 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08002231 .result = REJECT,
2232 .errstr = "R1 pointer += pointer",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002233 },
2234 {
2235 "unpriv: neg pointer",
2236 .insns = {
2237 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2238 BPF_MOV64_IMM(BPF_REG_0, 0),
2239 BPF_EXIT_INSN(),
2240 },
2241 .result = ACCEPT,
2242 .result_unpriv = REJECT,
2243 .errstr_unpriv = "R1 pointer arithmetic",
2244 },
2245 {
2246 "unpriv: cmp pointer with const",
2247 .insns = {
2248 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2249 BPF_MOV64_IMM(BPF_REG_0, 0),
2250 BPF_EXIT_INSN(),
2251 },
2252 .result = ACCEPT,
2253 .result_unpriv = REJECT,
2254 .errstr_unpriv = "R1 pointer comparison",
2255 },
2256 {
2257 "unpriv: cmp pointer with pointer",
2258 .insns = {
2259 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2260 BPF_MOV64_IMM(BPF_REG_0, 0),
2261 BPF_EXIT_INSN(),
2262 },
2263 .result = ACCEPT,
2264 .result_unpriv = REJECT,
2265 .errstr_unpriv = "R10 pointer comparison",
2266 },
2267 {
2268 "unpriv: check that printk is disallowed",
2269 .insns = {
2270 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2271 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2273 BPF_MOV64_IMM(BPF_REG_2, 8),
2274 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002275 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2276 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002277 BPF_MOV64_IMM(BPF_REG_0, 0),
2278 BPF_EXIT_INSN(),
2279 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01002280 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002281 .result_unpriv = REJECT,
2282 .result = ACCEPT,
2283 },
2284 {
2285 "unpriv: pass pointer to helper function",
2286 .insns = {
2287 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2288 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2290 BPF_LD_MAP_FD(BPF_REG_1, 0),
2291 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2292 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2294 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002295 BPF_MOV64_IMM(BPF_REG_0, 0),
2296 BPF_EXIT_INSN(),
2297 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002298 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002299 .errstr_unpriv = "R4 leaks addr",
2300 .result_unpriv = REJECT,
2301 .result = ACCEPT,
2302 },
2303 {
2304 "unpriv: indirectly pass pointer on stack to helper function",
2305 .insns = {
2306 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2307 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2309 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002310 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2311 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002312 BPF_MOV64_IMM(BPF_REG_0, 0),
2313 BPF_EXIT_INSN(),
2314 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002315 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002316 .errstr = "invalid indirect read from stack off -8+0 size 8",
2317 .result = REJECT,
2318 },
2319 {
2320 "unpriv: mangle pointer on stack 1",
2321 .insns = {
2322 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2323 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2324 BPF_MOV64_IMM(BPF_REG_0, 0),
2325 BPF_EXIT_INSN(),
2326 },
2327 .errstr_unpriv = "attempt to corrupt spilled",
2328 .result_unpriv = REJECT,
2329 .result = ACCEPT,
2330 },
2331 {
2332 "unpriv: mangle pointer on stack 2",
2333 .insns = {
2334 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2335 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2336 BPF_MOV64_IMM(BPF_REG_0, 0),
2337 BPF_EXIT_INSN(),
2338 },
2339 .errstr_unpriv = "attempt to corrupt spilled",
2340 .result_unpriv = REJECT,
2341 .result = ACCEPT,
2342 },
2343 {
2344 "unpriv: read pointer from stack in small chunks",
2345 .insns = {
2346 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2347 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2348 BPF_MOV64_IMM(BPF_REG_0, 0),
2349 BPF_EXIT_INSN(),
2350 },
2351 .errstr = "invalid size",
2352 .result = REJECT,
2353 },
2354 {
2355 "unpriv: write pointer into ctx",
2356 .insns = {
2357 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2358 BPF_MOV64_IMM(BPF_REG_0, 0),
2359 BPF_EXIT_INSN(),
2360 },
2361 .errstr_unpriv = "R1 leaks addr",
2362 .result_unpriv = REJECT,
2363 .errstr = "invalid bpf_context access",
2364 .result = REJECT,
2365 },
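	/*
	 * Spill/fill of ctx: a ctx pointer spilled to the stack keeps its
	 * type when filled back, but once the spill slot is clobbered, or
	 * two different pointer types may reach the same slot, the filled
	 * value can no longer be used where ctx is expected.
	 */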
2366 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002367 "unpriv: spill/fill of ctx",
2368 .insns = {
2369 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2370 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2371 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2372 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2373 BPF_MOV64_IMM(BPF_REG_0, 0),
2374 BPF_EXIT_INSN(),
2375 },
2376 .result = ACCEPT,
2377 },
2378 {
2379 "unpriv: spill/fill of ctx 2",
2380 .insns = {
2381 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2383 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2384 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2386 BPF_FUNC_get_hash_recalc),
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002387 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002388 BPF_EXIT_INSN(),
2389 },
2390 .result = ACCEPT,
2391 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2392 },
2393 {
2394 "unpriv: spill/fill of ctx 3",
2395 .insns = {
2396 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2397 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2398 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2399 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2400 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002401 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2402 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002403 BPF_EXIT_INSN(),
2404 },
2405 .result = REJECT,
2406 .errstr = "R1 type=fp expected=ctx",
2407 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2408 },
2409 {
2410 "unpriv: spill/fill of ctx 4",
2411 .insns = {
2412 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2414 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2415 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002416 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2417 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002418 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002419 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2420 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002421 BPF_EXIT_INSN(),
2422 },
2423 .result = REJECT,
2424 .errstr = "R1 type=inv expected=ctx",
2425 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2426 },
2427 {
2428 "unpriv: spill/fill of different pointers stx",
2429 .insns = {
2430 BPF_MOV64_IMM(BPF_REG_3, 42),
2431 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2433 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2436 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2437 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2438 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2439 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2440 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2441 offsetof(struct __sk_buff, mark)),
2442 BPF_MOV64_IMM(BPF_REG_0, 0),
2443 BPF_EXIT_INSN(),
2444 },
2445 .result = REJECT,
2446 .errstr = "same insn cannot be used with different pointers",
2447 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2448 },
2449 {
2450 "unpriv: spill/fill of different pointers ldx",
2451 .insns = {
2452 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2454 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2457 -(__s32)offsetof(struct bpf_perf_event_data,
2458 sample_period) - 8),
2459 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2460 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2461 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2462 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2463 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2464 offsetof(struct bpf_perf_event_data,
2465 sample_period)),
2466 BPF_MOV64_IMM(BPF_REG_0, 0),
2467 BPF_EXIT_INSN(),
2468 },
2469 .result = REJECT,
2470 .errstr = "same insn cannot be used with different pointers",
2471 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2472 },
2473 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002474 "unpriv: write pointer into map elem value",
2475 .insns = {
2476 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2477 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2479 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002480 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2481 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002482 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2483 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2484 BPF_EXIT_INSN(),
2485 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002486 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002487 .errstr_unpriv = "R0 leaks addr",
2488 .result_unpriv = REJECT,
2489 .result = ACCEPT,
2490 },
2491 {
2492 "unpriv: partial copy of pointer",
2493 .insns = {
2494 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2495 BPF_MOV64_IMM(BPF_REG_0, 0),
2496 BPF_EXIT_INSN(),
2497 },
2498 .errstr_unpriv = "R10 partial copy",
2499 .result_unpriv = REJECT,
2500 .result = ACCEPT,
2501 },
2502 {
2503 "unpriv: pass pointer to tail_call",
2504 .insns = {
2505 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2506 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2508 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002509 BPF_MOV64_IMM(BPF_REG_0, 0),
2510 BPF_EXIT_INSN(),
2511 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002512 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002513 .errstr_unpriv = "R3 leaks addr into helper",
2514 .result_unpriv = REJECT,
2515 .result = ACCEPT,
2516 },
2517 {
2518 "unpriv: cmp map pointer with zero",
2519 .insns = {
2520 BPF_MOV64_IMM(BPF_REG_1, 0),
2521 BPF_LD_MAP_FD(BPF_REG_1, 0),
2522 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2523 BPF_MOV64_IMM(BPF_REG_0, 0),
2524 BPF_EXIT_INSN(),
2525 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002526 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002527 .errstr_unpriv = "R1 pointer comparison",
2528 .result_unpriv = REJECT,
2529 .result = ACCEPT,
2530 },
2531 {
2532 "unpriv: write into frame pointer",
2533 .insns = {
2534 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2535 BPF_MOV64_IMM(BPF_REG_0, 0),
2536 BPF_EXIT_INSN(),
2537 },
2538 .errstr = "frame pointer is read only",
2539 .result = REJECT,
2540 },
2541 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002542 "unpriv: spill/fill frame pointer",
2543 .insns = {
2544 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2546 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2547 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2548 BPF_MOV64_IMM(BPF_REG_0, 0),
2549 BPF_EXIT_INSN(),
2550 },
2551 .errstr = "frame pointer is read only",
2552 .result = REJECT,
2553 },
2554 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002555 "unpriv: cmp of frame pointer",
2556 .insns = {
2557 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2558 BPF_MOV64_IMM(BPF_REG_0, 0),
2559 BPF_EXIT_INSN(),
2560 },
2561 .errstr_unpriv = "R10 pointer comparison",
2562 .result_unpriv = REJECT,
2563 .result = ACCEPT,
2564 },
2565 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002566 "unpriv: adding of fp",
2567 .insns = {
2568 BPF_MOV64_IMM(BPF_REG_0, 0),
2569 BPF_MOV64_IMM(BPF_REG_1, 0),
2570 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2571 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2572 BPF_EXIT_INSN(),
2573 },
Edward Creef65b1842017-08-07 15:27:12 +01002574 .result = ACCEPT,
Daniel Borkmann728a8532017-04-27 01:39:32 +02002575 },
2576 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002577 "unpriv: cmp of stack pointer",
2578 .insns = {
2579 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2580 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2581 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2582 BPF_MOV64_IMM(BPF_REG_0, 0),
2583 BPF_EXIT_INSN(),
2584 },
2585 .errstr_unpriv = "R2 pointer comparison",
2586 .result_unpriv = REJECT,
2587 .result = ACCEPT,
2588 },
2589 {
Yonghong Song332270f2017-04-29 22:52:42 -07002590 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002591 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07002592 BPF_MOV64_IMM(BPF_REG_1, 4),
2593 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2594 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2597 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2598 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2599 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2600 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2602 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002603 BPF_MOV64_IMM(BPF_REG_0, 0),
2604 BPF_EXIT_INSN(),
2605 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002606 .result = ACCEPT,
2607 },
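	/*
	 * raw_stack tests: bpf_skb_load_bytes() may write into
	 * uninitialized stack memory, but the buffer must lie entirely
	 * within the 512-byte stack and the length must be a known,
	 * positive and bounded value; spill slots overlapped by the write
	 * are invalidated.
	 */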
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002608 {
2609 "raw_stack: no skb_load_bytes",
2610 .insns = {
2611 BPF_MOV64_IMM(BPF_REG_2, 4),
2612 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2614 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2615 BPF_MOV64_IMM(BPF_REG_4, 8),
2616 /* Call to skb_load_bytes() omitted. */
2617 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2618 BPF_EXIT_INSN(),
2619 },
2620 .result = REJECT,
2621 .errstr = "invalid read from stack off -8+0 size 8",
2622 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2623 },
2624 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002625 "raw_stack: skb_load_bytes, negative len",
2626 .insns = {
2627 BPF_MOV64_IMM(BPF_REG_2, 4),
2628 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2629 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2630 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2631 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002632 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2633 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002634 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2635 BPF_EXIT_INSN(),
2636 },
2637 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002638 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002639 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2640 },
2641 {
2642 "raw_stack: skb_load_bytes, negative len 2",
2643 .insns = {
2644 BPF_MOV64_IMM(BPF_REG_2, 4),
2645 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2647 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2648 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002649 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2650 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002651 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2652 BPF_EXIT_INSN(),
2653 },
2654 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002655 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002656 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2657 },
2658 {
2659 "raw_stack: skb_load_bytes, zero len",
2660 .insns = {
2661 BPF_MOV64_IMM(BPF_REG_2, 4),
2662 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2664 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2665 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2667 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002668 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2669 BPF_EXIT_INSN(),
2670 },
2671 .result = REJECT,
2672 .errstr = "invalid stack type R3",
2673 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2674 },
2675 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002676 "raw_stack: skb_load_bytes, no init",
2677 .insns = {
2678 BPF_MOV64_IMM(BPF_REG_2, 4),
2679 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2681 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2682 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002683 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2684 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2686 BPF_EXIT_INSN(),
2687 },
2688 .result = ACCEPT,
2689 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2690 },
2691 {
2692 "raw_stack: skb_load_bytes, init",
2693 .insns = {
2694 BPF_MOV64_IMM(BPF_REG_2, 4),
2695 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2697 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2698 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2699 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002700 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2701 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002702 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2703 BPF_EXIT_INSN(),
2704 },
2705 .result = ACCEPT,
2706 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2707 },
2708 {
2709 "raw_stack: skb_load_bytes, spilled regs around bounds",
2710 .insns = {
2711 BPF_MOV64_IMM(BPF_REG_2, 4),
2712 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002714 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2715 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002716 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2717 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002718 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2719 BPF_FUNC_skb_load_bytes),
2720 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2721 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002722 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2723 offsetof(struct __sk_buff, mark)),
2724 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2725 offsetof(struct __sk_buff, priority)),
2726 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2727 BPF_EXIT_INSN(),
2728 },
2729 .result = ACCEPT,
2730 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2731 },
2732 {
2733 "raw_stack: skb_load_bytes, spilled regs corruption",
2734 .insns = {
2735 BPF_MOV64_IMM(BPF_REG_2, 4),
2736 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2737 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002738 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002739 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2740 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002741 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2742 BPF_FUNC_skb_load_bytes),
2743 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002744 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2745 offsetof(struct __sk_buff, mark)),
2746 BPF_EXIT_INSN(),
2747 },
2748 .result = REJECT,
2749 .errstr = "R0 invalid mem access 'inv'",
2750 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2751 },
2752 {
2753 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2754 .insns = {
2755 BPF_MOV64_IMM(BPF_REG_2, 4),
2756 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002758 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2759 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2760 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002761 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2762 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002763 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2764 BPF_FUNC_skb_load_bytes),
2765 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2766 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2767 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002768 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2769 offsetof(struct __sk_buff, mark)),
2770 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2771 offsetof(struct __sk_buff, priority)),
2772 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2773 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2774 offsetof(struct __sk_buff, pkt_type)),
2775 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2776 BPF_EXIT_INSN(),
2777 },
2778 .result = REJECT,
2779 .errstr = "R3 invalid mem access 'inv'",
2780 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2781 },
2782 {
2783 "raw_stack: skb_load_bytes, spilled regs + data",
2784 .insns = {
2785 BPF_MOV64_IMM(BPF_REG_2, 4),
2786 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002788 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2789 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2790 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002791 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2792 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002793 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2794 BPF_FUNC_skb_load_bytes),
2795 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2796 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2797 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002798 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2799 offsetof(struct __sk_buff, mark)),
2800 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2801 offsetof(struct __sk_buff, priority)),
2802 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2803 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2804 BPF_EXIT_INSN(),
2805 },
2806 .result = ACCEPT,
2807 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2808 },
2809 {
2810 "raw_stack: skb_load_bytes, invalid access 1",
2811 .insns = {
2812 BPF_MOV64_IMM(BPF_REG_2, 4),
2813 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2815 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2816 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002817 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2818 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002819 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2820 BPF_EXIT_INSN(),
2821 },
2822 .result = REJECT,
2823 .errstr = "invalid stack type R3 off=-513 access_size=8",
2824 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2825 },
2826 {
2827 "raw_stack: skb_load_bytes, invalid access 2",
2828 .insns = {
2829 BPF_MOV64_IMM(BPF_REG_2, 4),
2830 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2832 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2833 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002834 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2835 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002836 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2837 BPF_EXIT_INSN(),
2838 },
2839 .result = REJECT,
2840 .errstr = "invalid stack type R3 off=-1 access_size=8",
2841 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2842 },
2843 {
2844 "raw_stack: skb_load_bytes, invalid access 3",
2845 .insns = {
2846 BPF_MOV64_IMM(BPF_REG_2, 4),
2847 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2849 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2850 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002851 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2852 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002853 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2854 BPF_EXIT_INSN(),
2855 },
2856 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002857 .errstr = "R4 min value is negative",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002858 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2859 },
2860 {
2861 "raw_stack: skb_load_bytes, invalid access 4",
2862 .insns = {
2863 BPF_MOV64_IMM(BPF_REG_2, 4),
2864 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2866 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2867 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002868 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2869 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002870 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2871 BPF_EXIT_INSN(),
2872 },
2873 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002874 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002875 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2876 },
2877 {
2878 "raw_stack: skb_load_bytes, invalid access 5",
2879 .insns = {
2880 BPF_MOV64_IMM(BPF_REG_2, 4),
2881 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2882 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2883 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2884 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002885 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2886 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002887 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2888 BPF_EXIT_INSN(),
2889 },
2890 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002891 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002892 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2893 },
2894 {
2895 "raw_stack: skb_load_bytes, invalid access 6",
2896 .insns = {
2897 BPF_MOV64_IMM(BPF_REG_2, 4),
2898 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2900 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2901 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002902 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2903 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002904 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2905 BPF_EXIT_INSN(),
2906 },
2907 .result = REJECT,
2908 .errstr = "invalid stack type R3 off=-512 access_size=0",
2909 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2910 },
2911 {
2912 "raw_stack: skb_load_bytes, large access",
2913 .insns = {
2914 BPF_MOV64_IMM(BPF_REG_2, 4),
2915 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2917 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2918 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002919 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2920 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002921 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2922 BPF_EXIT_INSN(),
2923 },
2924 .result = ACCEPT,
2925 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2926 },
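	/* BPF_ST and BPF_XADD stores into the context are never allowed. */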
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002927 {
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01002928 "context stores via ST",
2929 .insns = {
2930 BPF_MOV64_IMM(BPF_REG_0, 0),
2931 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
2932 BPF_EXIT_INSN(),
2933 },
2934 .errstr = "BPF_ST stores into R1 context is not allowed",
2935 .result = REJECT,
2936 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2937 },
2938 {
2939 "context stores via XADD",
2940 .insns = {
2941 BPF_MOV64_IMM(BPF_REG_0, 0),
2942 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
2943 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
2944 BPF_EXIT_INSN(),
2945 },
2946 .errstr = "BPF_XADD stores into R1 context is not allowed",
2947 .result = REJECT,
2948 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2949 },
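	/*
	 * Direct packet access: skb->data may only be dereferenced after a
	 * successful bounds check against skb->data_end.  The tests below
	 * cover both comparison directions as well as reads, writes and
	 * pointer arithmetic via shifts, masks and branches.
	 */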
2950 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002951 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002952 .insns = {
2953 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2954 offsetof(struct __sk_buff, data)),
2955 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2956 offsetof(struct __sk_buff, data_end)),
2957 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2959 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2960 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2961 BPF_MOV64_IMM(BPF_REG_0, 0),
2962 BPF_EXIT_INSN(),
2963 },
2964 .result = ACCEPT,
2965 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2966 },
2967 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002968 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002969 .insns = {
2970 BPF_MOV64_IMM(BPF_REG_0, 1),
2971 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2972 offsetof(struct __sk_buff, data_end)),
2973 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2974 offsetof(struct __sk_buff, data)),
2975 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2977 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2978 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2979 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2980 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2981 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2982 offsetof(struct __sk_buff, data)),
2983 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08002984 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2985 offsetof(struct __sk_buff, len)),
Edward Cree1f9ab382017-08-07 15:29:11 +01002986 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2987 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002988 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2989 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2991 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2992 offsetof(struct __sk_buff, data_end)),
2993 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2994 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2995 BPF_MOV64_IMM(BPF_REG_0, 0),
2996 BPF_EXIT_INSN(),
2997 },
2998 .result = ACCEPT,
2999 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3000 },
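	/* data/data_end are not readable at all from socket filters. */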
3001 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003002 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003003 .insns = {
3004 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3005 offsetof(struct __sk_buff, data)),
3006 BPF_MOV64_IMM(BPF_REG_0, 0),
3007 BPF_EXIT_INSN(),
3008 },
3009 .errstr = "invalid bpf_context access off=76",
3010 .result = REJECT,
3011 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3012 },
3013 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003014 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003015 .insns = {
3016 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3017 offsetof(struct __sk_buff, data)),
3018 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3019 offsetof(struct __sk_buff, data_end)),
3020 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3021 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3022 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3023 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3024 BPF_MOV64_IMM(BPF_REG_0, 0),
3025 BPF_EXIT_INSN(),
3026 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003027 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003028 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3029 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003030 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02003031 "direct packet access: test5 (pkt_end >= reg, good access)",
3032 .insns = {
3033 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3034 offsetof(struct __sk_buff, data)),
3035 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3036 offsetof(struct __sk_buff, data_end)),
3037 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3038 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3039 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3040 BPF_MOV64_IMM(BPF_REG_0, 1),
3041 BPF_EXIT_INSN(),
3042 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3043 BPF_MOV64_IMM(BPF_REG_0, 0),
3044 BPF_EXIT_INSN(),
3045 },
3046 .result = ACCEPT,
3047 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3048 },
3049 {
3050 "direct packet access: test6 (pkt_end >= reg, bad access)",
3051 .insns = {
3052 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3053 offsetof(struct __sk_buff, data)),
3054 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3055 offsetof(struct __sk_buff, data_end)),
3056 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3058 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3059 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3060 BPF_MOV64_IMM(BPF_REG_0, 1),
3061 BPF_EXIT_INSN(),
3062 BPF_MOV64_IMM(BPF_REG_0, 0),
3063 BPF_EXIT_INSN(),
3064 },
3065 .errstr = "invalid access to packet",
3066 .result = REJECT,
3067 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3068 },
3069 {
3070 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3071 .insns = {
3072 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3073 offsetof(struct __sk_buff, data)),
3074 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3075 offsetof(struct __sk_buff, data_end)),
3076 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3078 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3079 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3080 BPF_MOV64_IMM(BPF_REG_0, 1),
3081 BPF_EXIT_INSN(),
3082 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3083 BPF_MOV64_IMM(BPF_REG_0, 0),
3084 BPF_EXIT_INSN(),
3085 },
3086 .errstr = "invalid access to packet",
3087 .result = REJECT,
3088 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3089 },
3090 {
3091 "direct packet access: test8 (double test, variant 1)",
3092 .insns = {
3093 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3094 offsetof(struct __sk_buff, data)),
3095 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3096 offsetof(struct __sk_buff, data_end)),
3097 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3099 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3100 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3101 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3102 BPF_MOV64_IMM(BPF_REG_0, 1),
3103 BPF_EXIT_INSN(),
3104 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3105 BPF_MOV64_IMM(BPF_REG_0, 0),
3106 BPF_EXIT_INSN(),
3107 },
3108 .result = ACCEPT,
3109 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3110 },
3111 {
3112 "direct packet access: test9 (double test, variant 2)",
3113 .insns = {
3114 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3115 offsetof(struct __sk_buff, data)),
3116 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3117 offsetof(struct __sk_buff, data_end)),
3118 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3120 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3121 BPF_MOV64_IMM(BPF_REG_0, 1),
3122 BPF_EXIT_INSN(),
3123 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3124 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3125 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3126 BPF_MOV64_IMM(BPF_REG_0, 0),
3127 BPF_EXIT_INSN(),
3128 },
3129 .result = ACCEPT,
3130 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3131 },
3132 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003133 "direct packet access: test10 (write invalid)",
3134 .insns = {
3135 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3136 offsetof(struct __sk_buff, data)),
3137 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3138 offsetof(struct __sk_buff, data_end)),
3139 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3141 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3142 BPF_MOV64_IMM(BPF_REG_0, 0),
3143 BPF_EXIT_INSN(),
3144 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3145 BPF_MOV64_IMM(BPF_REG_0, 0),
3146 BPF_EXIT_INSN(),
3147 },
3148 .errstr = "invalid access to packet",
3149 .result = REJECT,
3150 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3151 },
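	/* Tests 11-14 check that a scalar whose value is provably small
	 * (derived via right shifts, AND masks or branch-dependent constants)
	 * may be added to a packet pointer.  Tests 11-13 only form the
	 * pointer and return; test14 also loads through it after adding a
	 * constant-zero offset.
	 */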
3152 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003153 "direct packet access: test11 (shift, good access)",
3154 .insns = {
3155 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3156 offsetof(struct __sk_buff, data)),
3157 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3158 offsetof(struct __sk_buff, data_end)),
3159 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3160 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3161 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3162 BPF_MOV64_IMM(BPF_REG_3, 144),
3163 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3164 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3165 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3166 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3167 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3168 BPF_MOV64_IMM(BPF_REG_0, 1),
3169 BPF_EXIT_INSN(),
3170 BPF_MOV64_IMM(BPF_REG_0, 0),
3171 BPF_EXIT_INSN(),
3172 },
3173 .result = ACCEPT,
3174 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003175 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003176 },
3177 {
3178 "direct packet access: test12 (and, good access)",
3179 .insns = {
3180 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3181 offsetof(struct __sk_buff, data)),
3182 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3183 offsetof(struct __sk_buff, data_end)),
3184 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3185 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3186 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3187 BPF_MOV64_IMM(BPF_REG_3, 144),
3188 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3190 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3191 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3192 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3193 BPF_MOV64_IMM(BPF_REG_0, 1),
3194 BPF_EXIT_INSN(),
3195 BPF_MOV64_IMM(BPF_REG_0, 0),
3196 BPF_EXIT_INSN(),
3197 },
3198 .result = ACCEPT,
3199 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003200 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003201 },
3202 {
3203 "direct packet access: test13 (branches, good access)",
3204 .insns = {
3205 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3206 offsetof(struct __sk_buff, data)),
3207 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3208 offsetof(struct __sk_buff, data_end)),
3209 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3211 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3212 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3213 offsetof(struct __sk_buff, mark)),
3214 BPF_MOV64_IMM(BPF_REG_4, 1),
3215 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3216 BPF_MOV64_IMM(BPF_REG_3, 14),
3217 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3218 BPF_MOV64_IMM(BPF_REG_3, 24),
3219 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3221 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3222 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3223 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3224 BPF_MOV64_IMM(BPF_REG_0, 1),
3225 BPF_EXIT_INSN(),
3226 BPF_MOV64_IMM(BPF_REG_0, 0),
3227 BPF_EXIT_INSN(),
3228 },
3229 .result = ACCEPT,
3230 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003231 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003232 },
3233 {
William Tu63dfef72017-02-04 08:37:29 -08003234 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3235 .insns = {
3236 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3237 offsetof(struct __sk_buff, data)),
3238 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3239 offsetof(struct __sk_buff, data_end)),
3240 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3242 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3243 BPF_MOV64_IMM(BPF_REG_5, 12),
3244 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3245 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3246 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3247 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3248 BPF_MOV64_IMM(BPF_REG_0, 1),
3249 BPF_EXIT_INSN(),
3250 BPF_MOV64_IMM(BPF_REG_0, 0),
3251 BPF_EXIT_INSN(),
3252 },
3253 .result = ACCEPT,
3254 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003255 .retval = 1,
William Tu63dfef72017-02-04 08:37:29 -08003256 },
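	/* test15: spilling a packet pointer to the stack and then applying
	 * BPF_STX_XADD to that stack slot destroys the slot's pointer state.
	 * The value read back is treated as an unknown scalar, so the
	 * subsequent store through it is rejected with
	 * "R2 invalid mem access 'inv'".
	 */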
3257 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003258 "direct packet access: test15 (spill with xadd)",
3259 .insns = {
3260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3261 offsetof(struct __sk_buff, data)),
3262 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3263 offsetof(struct __sk_buff, data_end)),
3264 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3266 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3267 BPF_MOV64_IMM(BPF_REG_5, 4096),
3268 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3270 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3271 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3272 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3273 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3274 BPF_MOV64_IMM(BPF_REG_0, 0),
3275 BPF_EXIT_INSN(),
3276 },
3277 .errstr = "R2 invalid mem access 'inv'",
3278 .result = REJECT,
3279 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3280 },
3281 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02003282 "direct packet access: test16 (arith on data_end)",
3283 .insns = {
3284 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3285 offsetof(struct __sk_buff, data)),
3286 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3287 offsetof(struct __sk_buff, data_end)),
3288 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3291 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3292 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3293 BPF_MOV64_IMM(BPF_REG_0, 0),
3294 BPF_EXIT_INSN(),
3295 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08003296 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
Daniel Borkmann728a8532017-04-27 01:39:32 +02003297 .result = REJECT,
3298 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3299 },
3300 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003301 "direct packet access: test17 (pruning, alignment)",
3302 .insns = {
3303 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3304 offsetof(struct __sk_buff, data)),
3305 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3306 offsetof(struct __sk_buff, data_end)),
3307 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3308 offsetof(struct __sk_buff, mark)),
3309 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3311 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3312 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3313 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3314 BPF_MOV64_IMM(BPF_REG_0, 0),
3315 BPF_EXIT_INSN(),
3316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3317 BPF_JMP_A(-6),
3318 },
Edward Creef65b1842017-08-07 15:27:12 +01003319 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003320 .result = REJECT,
3321 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3322 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3323 },
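	/* Tests 18-24 cover the reversed "reg += pkt_ptr" form of pointer
	 * arithmetic, where the packet pointer is the source operand and the
	 * destination starts out as a scalar.  The result must still be
	 * tracked as a packet pointer with a bounded variable offset, roughly
	 * (illustrative sketch only):
	 *
	 *	u64 off = val & 0x7fff;
	 *	u8 *p = (u8 *)(off + (u64)data);
	 *
	 *	if (p + 0x7fff > data_end)
	 *		return 0;
	 *	*(u64 *)p = 0;
	 *
	 * test23 arranges its bounds check so that no usable range is
	 * established for the access register and must be rejected.
	 */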
3324 {
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003325 "direct packet access: test18 (imm += pkt_ptr, 1)",
3326 .insns = {
3327 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3328 offsetof(struct __sk_buff, data)),
3329 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3330 offsetof(struct __sk_buff, data_end)),
3331 BPF_MOV64_IMM(BPF_REG_0, 8),
3332 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3333 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3334 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3335 BPF_MOV64_IMM(BPF_REG_0, 0),
3336 BPF_EXIT_INSN(),
3337 },
3338 .result = ACCEPT,
3339 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3340 },
3341 {
3342 "direct packet access: test19 (imm += pkt_ptr, 2)",
3343 .insns = {
3344 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3345 offsetof(struct __sk_buff, data)),
3346 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3347 offsetof(struct __sk_buff, data_end)),
3348 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3350 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3351 BPF_MOV64_IMM(BPF_REG_4, 4),
3352 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3353 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3354 BPF_MOV64_IMM(BPF_REG_0, 0),
3355 BPF_EXIT_INSN(),
3356 },
3357 .result = ACCEPT,
3358 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3359 },
3360 {
3361 "direct packet access: test20 (x += pkt_ptr, 1)",
3362 .insns = {
3363 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3364 offsetof(struct __sk_buff, data)),
3365 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3366 offsetof(struct __sk_buff, data_end)),
3367 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3368 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3369 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003370 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003371 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3372 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3373 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003375 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3376 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3377 BPF_MOV64_IMM(BPF_REG_0, 0),
3378 BPF_EXIT_INSN(),
3379 },
3380 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3381 .result = ACCEPT,
3382 },
3383 {
3384 "direct packet access: test21 (x += pkt_ptr, 2)",
3385 .insns = {
3386 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3387 offsetof(struct __sk_buff, data)),
3388 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3389 offsetof(struct __sk_buff, data_end)),
3390 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3391 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3392 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3393 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3394 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3395 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003396 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003397 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3398 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003400 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3401 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3402 BPF_MOV64_IMM(BPF_REG_0, 0),
3403 BPF_EXIT_INSN(),
3404 },
3405 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3406 .result = ACCEPT,
3407 },
3408 {
3409 "direct packet access: test22 (x += pkt_ptr, 3)",
3410 .insns = {
3411 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3412 offsetof(struct __sk_buff, data)),
3413 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3414 offsetof(struct __sk_buff, data_end)),
3415 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3417 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3418 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3419 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3420 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3421 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3422 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3423 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3424 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003425 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003426 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3427 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3429 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3430 BPF_MOV64_IMM(BPF_REG_2, 1),
3431 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3432 BPF_MOV64_IMM(BPF_REG_0, 0),
3433 BPF_EXIT_INSN(),
3434 },
3435 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3436 .result = ACCEPT,
3437 },
3438 {
3439 "direct packet access: test23 (x += pkt_ptr, 4)",
3440 .insns = {
3441 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3442 offsetof(struct __sk_buff, data)),
3443 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3444 offsetof(struct __sk_buff, data_end)),
3445 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3446 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3447 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3448 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3449 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3450 BPF_MOV64_IMM(BPF_REG_0, 31),
3451 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3452 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3453 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3455 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3456 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3457 BPF_MOV64_IMM(BPF_REG_0, 0),
3458 BPF_EXIT_INSN(),
3459 },
3460 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3461 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003462 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003463 },
3464 {
3465 "direct packet access: test24 (x += pkt_ptr, 5)",
3466 .insns = {
3467 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3468 offsetof(struct __sk_buff, data)),
3469 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3470 offsetof(struct __sk_buff, data_end)),
3471 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3472 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3473 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3474 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3475 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3476 BPF_MOV64_IMM(BPF_REG_0, 64),
3477 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3478 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3479 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
Edward Cree1f9ab382017-08-07 15:29:11 +01003480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003481 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3482 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3483 BPF_MOV64_IMM(BPF_REG_0, 0),
3484 BPF_EXIT_INSN(),
3485 },
3486 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3487 .result = ACCEPT,
3488 },
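	/* Tests 25-28 repeat the branch-marking checks with the BPF_JLT and
	 * BPF_JLE opcodes.  The load is only safe on the path where the
	 * comparison proves data_end lies beyond the bytes being accessed;
	 * the "bad access" variants read on the opposite path and must be
	 * rejected.
	 */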
3489 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003490 "direct packet access: test25 (marking on <, good access)",
3491 .insns = {
3492 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3493 offsetof(struct __sk_buff, data)),
3494 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3495 offsetof(struct __sk_buff, data_end)),
3496 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3497 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3498 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3499 BPF_MOV64_IMM(BPF_REG_0, 0),
3500 BPF_EXIT_INSN(),
3501 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3502 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3503 },
3504 .result = ACCEPT,
3505 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3506 },
3507 {
3508 "direct packet access: test26 (marking on <, bad access)",
3509 .insns = {
3510 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3511 offsetof(struct __sk_buff, data)),
3512 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3513 offsetof(struct __sk_buff, data_end)),
3514 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3516 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3517 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3518 BPF_MOV64_IMM(BPF_REG_0, 0),
3519 BPF_EXIT_INSN(),
3520 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3521 },
3522 .result = REJECT,
3523 .errstr = "invalid access to packet",
3524 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3525 },
3526 {
3527 "direct packet access: test27 (marking on <=, good access)",
3528 .insns = {
3529 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3530 offsetof(struct __sk_buff, data)),
3531 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3532 offsetof(struct __sk_buff, data_end)),
3533 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3535 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3536 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3537 BPF_MOV64_IMM(BPF_REG_0, 1),
3538 BPF_EXIT_INSN(),
3539 },
3540 .result = ACCEPT,
3541 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003542 .retval = 1,
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003543 },
3544 {
3545 "direct packet access: test28 (marking on <=, bad access)",
3546 .insns = {
3547 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3548 offsetof(struct __sk_buff, data)),
3549 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3550 offsetof(struct __sk_buff, data_end)),
3551 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3553 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3554 BPF_MOV64_IMM(BPF_REG_0, 1),
3555 BPF_EXIT_INSN(),
3556 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3557 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3558 },
3559 .result = REJECT,
3560 .errstr = "invalid access to packet",
3561 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3562 },
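	/* "helper access to packet" tests: a packet pointer may be passed to
	 * a map helper only after the bytes the helper will touch have been
	 * verified against data_end.  For the XDP cases below the idea is
	 * roughly (illustrative sketch, map definition omitted):
	 *
	 *	void *data = (void *)(long)xdp->data;
	 *	void *data_end = (void *)(long)xdp->data_end;
	 *
	 *	if (data + 8 > data_end)
	 *		return 0;
	 *	bpf_map_update_elem(&map, data, data, 0);
	 *
	 * Unchecked pointers, checks that are skipped on the relevant path,
	 * and verified ranges shorter than what the helper needs are
	 * rejected.
	 */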
3563 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003564 "helper access to packet: test1, valid packet_ptr range",
3565 .insns = {
3566 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3567 offsetof(struct xdp_md, data)),
3568 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3569 offsetof(struct xdp_md, data_end)),
3570 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3571 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3572 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3573 BPF_LD_MAP_FD(BPF_REG_1, 0),
3574 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3575 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003576 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3577 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003578 BPF_MOV64_IMM(BPF_REG_0, 0),
3579 BPF_EXIT_INSN(),
3580 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003581 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003582 .result_unpriv = ACCEPT,
3583 .result = ACCEPT,
3584 .prog_type = BPF_PROG_TYPE_XDP,
3585 },
3586 {
3587 "helper access to packet: test2, unchecked packet_ptr",
3588 .insns = {
3589 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3590 offsetof(struct xdp_md, data)),
3591 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003592 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3593 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003594 BPF_MOV64_IMM(BPF_REG_0, 0),
3595 BPF_EXIT_INSN(),
3596 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003597 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003598 .result = REJECT,
3599 .errstr = "invalid access to packet",
3600 .prog_type = BPF_PROG_TYPE_XDP,
3601 },
3602 {
3603 "helper access to packet: test3, variable add",
3604 .insns = {
3605 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3606 offsetof(struct xdp_md, data)),
3607 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3608 offsetof(struct xdp_md, data_end)),
3609 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3610 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3611 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3612 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3613 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3614 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3615 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3617 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3618 BPF_LD_MAP_FD(BPF_REG_1, 0),
3619 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3621 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003622 BPF_MOV64_IMM(BPF_REG_0, 0),
3623 BPF_EXIT_INSN(),
3624 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003625 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003626 .result = ACCEPT,
3627 .prog_type = BPF_PROG_TYPE_XDP,
3628 },
3629 {
3630 "helper access to packet: test4, packet_ptr with bad range",
3631 .insns = {
3632 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3633 offsetof(struct xdp_md, data)),
3634 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3635 offsetof(struct xdp_md, data_end)),
3636 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3638 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3639 BPF_MOV64_IMM(BPF_REG_0, 0),
3640 BPF_EXIT_INSN(),
3641 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003642 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3643 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003644 BPF_MOV64_IMM(BPF_REG_0, 0),
3645 BPF_EXIT_INSN(),
3646 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003647 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003648 .result = REJECT,
3649 .errstr = "invalid access to packet",
3650 .prog_type = BPF_PROG_TYPE_XDP,
3651 },
3652 {
3653 "helper access to packet: test5, packet_ptr with too short range",
3654 .insns = {
3655 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3656 offsetof(struct xdp_md, data)),
3657 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3658 offsetof(struct xdp_md, data_end)),
3659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3660 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3662 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3663 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3665 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003666 BPF_MOV64_IMM(BPF_REG_0, 0),
3667 BPF_EXIT_INSN(),
3668 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003669 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003670 .result = REJECT,
3671 .errstr = "invalid access to packet",
3672 .prog_type = BPF_PROG_TYPE_XDP,
3673 },
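	/* The same helper cases again for BPF_PROG_TYPE_SCHED_CLS, reading
	 * data/data_end from struct __sk_buff instead of struct xdp_md.
	 */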
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003674 {
3675 "helper access to packet: test6, cls valid packet_ptr range",
3676 .insns = {
3677 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3678 offsetof(struct __sk_buff, data)),
3679 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3680 offsetof(struct __sk_buff, data_end)),
3681 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3683 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3684 BPF_LD_MAP_FD(BPF_REG_1, 0),
3685 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3686 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003687 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3688 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003689 BPF_MOV64_IMM(BPF_REG_0, 0),
3690 BPF_EXIT_INSN(),
3691 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003692 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003693 .result = ACCEPT,
3694 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3695 },
3696 {
3697 "helper access to packet: test7, cls unchecked packet_ptr",
3698 .insns = {
3699 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3700 offsetof(struct __sk_buff, data)),
3701 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003702 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3703 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003704 BPF_MOV64_IMM(BPF_REG_0, 0),
3705 BPF_EXIT_INSN(),
3706 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003707 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003708 .result = REJECT,
3709 .errstr = "invalid access to packet",
3710 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3711 },
3712 {
3713 "helper access to packet: test8, cls variable add",
3714 .insns = {
3715 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3716 offsetof(struct __sk_buff, data)),
3717 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3718 offsetof(struct __sk_buff, data_end)),
3719 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3721 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3722 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3723 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3724 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3725 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3727 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3728 BPF_LD_MAP_FD(BPF_REG_1, 0),
3729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003730 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3731 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003732 BPF_MOV64_IMM(BPF_REG_0, 0),
3733 BPF_EXIT_INSN(),
3734 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003735 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003736 .result = ACCEPT,
3737 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3738 },
3739 {
3740 "helper access to packet: test9, cls packet_ptr with bad range",
3741 .insns = {
3742 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3743 offsetof(struct __sk_buff, data)),
3744 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3745 offsetof(struct __sk_buff, data_end)),
3746 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3747 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3748 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3749 BPF_MOV64_IMM(BPF_REG_0, 0),
3750 BPF_EXIT_INSN(),
3751 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003752 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3753 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003754 BPF_MOV64_IMM(BPF_REG_0, 0),
3755 BPF_EXIT_INSN(),
3756 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003757 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003758 .result = REJECT,
3759 .errstr = "invalid access to packet",
3760 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3761 },
3762 {
3763 "helper access to packet: test10, cls packet_ptr with too short range",
3764 .insns = {
3765 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3766 offsetof(struct __sk_buff, data)),
3767 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3768 offsetof(struct __sk_buff, data_end)),
3769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3770 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3772 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3773 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003774 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3775 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003776 BPF_MOV64_IMM(BPF_REG_0, 0),
3777 BPF_EXIT_INSN(),
3778 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003779 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003780 .result = REJECT,
3781 .errstr = "invalid access to packet",
3782 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3783 },
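	/* tests 11 and 12: even a fully verified packet pointer may only be
	 * handed to helpers that are allowed to access packet data directly,
	 * such as bpf_csum_diff() further below; bpf_skb_store_bytes() and
	 * bpf_skb_load_bytes() are not, so these calls are rejected with the
	 * "helper access to the packet" error regardless of the range check.
	 */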
3784 {
3785 "helper access to packet: test11, cls unsuitable helper 1",
3786 .insns = {
3787 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3788 offsetof(struct __sk_buff, data)),
3789 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3790 offsetof(struct __sk_buff, data_end)),
3791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3792 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3794 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3795 BPF_MOV64_IMM(BPF_REG_2, 0),
3796 BPF_MOV64_IMM(BPF_REG_4, 42),
3797 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003798 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3799 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003800 BPF_MOV64_IMM(BPF_REG_0, 0),
3801 BPF_EXIT_INSN(),
3802 },
3803 .result = REJECT,
3804 .errstr = "helper access to the packet",
3805 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3806 },
3807 {
3808 "helper access to packet: test12, cls unsuitable helper 2",
3809 .insns = {
3810 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3811 offsetof(struct __sk_buff, data)),
3812 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3813 offsetof(struct __sk_buff, data_end)),
3814 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3816 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3817 BPF_MOV64_IMM(BPF_REG_2, 0),
3818 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003819 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3820 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003821 BPF_MOV64_IMM(BPF_REG_0, 0),
3822 BPF_EXIT_INSN(),
3823 },
3824 .result = REJECT,
3825 .errstr = "helper access to the packet",
3826 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3827 },
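	/* tests 13-19: when a packet pointer is paired with a size argument,
	 * here bpf_csum_diff(from, from_size, to, to_size, seed), the size
	 * must be non-negative and must stay within the range verified
	 * against data_end.  Moving the pointer back within the verified
	 * window is fine (test14), moving it before the window or oversizing
	 * the access is not (tests 15 and 16), negative sizes are rejected
	 * outright (tests 17 and 18), and a zero size with a valid pointer is
	 * accepted (test19).  Tests 20 and 21 pass pkt_end itself, or a
	 * pointer whose verified range does not cover the access, and are
	 * rejected as well.
	 */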
3828 {
3829 "helper access to packet: test13, cls helper ok",
3830 .insns = {
3831 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3832 offsetof(struct __sk_buff, data)),
3833 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3834 offsetof(struct __sk_buff, data_end)),
3835 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3836 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3838 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3839 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3840 BPF_MOV64_IMM(BPF_REG_2, 4),
3841 BPF_MOV64_IMM(BPF_REG_3, 0),
3842 BPF_MOV64_IMM(BPF_REG_4, 0),
3843 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003844 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3845 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003846 BPF_MOV64_IMM(BPF_REG_0, 0),
3847 BPF_EXIT_INSN(),
3848 },
3849 .result = ACCEPT,
3850 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3851 },
3852 {
Edward Creef65b1842017-08-07 15:27:12 +01003853 "helper access to packet: test14, cls helper ok sub",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003854 .insns = {
3855 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3856 offsetof(struct __sk_buff, data)),
3857 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3858 offsetof(struct __sk_buff, data_end)),
3859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3860 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3862 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3863 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3864 BPF_MOV64_IMM(BPF_REG_2, 4),
3865 BPF_MOV64_IMM(BPF_REG_3, 0),
3866 BPF_MOV64_IMM(BPF_REG_4, 0),
3867 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003868 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3869 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003870 BPF_MOV64_IMM(BPF_REG_0, 0),
3871 BPF_EXIT_INSN(),
3872 },
Edward Creef65b1842017-08-07 15:27:12 +01003873 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003874 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3875 },
3876 {
Edward Creef65b1842017-08-07 15:27:12 +01003877 "helper access to packet: test15, cls helper fail sub",
3878 .insns = {
3879 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3880 offsetof(struct __sk_buff, data)),
3881 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3882 offsetof(struct __sk_buff, data_end)),
3883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3884 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3885 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3886 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3887 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3888 BPF_MOV64_IMM(BPF_REG_2, 4),
3889 BPF_MOV64_IMM(BPF_REG_3, 0),
3890 BPF_MOV64_IMM(BPF_REG_4, 0),
3891 BPF_MOV64_IMM(BPF_REG_5, 0),
3892 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3893 BPF_FUNC_csum_diff),
3894 BPF_MOV64_IMM(BPF_REG_0, 0),
3895 BPF_EXIT_INSN(),
3896 },
3897 .result = REJECT,
3898 .errstr = "invalid access to packet",
3899 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3900 },
3901 {
3902 "helper access to packet: test16, cls helper fail range 1",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003903 .insns = {
3904 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3905 offsetof(struct __sk_buff, data)),
3906 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3907 offsetof(struct __sk_buff, data_end)),
3908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3909 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3911 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3912 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3913 BPF_MOV64_IMM(BPF_REG_2, 8),
3914 BPF_MOV64_IMM(BPF_REG_3, 0),
3915 BPF_MOV64_IMM(BPF_REG_4, 0),
3916 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003917 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3918 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003919 BPF_MOV64_IMM(BPF_REG_0, 0),
3920 BPF_EXIT_INSN(),
3921 },
3922 .result = REJECT,
3923 .errstr = "invalid access to packet",
3924 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3925 },
3926 {
Edward Creef65b1842017-08-07 15:27:12 +01003927 "helper access to packet: test17, cls helper fail range 2",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003928 .insns = {
3929 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3930 offsetof(struct __sk_buff, data)),
3931 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3932 offsetof(struct __sk_buff, data_end)),
3933 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3934 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3935 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3936 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3937 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3938 BPF_MOV64_IMM(BPF_REG_2, -9),
3939 BPF_MOV64_IMM(BPF_REG_3, 0),
3940 BPF_MOV64_IMM(BPF_REG_4, 0),
3941 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003942 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3943 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003944 BPF_MOV64_IMM(BPF_REG_0, 0),
3945 BPF_EXIT_INSN(),
3946 },
3947 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003948 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003949 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3950 },
3951 {
Edward Creef65b1842017-08-07 15:27:12 +01003952 "helper access to packet: test18, cls helper fail range 3",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003953 .insns = {
3954 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3955 offsetof(struct __sk_buff, data)),
3956 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3957 offsetof(struct __sk_buff, data_end)),
3958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3959 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3961 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3962 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3963 BPF_MOV64_IMM(BPF_REG_2, ~0),
3964 BPF_MOV64_IMM(BPF_REG_3, 0),
3965 BPF_MOV64_IMM(BPF_REG_4, 0),
3966 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003967 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3968 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003969 BPF_MOV64_IMM(BPF_REG_0, 0),
3970 BPF_EXIT_INSN(),
3971 },
3972 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003973 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003974 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3975 },
3976 {
Yonghong Songb6ff6392017-11-12 14:49:11 -08003977 "helper access to packet: test19, cls helper range zero",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003978 .insns = {
3979 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3980 offsetof(struct __sk_buff, data)),
3981 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3982 offsetof(struct __sk_buff, data_end)),
3983 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3984 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3985 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3986 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3987 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3988 BPF_MOV64_IMM(BPF_REG_2, 0),
3989 BPF_MOV64_IMM(BPF_REG_3, 0),
3990 BPF_MOV64_IMM(BPF_REG_4, 0),
3991 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003992 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3993 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003994 BPF_MOV64_IMM(BPF_REG_0, 0),
3995 BPF_EXIT_INSN(),
3996 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08003997 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003998 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3999 },
4000 {
Edward Creef65b1842017-08-07 15:27:12 +01004001 "helper access to packet: test20, pkt end as input",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004002 .insns = {
4003 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4004 offsetof(struct __sk_buff, data)),
4005 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4006 offsetof(struct __sk_buff, data_end)),
4007 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4008 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4010 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4011 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4012 BPF_MOV64_IMM(BPF_REG_2, 4),
4013 BPF_MOV64_IMM(BPF_REG_3, 0),
4014 BPF_MOV64_IMM(BPF_REG_4, 0),
4015 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004016 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4017 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004018 BPF_MOV64_IMM(BPF_REG_0, 0),
4019 BPF_EXIT_INSN(),
4020 },
4021 .result = REJECT,
4022 .errstr = "R1 type=pkt_end expected=fp",
4023 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4024 },
4025 {
Edward Creef65b1842017-08-07 15:27:12 +01004026 "helper access to packet: test21, wrong reg",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004027 .insns = {
4028 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4029 offsetof(struct __sk_buff, data)),
4030 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4031 offsetof(struct __sk_buff, data_end)),
4032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4033 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4035 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4036 BPF_MOV64_IMM(BPF_REG_2, 4),
4037 BPF_MOV64_IMM(BPF_REG_3, 0),
4038 BPF_MOV64_IMM(BPF_REG_4, 0),
4039 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004040 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4041 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004042 BPF_MOV64_IMM(BPF_REG_0, 0),
4043 BPF_EXIT_INSN(),
4044 },
4045 .result = REJECT,
4046 .errstr = "invalid access to packet",
4047 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4048 },
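	/* Array map value accesses: after a NULL check on the value returned
	 * by bpf_map_lookup_elem(), constant offsets and offsets derived from
	 * properly bounded variables are allowed, roughly:
	 *
	 *	struct test_val *val = bpf_map_lookup_elem(&map, &key);
	 *
	 *	if (!val)
	 *		return 0;
	 *	if (val->index >= MAX_ENTRIES)
	 *		return 0;
	 *	val->foo[val->index] = 0;
	 *
	 * The invalid variants further down omit the bounds check, check only
	 * one side, or use an off-by-one limit, and must be rejected.
	 * Unprivileged loads additionally refuse to leak the map value
	 * address ("R0 leaks addr").  (The field names above only illustrate
	 * the layout of the value used by fixup_map2.)
	 */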
Josef Bacik48461132016-09-28 10:54:32 -04004049 {
4050 "valid map access into an array with a constant",
4051 .insns = {
4052 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4053 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4055 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004056 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4057 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004058 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004059 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4060 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004061 BPF_EXIT_INSN(),
4062 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004063 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004064 .errstr_unpriv = "R0 leaks addr",
4065 .result_unpriv = REJECT,
4066 .result = ACCEPT,
4067 },
4068 {
4069 "valid map access into an array with a register",
4070 .insns = {
4071 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4072 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4074 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004075 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4076 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004077 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4078 BPF_MOV64_IMM(BPF_REG_1, 4),
4079 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4080 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004081 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4082 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004083 BPF_EXIT_INSN(),
4084 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004085 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004086 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004087 .result_unpriv = REJECT,
4088 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004089 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004090 },
4091 {
4092 "valid map access into an array with a variable",
4093 .insns = {
4094 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4095 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4096 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4097 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004098 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4099 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004100 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4101 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4102 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4103 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4104 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004105 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4106 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004107 BPF_EXIT_INSN(),
4108 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004109 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004110 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004111 .result_unpriv = REJECT,
4112 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004113 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004114 },
4115 {
4116 "valid map access into an array with a signed variable",
4117 .insns = {
4118 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4119 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4121 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004122 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4123 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004124 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4125 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4126 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4127 BPF_MOV32_IMM(BPF_REG_1, 0),
4128 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4129 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4130 BPF_MOV32_IMM(BPF_REG_1, 0),
4131 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4132 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004133 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4134 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004135 BPF_EXIT_INSN(),
4136 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004137 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004138 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004139 .result_unpriv = REJECT,
4140 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004141 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004142 },
4143 {
4144 "invalid map access into an array with a constant",
4145 .insns = {
4146 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4147 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4149 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4151 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4153 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4154 offsetof(struct test_val, foo)),
4155 BPF_EXIT_INSN(),
4156 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004157 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004158 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4159 .result = REJECT,
4160 },
4161 {
4162 "invalid map access into an array with a register",
4163 .insns = {
4164 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4165 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4166 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4167 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004168 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4169 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004170 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4171 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4172 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4173 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004174 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4175 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004176 BPF_EXIT_INSN(),
4177 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004178 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004179 .errstr = "R0 min value is outside of the array range",
4180 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004181 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004182 },
4183 {
4184 "invalid map access into an array with a variable",
4185 .insns = {
4186 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4189 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4191 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4193 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4194 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4195 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004196 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4197 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004198 BPF_EXIT_INSN(),
4199 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004200 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004201 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
Josef Bacik48461132016-09-28 10:54:32 -04004202 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004203 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004204 },
4205 {
4206 "invalid map access into an array with no floor check",
4207 .insns = {
4208 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4211 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004212 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4213 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004214 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
Edward Creef65b1842017-08-07 15:27:12 +01004215 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik48461132016-09-28 10:54:32 -04004216 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4217 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4218 BPF_MOV32_IMM(BPF_REG_1, 0),
4219 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4220 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004221 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4222 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004223 BPF_EXIT_INSN(),
4224 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004225 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004226 .errstr_unpriv = "R0 leaks addr",
4227 .errstr = "R0 unbounded memory access",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004228 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004229 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004230 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004231 },
4232 {
4233 "invalid map access into an array with a invalid max check",
4234 .insns = {
4235 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4236 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4238 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004239 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4240 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004241 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4242 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4243 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4244 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4245 BPF_MOV32_IMM(BPF_REG_1, 0),
4246 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4247 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004248 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4249 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004250 BPF_EXIT_INSN(),
4251 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004252 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004253 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004254 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004255 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004256 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004257 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004258 },
4259 {
4260 "invalid map access into an array with a invalid max check",
4261 .insns = {
4262 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4263 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4265 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004266 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4267 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004268 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4269 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4270 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4273 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4275 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004276 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4277 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004278 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4279 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004280 BPF_EXIT_INSN(),
4281 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004282 .fixup_map2 = { 3, 11 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004283 .errstr = "R0 pointer += pointer",
Josef Bacik48461132016-09-28 10:54:32 -04004284 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004285 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004286 },
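	/* Result-sharing tests: copies of the value returned by
	 * bpf_map_lookup_elem() share the same register id, so a NULL check
	 * on one copy also marks the others as safe to dereference.  A copy
	 * held in a caller-saved register (R1-R5) does not survive a second
	 * helper call, hence "R4 !read_ok" in the invalid variant; doing the
	 * second lookup in a branch and re-checking its result is fine.
	 */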
Thomas Graf57a09bf2016-10-18 19:51:19 +02004287 {
4288 "multiple registers share map_lookup_elem result",
4289 .insns = {
4290 BPF_MOV64_IMM(BPF_REG_1, 10),
4291 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4292 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4294 BPF_LD_MAP_FD(BPF_REG_1, 0),
4295 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4296 BPF_FUNC_map_lookup_elem),
4297 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4298 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4299 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4300 BPF_EXIT_INSN(),
4301 },
4302 .fixup_map1 = { 4 },
4303 .result = ACCEPT,
4304 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4305 },
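	/* ALU operations on PTR_TO_MAP_VALUE_OR_NULL: the raw return value of
	 * bpf_map_lookup_elem() may not be modified before the NULL check,
	 * not even by adjustments that net out to zero ("+= -2; += 2",
	 * "&= -1"), since arithmetic on the would-be pointer could defeat the
	 * later NULL check.
	 */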
4306 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004307 "alu ops on ptr_to_map_value_or_null, 1",
4308 .insns = {
4309 BPF_MOV64_IMM(BPF_REG_1, 10),
4310 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4313 BPF_LD_MAP_FD(BPF_REG_1, 0),
4314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4315 BPF_FUNC_map_lookup_elem),
4316 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
4318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
4319 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4320 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4321 BPF_EXIT_INSN(),
4322 },
4323 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004324 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004325 .result = REJECT,
4326 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4327 },
4328 {
4329 "alu ops on ptr_to_map_value_or_null, 2",
4330 .insns = {
4331 BPF_MOV64_IMM(BPF_REG_1, 10),
4332 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4333 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4335 BPF_LD_MAP_FD(BPF_REG_1, 0),
4336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4337 BPF_FUNC_map_lookup_elem),
4338 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4339 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
4340 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4341 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4342 BPF_EXIT_INSN(),
4343 },
4344 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004345 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004346 .result = REJECT,
4347 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4348 },
4349 {
4350 "alu ops on ptr_to_map_value_or_null, 3",
4351 .insns = {
4352 BPF_MOV64_IMM(BPF_REG_1, 10),
4353 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4354 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4356 BPF_LD_MAP_FD(BPF_REG_1, 0),
4357 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4358 BPF_FUNC_map_lookup_elem),
4359 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4360 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4361 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4362 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4363 BPF_EXIT_INSN(),
4364 },
4365 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004366 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004367 .result = REJECT,
4368 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4369 },
4370 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02004371 "invalid memory access with multiple map_lookup_elem calls",
4372 .insns = {
4373 BPF_MOV64_IMM(BPF_REG_1, 10),
4374 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4375 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4377 BPF_LD_MAP_FD(BPF_REG_1, 0),
4378 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4379 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4381 BPF_FUNC_map_lookup_elem),
4382 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4383 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4384 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4386 BPF_FUNC_map_lookup_elem),
4387 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4388 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4389 BPF_EXIT_INSN(),
4390 },
4391 .fixup_map1 = { 4 },
4392 .result = REJECT,
4393 .errstr = "R4 !read_ok",
4394 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4395 },
4396 {
4397 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4398 .insns = {
4399 BPF_MOV64_IMM(BPF_REG_1, 10),
4400 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4403 BPF_LD_MAP_FD(BPF_REG_1, 0),
4404 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4405 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4407 BPF_FUNC_map_lookup_elem),
4408 BPF_MOV64_IMM(BPF_REG_2, 10),
4409 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4410 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4411 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4413 BPF_FUNC_map_lookup_elem),
4414 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4415 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4416 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4417 BPF_EXIT_INSN(),
4418 },
4419 .fixup_map1 = { 4 },
4420 .result = ACCEPT,
4421 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4422 },
Josef Bacike9548902016-11-29 12:35:19 -05004423 {
4424 "invalid map access from else condition",
4425 .insns = {
4426 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4427 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4429 BPF_LD_MAP_FD(BPF_REG_1, 0),
4430 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4431 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4432 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4433 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4435 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4436 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4437 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4438 BPF_EXIT_INSN(),
4439 },
4440 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004441 .errstr = "R0 unbounded memory access",
Josef Bacike9548902016-11-29 12:35:19 -05004442 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004443 .errstr_unpriv = "R0 leaks addr",
Josef Bacike9548902016-11-29 12:35:19 -05004444 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004445 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05004446 },
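	/* "constant register |= constant" tests: OR-ing two known constants
	 * must leave the destination a known constant so that the size
	 * argument of bpf_probe_read() can still be checked against the
	 * stack buffer at fp-48: 34 | 13 = 47 fits and is accepted, while
	 * 34 | 24 = 58 exceeds the 48-byte window and is rejected with
	 * access_size=58.
	 */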
Gianluca Borello3c8397442016-12-03 12:31:33 -08004447 {
4448 "constant register |= constant should keep constant type",
4449 .insns = {
4450 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4451 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4452 BPF_MOV64_IMM(BPF_REG_2, 34),
4453 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4454 BPF_MOV64_IMM(BPF_REG_3, 0),
4455 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4456 BPF_EXIT_INSN(),
4457 },
4458 .result = ACCEPT,
4459 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4460 },
4461 {
4462 "constant register |= constant should not bypass stack boundary checks",
4463 .insns = {
4464 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4465 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4466 BPF_MOV64_IMM(BPF_REG_2, 34),
4467 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4468 BPF_MOV64_IMM(BPF_REG_3, 0),
4469 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4470 BPF_EXIT_INSN(),
4471 },
4472 .errstr = "invalid stack type R1 off=-48 access_size=58",
4473 .result = REJECT,
4474 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4475 },
4476 {
4477 "constant register |= constant register should keep constant type",
4478 .insns = {
4479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4481 BPF_MOV64_IMM(BPF_REG_2, 34),
4482 BPF_MOV64_IMM(BPF_REG_4, 13),
4483 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4484 BPF_MOV64_IMM(BPF_REG_3, 0),
4485 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4486 BPF_EXIT_INSN(),
4487 },
4488 .result = ACCEPT,
4489 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4490 },
4491 {
4492 "constant register |= constant register should not bypass stack boundary checks",
4493 .insns = {
4494 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4496 BPF_MOV64_IMM(BPF_REG_2, 34),
4497 BPF_MOV64_IMM(BPF_REG_4, 24),
4498 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4499 BPF_MOV64_IMM(BPF_REG_3, 0),
4500 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4501 BPF_EXIT_INSN(),
4502 },
4503 .errstr = "invalid stack type R1 off=-48 access_size=58",
4504 .result = REJECT,
4505 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4506 },
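	/* LWT program types: direct packet reads are allowed for LWT_IN,
	 * LWT_OUT and LWT_XMIT, but direct packet writes are only allowed
	 * for LWT_XMIT.
	 */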
Thomas Graf3f731d82016-12-05 10:30:52 +01004507 {
4508 "invalid direct packet write for LWT_IN",
4509 .insns = {
4510 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4511 offsetof(struct __sk_buff, data)),
4512 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4513 offsetof(struct __sk_buff, data_end)),
4514 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4516 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4517 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4518 BPF_MOV64_IMM(BPF_REG_0, 0),
4519 BPF_EXIT_INSN(),
4520 },
4521 .errstr = "cannot write into packet",
4522 .result = REJECT,
4523 .prog_type = BPF_PROG_TYPE_LWT_IN,
4524 },
4525 {
4526 "invalid direct packet write for LWT_OUT",
4527 .insns = {
4528 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4529 offsetof(struct __sk_buff, data)),
4530 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4531 offsetof(struct __sk_buff, data_end)),
4532 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4534 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4535 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4536 BPF_MOV64_IMM(BPF_REG_0, 0),
4537 BPF_EXIT_INSN(),
4538 },
4539 .errstr = "cannot write into packet",
4540 .result = REJECT,
4541 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4542 },
4543 {
4544 "direct packet write for LWT_XMIT",
4545 .insns = {
4546 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4547 offsetof(struct __sk_buff, data)),
4548 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4549 offsetof(struct __sk_buff, data_end)),
4550 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4552 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4553 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4554 BPF_MOV64_IMM(BPF_REG_0, 0),
4555 BPF_EXIT_INSN(),
4556 },
4557 .result = ACCEPT,
4558 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4559 },
4560 {
4561 "direct packet read for LWT_IN",
4562 .insns = {
4563 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4564 offsetof(struct __sk_buff, data)),
4565 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4566 offsetof(struct __sk_buff, data_end)),
4567 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4569 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4570 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4571 BPF_MOV64_IMM(BPF_REG_0, 0),
4572 BPF_EXIT_INSN(),
4573 },
4574 .result = ACCEPT,
4575 .prog_type = BPF_PROG_TYPE_LWT_IN,
4576 },
4577 {
4578 "direct packet read for LWT_OUT",
4579 .insns = {
4580 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4581 offsetof(struct __sk_buff, data)),
4582 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4583 offsetof(struct __sk_buff, data_end)),
4584 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4586 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4587 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4588 BPF_MOV64_IMM(BPF_REG_0, 0),
4589 BPF_EXIT_INSN(),
4590 },
4591 .result = ACCEPT,
4592 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4593 },
4594 {
4595 "direct packet read for LWT_XMIT",
4596 .insns = {
4597 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4598 offsetof(struct __sk_buff, data)),
4599 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4600 offsetof(struct __sk_buff, data_end)),
4601 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4602 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4603 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4604 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4605 BPF_MOV64_IMM(BPF_REG_0, 0),
4606 BPF_EXIT_INSN(),
4607 },
4608 .result = ACCEPT,
4609 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4610 },
4611 {
Alexei Starovoitovb1977682017-03-24 15:57:33 -07004612 "overlapping checks for direct packet access",
4613 .insns = {
4614 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4615 offsetof(struct __sk_buff, data)),
4616 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4617 offsetof(struct __sk_buff, data_end)),
4618 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4620 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4621 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4623 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4624 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4625 BPF_MOV64_IMM(BPF_REG_0, 0),
4626 BPF_EXIT_INSN(),
4627 },
4628 .result = ACCEPT,
4629 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4630 },
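	/* __sk_buff->tc_classid is not part of the LWT context, so any
	 * access from an LWT program must fail with
	 * "invalid bpf_context access".
	 */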
4631 {
Thomas Graf3f731d82016-12-05 10:30:52 +01004632 "invalid access of tc_classid for LWT_IN",
4633 .insns = {
4634 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4635 offsetof(struct __sk_buff, tc_classid)),
4636 BPF_EXIT_INSN(),
4637 },
4638 .result = REJECT,
4639 .errstr = "invalid bpf_context access",
4640 },
4641 {
4642 "invalid access of tc_classid for LWT_OUT",
4643 .insns = {
4644 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4645 offsetof(struct __sk_buff, tc_classid)),
4646 BPF_EXIT_INSN(),
4647 },
4648 .result = REJECT,
4649 .errstr = "invalid bpf_context access",
4650 },
4651 {
4652 "invalid access of tc_classid for LWT_XMIT",
4653 .insns = {
4654 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4655 offsetof(struct __sk_buff, tc_classid)),
4656 BPF_EXIT_INSN(),
4657 },
4658 .result = REJECT,
4659 .errstr = "invalid bpf_context access",
4660 },
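	/* Pointer leak tests: storing map or stack pointers into the ctx or
	 * into map values is an address leak for unprivileged users, and
	 * BPF_STX_XADD on the ctx is rejected for everyone.
	 */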
Gianluca Borello57225692017-01-09 10:19:47 -08004661 {
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004662 "leak pointer into ctx 1",
4663 .insns = {
4664 BPF_MOV64_IMM(BPF_REG_0, 0),
4665 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4666 offsetof(struct __sk_buff, cb[0])),
4667 BPF_LD_MAP_FD(BPF_REG_2, 0),
4668 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4669 offsetof(struct __sk_buff, cb[0])),
4670 BPF_EXIT_INSN(),
4671 },
4672 .fixup_map1 = { 2 },
4673 .errstr_unpriv = "R2 leaks addr into mem",
4674 .result_unpriv = REJECT,
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01004675 .result = REJECT,
4676 .errstr = "BPF_XADD stores into R1 context is not allowed",
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004677 },
4678 {
4679 "leak pointer into ctx 2",
4680 .insns = {
4681 BPF_MOV64_IMM(BPF_REG_0, 0),
4682 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4683 offsetof(struct __sk_buff, cb[0])),
4684 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4685 offsetof(struct __sk_buff, cb[0])),
4686 BPF_EXIT_INSN(),
4687 },
4688 .errstr_unpriv = "R10 leaks addr into mem",
4689 .result_unpriv = REJECT,
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01004690 .result = REJECT,
4691 .errstr = "BPF_XADD stores into R1 context is not allowed",
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004692 },
4693 {
4694 "leak pointer into ctx 3",
4695 .insns = {
4696 BPF_MOV64_IMM(BPF_REG_0, 0),
4697 BPF_LD_MAP_FD(BPF_REG_2, 0),
4698 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4699 offsetof(struct __sk_buff, cb[0])),
4700 BPF_EXIT_INSN(),
4701 },
4702 .fixup_map1 = { 1 },
4703 .errstr_unpriv = "R2 leaks addr into ctx",
4704 .result_unpriv = REJECT,
4705 .result = ACCEPT,
4706 },
4707 {
4708 "leak pointer into map val",
4709 .insns = {
4710 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4711 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4712 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4714 BPF_LD_MAP_FD(BPF_REG_1, 0),
4715 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4716 BPF_FUNC_map_lookup_elem),
4717 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4718 BPF_MOV64_IMM(BPF_REG_3, 0),
4719 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4720 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4721 BPF_MOV64_IMM(BPF_REG_0, 0),
4722 BPF_EXIT_INSN(),
4723 },
4724 .fixup_map1 = { 4 },
4725 .errstr_unpriv = "R6 leaks addr into mem",
4726 .result_unpriv = REJECT,
4727 .result = ACCEPT,
4728 },
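	/* Helper calls that take a (pointer, size) pair pointing into a map
	 * value: the range must stay within value_size, and empty or
	 * negative sizes are rejected.
	 */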
4729 {
Gianluca Borello57225692017-01-09 10:19:47 -08004730 "helper access to map: full range",
4731 .insns = {
4732 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4734 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4735 BPF_LD_MAP_FD(BPF_REG_1, 0),
4736 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4737 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4739 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4740 BPF_MOV64_IMM(BPF_REG_3, 0),
4741 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4742 BPF_EXIT_INSN(),
4743 },
4744 .fixup_map2 = { 3 },
4745 .result = ACCEPT,
4746 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4747 },
4748 {
4749 "helper access to map: partial range",
4750 .insns = {
4751 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4753 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4754 BPF_LD_MAP_FD(BPF_REG_1, 0),
4755 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4756 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4758 BPF_MOV64_IMM(BPF_REG_2, 8),
4759 BPF_MOV64_IMM(BPF_REG_3, 0),
4760 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4761 BPF_EXIT_INSN(),
4762 },
4763 .fixup_map2 = { 3 },
4764 .result = ACCEPT,
4765 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4766 },
4767 {
4768 "helper access to map: empty range",
4769 .insns = {
4770 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4772 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4773 BPF_LD_MAP_FD(BPF_REG_1, 0),
4774 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4776 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4777 BPF_MOV64_IMM(BPF_REG_2, 0),
4778 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08004779 BPF_EXIT_INSN(),
4780 },
4781 .fixup_map2 = { 3 },
4782 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4783 .result = REJECT,
4784 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4785 },
4786 {
4787 "helper access to map: out-of-bound range",
4788 .insns = {
4789 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4790 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4791 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4792 BPF_LD_MAP_FD(BPF_REG_1, 0),
4793 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4795 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4796 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4797 BPF_MOV64_IMM(BPF_REG_3, 0),
4798 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4799 BPF_EXIT_INSN(),
4800 },
4801 .fixup_map2 = { 3 },
4802 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4803 .result = REJECT,
4804 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4805 },
4806 {
4807 "helper access to map: negative range",
4808 .insns = {
4809 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4810 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4811 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4812 BPF_LD_MAP_FD(BPF_REG_1, 0),
4813 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4814 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4815 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4816 BPF_MOV64_IMM(BPF_REG_2, -8),
4817 BPF_MOV64_IMM(BPF_REG_3, 0),
4818 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4819 BPF_EXIT_INSN(),
4820 },
4821 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004822 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004823 .result = REJECT,
4824 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4825 },
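	/* Same range checks after the map value pointer has been moved
	 * forward by a constant immediate offset.
	 */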
4826 {
4827 "helper access to adjusted map (via const imm): full range",
4828 .insns = {
4829 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4831 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4832 BPF_LD_MAP_FD(BPF_REG_1, 0),
4833 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4834 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4835 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4836 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4837 offsetof(struct test_val, foo)),
4838 BPF_MOV64_IMM(BPF_REG_2,
4839 sizeof(struct test_val) -
4840 offsetof(struct test_val, foo)),
4841 BPF_MOV64_IMM(BPF_REG_3, 0),
4842 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4843 BPF_EXIT_INSN(),
4844 },
4845 .fixup_map2 = { 3 },
4846 .result = ACCEPT,
4847 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4848 },
4849 {
4850 "helper access to adjusted map (via const imm): partial range",
4851 .insns = {
4852 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4854 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4855 BPF_LD_MAP_FD(BPF_REG_1, 0),
4856 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4857 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4858 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4860 offsetof(struct test_val, foo)),
4861 BPF_MOV64_IMM(BPF_REG_2, 8),
4862 BPF_MOV64_IMM(BPF_REG_3, 0),
4863 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4864 BPF_EXIT_INSN(),
4865 },
4866 .fixup_map2 = { 3 },
4867 .result = ACCEPT,
4868 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4869 },
4870 {
4871 "helper access to adjusted map (via const imm): empty range",
4872 .insns = {
4873 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4875 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4876 BPF_LD_MAP_FD(BPF_REG_1, 0),
4877 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004878 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Gianluca Borello57225692017-01-09 10:19:47 -08004879 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4880 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4881 offsetof(struct test_val, foo)),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004882 BPF_MOV64_IMM(BPF_REG_2, 0),
4883 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08004884 BPF_EXIT_INSN(),
4885 },
4886 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004887 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
Gianluca Borello57225692017-01-09 10:19:47 -08004888 .result = REJECT,
4889 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4890 },
4891 {
4892 "helper access to adjusted map (via const imm): out-of-bound range",
4893 .insns = {
4894 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4896 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4897 BPF_LD_MAP_FD(BPF_REG_1, 0),
4898 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4899 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4900 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4902 offsetof(struct test_val, foo)),
4903 BPF_MOV64_IMM(BPF_REG_2,
4904 sizeof(struct test_val) -
4905 offsetof(struct test_val, foo) + 8),
4906 BPF_MOV64_IMM(BPF_REG_3, 0),
4907 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4908 BPF_EXIT_INSN(),
4909 },
4910 .fixup_map2 = { 3 },
4911 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4912 .result = REJECT,
4913 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4914 },
4915 {
4916 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4917 .insns = {
4918 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4920 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4921 BPF_LD_MAP_FD(BPF_REG_1, 0),
4922 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4923 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4924 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4925 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4926 offsetof(struct test_val, foo)),
4927 BPF_MOV64_IMM(BPF_REG_2, -8),
4928 BPF_MOV64_IMM(BPF_REG_3, 0),
4929 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4930 BPF_EXIT_INSN(),
4931 },
4932 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004933 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004934 .result = REJECT,
4935 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4936 },
4937 {
4938 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4939 .insns = {
4940 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4941 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4942 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4943 BPF_LD_MAP_FD(BPF_REG_1, 0),
4944 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4945 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4946 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4947 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4948 offsetof(struct test_val, foo)),
4949 BPF_MOV64_IMM(BPF_REG_2, -1),
4950 BPF_MOV64_IMM(BPF_REG_3, 0),
4951 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4952 BPF_EXIT_INSN(),
4953 },
4954 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004955 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004956 .result = REJECT,
4957 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4958 },
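	/* As above, but the constant offset is added via a register instead
	 * of an immediate.
	 */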
4959 {
4960 "helper access to adjusted map (via const reg): full range",
4961 .insns = {
4962 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4964 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4965 BPF_LD_MAP_FD(BPF_REG_1, 0),
4966 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4967 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4968 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4969 BPF_MOV64_IMM(BPF_REG_3,
4970 offsetof(struct test_val, foo)),
4971 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4972 BPF_MOV64_IMM(BPF_REG_2,
4973 sizeof(struct test_val) -
4974 offsetof(struct test_val, foo)),
4975 BPF_MOV64_IMM(BPF_REG_3, 0),
4976 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4977 BPF_EXIT_INSN(),
4978 },
4979 .fixup_map2 = { 3 },
4980 .result = ACCEPT,
4981 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4982 },
4983 {
4984 "helper access to adjusted map (via const reg): partial range",
4985 .insns = {
4986 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4987 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4988 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4989 BPF_LD_MAP_FD(BPF_REG_1, 0),
4990 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4991 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4993 BPF_MOV64_IMM(BPF_REG_3,
4994 offsetof(struct test_val, foo)),
4995 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4996 BPF_MOV64_IMM(BPF_REG_2, 8),
4997 BPF_MOV64_IMM(BPF_REG_3, 0),
4998 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4999 BPF_EXIT_INSN(),
5000 },
5001 .fixup_map2 = { 3 },
5002 .result = ACCEPT,
5003 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5004 },
5005 {
5006 "helper access to adjusted map (via const reg): empty range",
5007 .insns = {
5008 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5010 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5011 BPF_LD_MAP_FD(BPF_REG_1, 0),
5012 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005013 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
Gianluca Borello57225692017-01-09 10:19:47 -08005014 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5015 BPF_MOV64_IMM(BPF_REG_3, 0),
5016 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005017 BPF_MOV64_IMM(BPF_REG_2, 0),
5018 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005019 BPF_EXIT_INSN(),
5020 },
5021 .fixup_map2 = { 3 },
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005022 .errstr = "R1 min value is outside of the array range",
Gianluca Borello57225692017-01-09 10:19:47 -08005023 .result = REJECT,
5024 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5025 },
5026 {
5027 "helper access to adjusted map (via const reg): out-of-bound range",
5028 .insns = {
5029 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5031 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5032 BPF_LD_MAP_FD(BPF_REG_1, 0),
5033 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5035 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5036 BPF_MOV64_IMM(BPF_REG_3,
5037 offsetof(struct test_val, foo)),
5038 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5039 BPF_MOV64_IMM(BPF_REG_2,
5040 sizeof(struct test_val) -
5041 offsetof(struct test_val, foo) + 8),
5042 BPF_MOV64_IMM(BPF_REG_3, 0),
5043 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5044 BPF_EXIT_INSN(),
5045 },
5046 .fixup_map2 = { 3 },
5047 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5048 .result = REJECT,
5049 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5050 },
5051 {
5052 "helper access to adjusted map (via const reg): negative range (> adjustment)",
5053 .insns = {
5054 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5056 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5057 BPF_LD_MAP_FD(BPF_REG_1, 0),
5058 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5059 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5060 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5061 BPF_MOV64_IMM(BPF_REG_3,
5062 offsetof(struct test_val, foo)),
5063 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5064 BPF_MOV64_IMM(BPF_REG_2, -8),
5065 BPF_MOV64_IMM(BPF_REG_3, 0),
5066 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5067 BPF_EXIT_INSN(),
5068 },
5069 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005070 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005071 .result = REJECT,
5072 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5073 },
5074 {
5075 "helper access to adjusted map (via const reg): negative range (< adjustment)",
5076 .insns = {
5077 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5078 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5079 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5080 BPF_LD_MAP_FD(BPF_REG_1, 0),
5081 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5082 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5083 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5084 BPF_MOV64_IMM(BPF_REG_3,
5085 offsetof(struct test_val, foo)),
5086 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5087 BPF_MOV64_IMM(BPF_REG_2, -1),
5088 BPF_MOV64_IMM(BPF_REG_3, 0),
5089 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5090 BPF_EXIT_INSN(),
5091 },
5092 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005093 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005094 .result = REJECT,
5095 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5096 },
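	/* Here the offset is a value read from the map itself, so it is only
	 * usable once both its lower and upper bounds have been checked.
	 */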
5097 {
5098 "helper access to adjusted map (via variable): full range",
5099 .insns = {
5100 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5101 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5102 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5103 BPF_LD_MAP_FD(BPF_REG_1, 0),
5104 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5105 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5107 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5108 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5109 offsetof(struct test_val, foo), 4),
5110 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5111 BPF_MOV64_IMM(BPF_REG_2,
5112 sizeof(struct test_val) -
5113 offsetof(struct test_val, foo)),
5114 BPF_MOV64_IMM(BPF_REG_3, 0),
5115 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5116 BPF_EXIT_INSN(),
5117 },
5118 .fixup_map2 = { 3 },
5119 .result = ACCEPT,
5120 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5121 },
5122 {
5123 "helper access to adjusted map (via variable): partial range",
5124 .insns = {
5125 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5126 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5127 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5128 BPF_LD_MAP_FD(BPF_REG_1, 0),
5129 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5130 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5131 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5132 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5133 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5134 offsetof(struct test_val, foo), 4),
5135 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5136 BPF_MOV64_IMM(BPF_REG_2, 8),
5137 BPF_MOV64_IMM(BPF_REG_3, 0),
5138 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5139 BPF_EXIT_INSN(),
5140 },
5141 .fixup_map2 = { 3 },
5142 .result = ACCEPT,
5143 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5144 },
5145 {
5146 "helper access to adjusted map (via variable): empty range",
5147 .insns = {
5148 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5149 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5150 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5151 BPF_LD_MAP_FD(BPF_REG_1, 0),
5152 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
Gianluca Borello57225692017-01-09 10:19:47 -08005154 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5155 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5156 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005157 offsetof(struct test_val, foo), 3),
Gianluca Borello57225692017-01-09 10:19:47 -08005158 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005159 BPF_MOV64_IMM(BPF_REG_2, 0),
5160 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005161 BPF_EXIT_INSN(),
5162 },
5163 .fixup_map2 = { 3 },
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005164 .errstr = "R1 min value is outside of the array range",
Gianluca Borello57225692017-01-09 10:19:47 -08005165 .result = REJECT,
5166 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5167 },
5168 {
5169 "helper access to adjusted map (via variable): no max check",
5170 .insns = {
5171 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5173 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5174 BPF_LD_MAP_FD(BPF_REG_1, 0),
5175 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5177 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5178 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5179 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Edward Creef65b1842017-08-07 15:27:12 +01005180 BPF_MOV64_IMM(BPF_REG_2, 1),
Gianluca Borello57225692017-01-09 10:19:47 -08005181 BPF_MOV64_IMM(BPF_REG_3, 0),
5182 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5183 BPF_EXIT_INSN(),
5184 },
5185 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005186 .errstr = "R1 unbounded memory access",
Gianluca Borello57225692017-01-09 10:19:47 -08005187 .result = REJECT,
5188 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5189 },
5190 {
5191 "helper access to adjusted map (via variable): wrong max check",
5192 .insns = {
5193 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5195 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5196 BPF_LD_MAP_FD(BPF_REG_1, 0),
5197 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5198 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5199 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5200 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5201 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5202 offsetof(struct test_val, foo), 4),
5203 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5204 BPF_MOV64_IMM(BPF_REG_2,
5205 sizeof(struct test_val) -
5206 offsetof(struct test_val, foo) + 1),
5207 BPF_MOV64_IMM(BPF_REG_3, 0),
5208 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5209 BPF_EXIT_INSN(),
5210 },
5211 .fixup_map2 = { 3 },
5212 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
5213 .result = REJECT,
5214 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5215 },
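	/* Bounds checks written with the BPF_JLT/BPF_JLE/BPF_JSLT/BPF_JSLE
	 * comparisons: the verifier must derive correct min/max bounds on
	 * both the taken and fall-through paths.
	 */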
Gianluca Borellof0318d02017-01-09 10:19:48 -08005216 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02005217 "helper access to map: bounds check using <, good access",
5218 .insns = {
5219 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5221 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5222 BPF_LD_MAP_FD(BPF_REG_1, 0),
5223 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5224 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5225 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5226 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5227 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
5228 BPF_MOV64_IMM(BPF_REG_0, 0),
5229 BPF_EXIT_INSN(),
5230 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5231 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5232 BPF_MOV64_IMM(BPF_REG_0, 0),
5233 BPF_EXIT_INSN(),
5234 },
5235 .fixup_map2 = { 3 },
5236 .result = ACCEPT,
5237 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5238 },
5239 {
5240 "helper access to map: bounds check using <, bad access",
5241 .insns = {
5242 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5243 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5244 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5245 BPF_LD_MAP_FD(BPF_REG_1, 0),
5246 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5247 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5248 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5249 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5250 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
5251 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5252 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5253 BPF_MOV64_IMM(BPF_REG_0, 0),
5254 BPF_EXIT_INSN(),
5255 BPF_MOV64_IMM(BPF_REG_0, 0),
5256 BPF_EXIT_INSN(),
5257 },
5258 .fixup_map2 = { 3 },
5259 .result = REJECT,
5260 .errstr = "R1 unbounded memory access",
5261 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5262 },
5263 {
5264 "helper access to map: bounds check using <=, good access",
5265 .insns = {
5266 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5268 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5269 BPF_LD_MAP_FD(BPF_REG_1, 0),
5270 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5271 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5272 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5273 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5274 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
5275 BPF_MOV64_IMM(BPF_REG_0, 0),
5276 BPF_EXIT_INSN(),
5277 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5278 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5279 BPF_MOV64_IMM(BPF_REG_0, 0),
5280 BPF_EXIT_INSN(),
5281 },
5282 .fixup_map2 = { 3 },
5283 .result = ACCEPT,
5284 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5285 },
5286 {
5287 "helper access to map: bounds check using <=, bad access",
5288 .insns = {
5289 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5291 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5292 BPF_LD_MAP_FD(BPF_REG_1, 0),
5293 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5294 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5296 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5297 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
5298 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5299 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5300 BPF_MOV64_IMM(BPF_REG_0, 0),
5301 BPF_EXIT_INSN(),
5302 BPF_MOV64_IMM(BPF_REG_0, 0),
5303 BPF_EXIT_INSN(),
5304 },
5305 .fixup_map2 = { 3 },
5306 .result = REJECT,
5307 .errstr = "R1 unbounded memory access",
5308 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5309 },
5310 {
5311 "helper access to map: bounds check using s<, good access",
5312 .insns = {
5313 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5315 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5316 BPF_LD_MAP_FD(BPF_REG_1, 0),
5317 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5318 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5319 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5320 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5321 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5322 BPF_MOV64_IMM(BPF_REG_0, 0),
5323 BPF_EXIT_INSN(),
5324 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
5325 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5326 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5327 BPF_MOV64_IMM(BPF_REG_0, 0),
5328 BPF_EXIT_INSN(),
5329 },
5330 .fixup_map2 = { 3 },
5331 .result = ACCEPT,
5332 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5333 },
5334 {
5335 "helper access to map: bounds check using s<, good access 2",
5336 .insns = {
5337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5339 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5340 BPF_LD_MAP_FD(BPF_REG_1, 0),
5341 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5342 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5343 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5344 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5345 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5346 BPF_MOV64_IMM(BPF_REG_0, 0),
5347 BPF_EXIT_INSN(),
5348 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5349 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5350 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5351 BPF_MOV64_IMM(BPF_REG_0, 0),
5352 BPF_EXIT_INSN(),
5353 },
5354 .fixup_map2 = { 3 },
5355 .result = ACCEPT,
5356 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5357 },
5358 {
5359 "helper access to map: bounds check using s<, bad access",
5360 .insns = {
5361 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5363 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5364 BPF_LD_MAP_FD(BPF_REG_1, 0),
5365 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5366 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5367 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5368 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5369 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5370 BPF_MOV64_IMM(BPF_REG_0, 0),
5371 BPF_EXIT_INSN(),
5372 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5373 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5374 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5375 BPF_MOV64_IMM(BPF_REG_0, 0),
5376 BPF_EXIT_INSN(),
5377 },
5378 .fixup_map2 = { 3 },
5379 .result = REJECT,
5380 .errstr = "R1 min value is negative",
5381 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5382 },
5383 {
5384 "helper access to map: bounds check using s<=, good access",
5385 .insns = {
5386 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5388 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5389 BPF_LD_MAP_FD(BPF_REG_1, 0),
5390 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5391 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5392 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5393 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5394 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5395 BPF_MOV64_IMM(BPF_REG_0, 0),
5396 BPF_EXIT_INSN(),
5397 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5398 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5399 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5400 BPF_MOV64_IMM(BPF_REG_0, 0),
5401 BPF_EXIT_INSN(),
5402 },
5403 .fixup_map2 = { 3 },
5404 .result = ACCEPT,
5405 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5406 },
5407 {
5408 "helper access to map: bounds check using s<=, good access 2",
5409 .insns = {
5410 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5411 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5412 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5413 BPF_LD_MAP_FD(BPF_REG_1, 0),
5414 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5415 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5416 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5417 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5418 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5419 BPF_MOV64_IMM(BPF_REG_0, 0),
5420 BPF_EXIT_INSN(),
5421 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5422 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5423 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5424 BPF_MOV64_IMM(BPF_REG_0, 0),
5425 BPF_EXIT_INSN(),
5426 },
5427 .fixup_map2 = { 3 },
5428 .result = ACCEPT,
5429 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5430 },
5431 {
5432 "helper access to map: bounds check using s<=, bad access",
5433 .insns = {
5434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5436 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5437 BPF_LD_MAP_FD(BPF_REG_1, 0),
5438 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5439 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5440 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5441 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5442 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5443 BPF_MOV64_IMM(BPF_REG_0, 0),
5444 BPF_EXIT_INSN(),
5445 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5446 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5447 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5448 BPF_MOV64_IMM(BPF_REG_0, 0),
5449 BPF_EXIT_INSN(),
5450 },
5451 .fixup_map2 = { 3 },
5452 .result = REJECT,
5453 .errstr = "R1 min value is negative",
5454 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5455 },
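	/* Map value pointers across register spills, unaligned accesses, and
	 * ALU operations that are illegal on pointers (&=, 32-bit add, /=,
	 * byte swap, xadd of an unknown scalar).
	 */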
5456 {
Gianluca Borellof0318d02017-01-09 10:19:48 -08005457 "map element value is preserved across register spilling",
5458 .insns = {
5459 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5461 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5462 BPF_LD_MAP_FD(BPF_REG_1, 0),
5463 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5464 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5465 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5466 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5467 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5468 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5469 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5470 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5471 BPF_EXIT_INSN(),
5472 },
5473 .fixup_map2 = { 3 },
5474 .errstr_unpriv = "R0 leaks addr",
5475 .result = ACCEPT,
5476 .result_unpriv = REJECT,
5477 },
5478 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005479 "map element value or null is marked on register spilling",
5480 .insns = {
5481 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5482 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5483 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5484 BPF_LD_MAP_FD(BPF_REG_1, 0),
5485 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5486 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5487 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5488 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5489 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5490 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5491 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5492 BPF_EXIT_INSN(),
5493 },
5494 .fixup_map2 = { 3 },
5495 .errstr_unpriv = "R0 leaks addr",
5496 .result = ACCEPT,
5497 .result_unpriv = REJECT,
5498 },
5499 {
5500 "map element value store of cleared call register",
5501 .insns = {
5502 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5504 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5505 BPF_LD_MAP_FD(BPF_REG_1, 0),
5506 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5507 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5508 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5509 BPF_EXIT_INSN(),
5510 },
5511 .fixup_map2 = { 3 },
5512 .errstr_unpriv = "R1 !read_ok",
5513 .errstr = "R1 !read_ok",
5514 .result = REJECT,
5515 .result_unpriv = REJECT,
5516 },
5517 {
5518 "map element value with unaligned store",
5519 .insns = {
5520 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5522 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5523 BPF_LD_MAP_FD(BPF_REG_1, 0),
5524 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5525 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5526 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5527 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5528 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5529 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5530 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5531 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5532 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5533 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5535 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5536 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5537 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5538 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5540 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5541 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5542 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5543 BPF_EXIT_INSN(),
5544 },
5545 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005546 .errstr_unpriv = "R0 leaks addr",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005547 .result = ACCEPT,
5548 .result_unpriv = REJECT,
5549 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5550 },
5551 {
5552 "map element value with unaligned load",
5553 .insns = {
5554 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5555 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5556 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5557 BPF_LD_MAP_FD(BPF_REG_1, 0),
5558 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5559 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5560 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5561 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5563 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5564 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5565 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5566 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5567 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5569 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5570 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5571 BPF_EXIT_INSN(),
5572 },
5573 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005574 .errstr_unpriv = "R0 leaks addr",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005575 .result = ACCEPT,
5576 .result_unpriv = REJECT,
5577 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5578 },
5579 {
5580 "map element value illegal alu op, 1",
5581 .insns = {
5582 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5583 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5584 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5585 BPF_LD_MAP_FD(BPF_REG_1, 0),
5586 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5587 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5588 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5589 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5590 BPF_EXIT_INSN(),
5591 },
5592 .fixup_map2 = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08005593 .errstr = "R0 bitwise operator &= on pointer",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005594 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005595 },
5596 {
5597 "map element value illegal alu op, 2",
5598 .insns = {
5599 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5601 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5602 BPF_LD_MAP_FD(BPF_REG_1, 0),
5603 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5604 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5605 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5606 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5607 BPF_EXIT_INSN(),
5608 },
5609 .fixup_map2 = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08005610 .errstr = "R0 32-bit pointer arithmetic prohibited",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005611 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005612 },
5613 {
5614 "map element value illegal alu op, 3",
5615 .insns = {
5616 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5617 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5618 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5619 BPF_LD_MAP_FD(BPF_REG_1, 0),
5620 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5622 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5623 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5624 BPF_EXIT_INSN(),
5625 },
5626 .fixup_map2 = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08005627 .errstr = "R0 pointer arithmetic with /= operator",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005628 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005629 },
5630 {
5631 "map element value illegal alu op, 4",
5632 .insns = {
5633 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5634 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5635 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5636 BPF_LD_MAP_FD(BPF_REG_1, 0),
5637 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5638 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5639 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5640 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5641 BPF_EXIT_INSN(),
5642 },
5643 .fixup_map2 = { 3 },
5644 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5645 .errstr = "invalid mem access 'inv'",
5646 .result = REJECT,
5647 .result_unpriv = REJECT,
5648 },
5649 {
5650 "map element value illegal alu op, 5",
5651 .insns = {
5652 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5654 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5655 BPF_LD_MAP_FD(BPF_REG_1, 0),
5656 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5657 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5658 BPF_MOV64_IMM(BPF_REG_3, 4096),
5659 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5661 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5662 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5663 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5664 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5665 BPF_EXIT_INSN(),
5666 },
5667 .fixup_map2 = { 3 },
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005668 .errstr = "R0 invalid mem access 'inv'",
5669 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005670 },
5671 {
5672 "map element value is preserved across register spilling",
Gianluca Borellof0318d02017-01-09 10:19:48 -08005673 .insns = {
5674 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5675 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5676 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5677 BPF_LD_MAP_FD(BPF_REG_1, 0),
5678 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5681 offsetof(struct test_val, foo)),
5682 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5683 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5685 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5686 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5687 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5688 BPF_EXIT_INSN(),
5689 },
5690 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005691 .errstr_unpriv = "R0 leaks addr",
Gianluca Borellof0318d02017-01-09 10:19:48 -08005692 .result = ACCEPT,
5693 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005694 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Gianluca Borellof0318d02017-01-09 10:19:48 -08005695 },
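	/* Variable-size helper arguments backed by stack memory.  The length
	 * register must be provably bounded before the call, roughly:
	 *
	 *	len = *(u64 *)(fp - 128);
	 *	len &= 64;                  (or: if (len > 64) goto out;)
	 *	if (0 >= len) goto out;
	 *	bpf_probe_read(buf, len, NULL);
	 *
	 * and the stack region handed to the helper must be initialized.
	 */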
Gianluca Borello06c1c042017-01-09 10:19:49 -08005696 {
5697 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5698 .insns = {
5699 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5701 BPF_MOV64_IMM(BPF_REG_0, 0),
5702 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5703 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5704 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5705 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5706 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5707 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5708 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5709 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5710 BPF_MOV64_IMM(BPF_REG_2, 16),
5711 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5712 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5713 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5714 BPF_MOV64_IMM(BPF_REG_4, 0),
5715 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5716 BPF_MOV64_IMM(BPF_REG_3, 0),
5717 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5718 BPF_MOV64_IMM(BPF_REG_0, 0),
5719 BPF_EXIT_INSN(),
5720 },
5721 .result = ACCEPT,
5722 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5723 },
5724 {
5725 "helper access to variable memory: stack, bitwise AND, zero included",
5726 .insns = {
5727 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5729 BPF_MOV64_IMM(BPF_REG_2, 16),
5730 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5731 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5732 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5733 BPF_MOV64_IMM(BPF_REG_3, 0),
5734 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5735 BPF_EXIT_INSN(),
5736 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08005737 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005738 .result = REJECT,
5739 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5740 },
5741 {
5742 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5743 .insns = {
5744 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5746 BPF_MOV64_IMM(BPF_REG_2, 16),
5747 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5748 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5749 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5750 BPF_MOV64_IMM(BPF_REG_4, 0),
5751 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5752 BPF_MOV64_IMM(BPF_REG_3, 0),
5753 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5754 BPF_MOV64_IMM(BPF_REG_0, 0),
5755 BPF_EXIT_INSN(),
5756 },
5757 .errstr = "invalid stack type R1 off=-64 access_size=65",
5758 .result = REJECT,
5759 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5760 },
5761 {
5762 "helper access to variable memory: stack, JMP, correct bounds",
5763 .insns = {
5764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5766 BPF_MOV64_IMM(BPF_REG_0, 0),
5767 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5768 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5769 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5770 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5771 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5772 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5773 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5774 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5775 BPF_MOV64_IMM(BPF_REG_2, 16),
5776 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5777 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5778 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5779 BPF_MOV64_IMM(BPF_REG_4, 0),
5780 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5781 BPF_MOV64_IMM(BPF_REG_3, 0),
5782 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5783 BPF_MOV64_IMM(BPF_REG_0, 0),
5784 BPF_EXIT_INSN(),
5785 },
5786 .result = ACCEPT,
5787 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5788 },
5789 {
5790 "helper access to variable memory: stack, JMP (signed), correct bounds",
5791 .insns = {
5792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5794 BPF_MOV64_IMM(BPF_REG_0, 0),
5795 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5796 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5797 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5798 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5799 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5800 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5801 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5802 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5803 BPF_MOV64_IMM(BPF_REG_2, 16),
5804 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5805 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5806 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5807 BPF_MOV64_IMM(BPF_REG_4, 0),
5808 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5809 BPF_MOV64_IMM(BPF_REG_3, 0),
5810 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5811 BPF_MOV64_IMM(BPF_REG_0, 0),
5812 BPF_EXIT_INSN(),
5813 },
5814 .result = ACCEPT,
5815 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5816 },
5817 {
5818 "helper access to variable memory: stack, JMP, bounds + offset",
5819 .insns = {
5820 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5822 BPF_MOV64_IMM(BPF_REG_2, 16),
5823 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5824 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5825 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5826 BPF_MOV64_IMM(BPF_REG_4, 0),
5827 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5829 BPF_MOV64_IMM(BPF_REG_3, 0),
5830 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5831 BPF_MOV64_IMM(BPF_REG_0, 0),
5832 BPF_EXIT_INSN(),
5833 },
5834 .errstr = "invalid stack type R1 off=-64 access_size=65",
5835 .result = REJECT,
5836 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5837 },
5838 {
5839 "helper access to variable memory: stack, JMP, wrong max",
5840 .insns = {
5841 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5843 BPF_MOV64_IMM(BPF_REG_2, 16),
5844 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5845 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5846 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5847 BPF_MOV64_IMM(BPF_REG_4, 0),
5848 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5849 BPF_MOV64_IMM(BPF_REG_3, 0),
5850 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5851 BPF_MOV64_IMM(BPF_REG_0, 0),
5852 BPF_EXIT_INSN(),
5853 },
5854 .errstr = "invalid stack type R1 off=-64 access_size=65",
5855 .result = REJECT,
5856 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5857 },
5858 {
5859 "helper access to variable memory: stack, JMP, no max check",
5860 .insns = {
5861 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5863 BPF_MOV64_IMM(BPF_REG_2, 16),
5864 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5865 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5866 BPF_MOV64_IMM(BPF_REG_4, 0),
5867 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5868 BPF_MOV64_IMM(BPF_REG_3, 0),
5869 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5870 BPF_MOV64_IMM(BPF_REG_0, 0),
5871 BPF_EXIT_INSN(),
5872 },
Edward Creef65b1842017-08-07 15:27:12 +01005873 /* because max wasn't checked, signed min is negative */
5874 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005875 .result = REJECT,
5876 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5877 },
5878 {
5879 "helper access to variable memory: stack, JMP, no min check",
5880 .insns = {
5881 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5882 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5883 BPF_MOV64_IMM(BPF_REG_2, 16),
5884 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5885 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5886 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5887 BPF_MOV64_IMM(BPF_REG_3, 0),
5888 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5889 BPF_MOV64_IMM(BPF_REG_0, 0),
5890 BPF_EXIT_INSN(),
5891 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08005892 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005893 .result = REJECT,
5894 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5895 },
5896 {
5897 "helper access to variable memory: stack, JMP (signed), no min check",
5898 .insns = {
5899 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5901 BPF_MOV64_IMM(BPF_REG_2, 16),
5902 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5903 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5904 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5905 BPF_MOV64_IMM(BPF_REG_3, 0),
5906 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5907 BPF_MOV64_IMM(BPF_REG_0, 0),
5908 BPF_EXIT_INSN(),
5909 },
5910 .errstr = "R2 min value is negative",
5911 .result = REJECT,
5912 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5913 },
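	/* The same variable-size checks with the memory argument pointing
	 * into a map value instead of the stack.
	 */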
5914 {
5915 "helper access to variable memory: map, JMP, correct bounds",
5916 .insns = {
5917 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5918 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5919 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5920 BPF_LD_MAP_FD(BPF_REG_1, 0),
5921 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5922 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5923 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5924 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5925 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5926 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5927 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5928 sizeof(struct test_val), 4),
5929 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02005930 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005931 BPF_MOV64_IMM(BPF_REG_3, 0),
5932 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5933 BPF_MOV64_IMM(BPF_REG_0, 0),
5934 BPF_EXIT_INSN(),
5935 },
5936 .fixup_map2 = { 3 },
5937 .result = ACCEPT,
5938 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5939 },
5940 {
5941 "helper access to variable memory: map, JMP, wrong max",
5942 .insns = {
5943 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5945 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5946 BPF_LD_MAP_FD(BPF_REG_1, 0),
5947 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5948 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5949 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5950 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5951 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5952 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5953 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5954 sizeof(struct test_val) + 1, 4),
5955 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02005956 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005957 BPF_MOV64_IMM(BPF_REG_3, 0),
5958 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5959 BPF_MOV64_IMM(BPF_REG_0, 0),
5960 BPF_EXIT_INSN(),
5961 },
5962 .fixup_map2 = { 3 },
5963 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5964 .result = REJECT,
5965 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5966 },
5967 {
5968 "helper access to variable memory: map adjusted, JMP, correct bounds",
5969 .insns = {
5970 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5972 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5973 BPF_LD_MAP_FD(BPF_REG_1, 0),
5974 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5975 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5976 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5978 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5979 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5980 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5981 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5982 sizeof(struct test_val) - 20, 4),
5983 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02005984 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005985 BPF_MOV64_IMM(BPF_REG_3, 0),
5986 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5987 BPF_MOV64_IMM(BPF_REG_0, 0),
5988 BPF_EXIT_INSN(),
5989 },
5990 .fixup_map2 = { 3 },
5991 .result = ACCEPT,
5992 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5993 },
5994 {
5995 "helper access to variable memory: map adjusted, JMP, wrong max",
5996 .insns = {
5997 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5999 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6000 BPF_LD_MAP_FD(BPF_REG_1, 0),
6001 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6002 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6003 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6004 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6005 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6006 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6007 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6008 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6009 sizeof(struct test_val) - 19, 4),
6010 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006011 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006012 BPF_MOV64_IMM(BPF_REG_3, 0),
6013 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6014 BPF_MOV64_IMM(BPF_REG_0, 0),
6015 BPF_EXIT_INSN(),
6016 },
6017 .fixup_map2 = { 3 },
6018 .errstr = "R1 min value is outside of the array range",
6019 .result = REJECT,
6020 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6021 },
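	/*
	 * The following group covers helper arguments of type
	 * ARG_PTR_TO_MEM_OR_NULL, as named in the test descriptions.
	 * bpf_csum_diff() serves as the example helper: a NULL buffer is
	 * only acceptable together with a size that is provably zero, while
	 * a pointer known to be non-NULL (stack, map value or packet) may be
	 * paired with a zero or possibly-zero size.
	 */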
6022 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006023 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Edward Creef65b1842017-08-07 15:27:12 +01006024 .insns = {
6025 BPF_MOV64_IMM(BPF_REG_1, 0),
6026 BPF_MOV64_IMM(BPF_REG_2, 0),
6027 BPF_MOV64_IMM(BPF_REG_3, 0),
6028 BPF_MOV64_IMM(BPF_REG_4, 0),
6029 BPF_MOV64_IMM(BPF_REG_5, 0),
6030 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6031 BPF_EXIT_INSN(),
6032 },
6033 .result = ACCEPT,
6034 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6035 },
6036 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006037 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006038 .insns = {
6039 BPF_MOV64_IMM(BPF_REG_1, 0),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08006040 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01006041 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6042 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006043 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6044 BPF_MOV64_IMM(BPF_REG_3, 0),
6045 BPF_MOV64_IMM(BPF_REG_4, 0),
6046 BPF_MOV64_IMM(BPF_REG_5, 0),
6047 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6048 BPF_EXIT_INSN(),
6049 },
Edward Creef65b1842017-08-07 15:27:12 +01006050 .errstr = "R1 type=inv expected=fp",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006051 .result = REJECT,
6052 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6053 },
6054 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006055 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006056 .insns = {
6057 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6059 BPF_MOV64_IMM(BPF_REG_2, 0),
6060 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6061 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
6062 BPF_MOV64_IMM(BPF_REG_3, 0),
6063 BPF_MOV64_IMM(BPF_REG_4, 0),
6064 BPF_MOV64_IMM(BPF_REG_5, 0),
6065 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6066 BPF_EXIT_INSN(),
6067 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08006068 .result = ACCEPT,
6069 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6070 },
6071 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006072 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006073 .insns = {
6074 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6075 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6076 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6077 BPF_LD_MAP_FD(BPF_REG_1, 0),
6078 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6079 BPF_FUNC_map_lookup_elem),
6080 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6081 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6082 BPF_MOV64_IMM(BPF_REG_2, 0),
6083 BPF_MOV64_IMM(BPF_REG_3, 0),
6084 BPF_MOV64_IMM(BPF_REG_4, 0),
6085 BPF_MOV64_IMM(BPF_REG_5, 0),
6086 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6087 BPF_EXIT_INSN(),
6088 },
6089 .fixup_map1 = { 3 },
6090 .result = ACCEPT,
6091 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6092 },
6093 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006094 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006095 .insns = {
6096 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6097 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6099 BPF_LD_MAP_FD(BPF_REG_1, 0),
6100 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6101 BPF_FUNC_map_lookup_elem),
6102 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6103 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6104 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
6105 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6107 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6108 BPF_MOV64_IMM(BPF_REG_3, 0),
6109 BPF_MOV64_IMM(BPF_REG_4, 0),
6110 BPF_MOV64_IMM(BPF_REG_5, 0),
6111 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6112 BPF_EXIT_INSN(),
6113 },
6114 .fixup_map1 = { 3 },
6115 .result = ACCEPT,
6116 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6117 },
6118 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006119 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006120 .insns = {
6121 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6122 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6124 BPF_LD_MAP_FD(BPF_REG_1, 0),
6125 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6126 BPF_FUNC_map_lookup_elem),
6127 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6129 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6130 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6131 BPF_MOV64_IMM(BPF_REG_3, 0),
6132 BPF_MOV64_IMM(BPF_REG_4, 0),
6133 BPF_MOV64_IMM(BPF_REG_5, 0),
6134 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6135 BPF_EXIT_INSN(),
6136 },
6137 .fixup_map1 = { 3 },
6138 .result = ACCEPT,
6139 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6140 },
6141 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006142 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08006143 .insns = {
6144 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6145 offsetof(struct __sk_buff, data)),
6146 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6147 offsetof(struct __sk_buff, data_end)),
6148 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
6149 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6150 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
6151 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
6152 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
6153 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6154 BPF_MOV64_IMM(BPF_REG_3, 0),
6155 BPF_MOV64_IMM(BPF_REG_4, 0),
6156 BPF_MOV64_IMM(BPF_REG_5, 0),
6157 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6158 BPF_EXIT_INSN(),
6159 },
6160 .result = ACCEPT,
Gianluca Borello06c1c042017-01-09 10:19:49 -08006161 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006162 .retval = 0 /* csum_diff of 64-byte packet */,
Gianluca Borello06c1c042017-01-09 10:19:49 -08006163 },
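	/*
	 * Counterpart group for helpers whose memory argument is not
	 * ARG_PTR_TO_MEM_OR_NULL, using bpf_probe_read(): a NULL pointer is
	 * rejected even with size 0, whereas a non-NULL stack or map pointer
	 * is accepted, including when the size may evaluate to 0 at run time.
	 */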
6164 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00006165 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6166 .insns = {
6167 BPF_MOV64_IMM(BPF_REG_1, 0),
6168 BPF_MOV64_IMM(BPF_REG_2, 0),
6169 BPF_MOV64_IMM(BPF_REG_3, 0),
6170 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6171 BPF_EXIT_INSN(),
6172 },
6173 .errstr = "R1 type=inv expected=fp",
6174 .result = REJECT,
6175 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6176 },
6177 {
6178 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6179 .insns = {
6180 BPF_MOV64_IMM(BPF_REG_1, 0),
6181 BPF_MOV64_IMM(BPF_REG_2, 1),
6182 BPF_MOV64_IMM(BPF_REG_3, 0),
6183 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6184 BPF_EXIT_INSN(),
6185 },
6186 .errstr = "R1 type=inv expected=fp",
6187 .result = REJECT,
6188 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6189 },
6190 {
6191 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6192 .insns = {
6193 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6195 BPF_MOV64_IMM(BPF_REG_2, 0),
6196 BPF_MOV64_IMM(BPF_REG_3, 0),
6197 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6198 BPF_EXIT_INSN(),
6199 },
6200 .result = ACCEPT,
6201 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6202 },
6203 {
6204 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6205 .insns = {
6206 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6209 BPF_LD_MAP_FD(BPF_REG_1, 0),
6210 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6211 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6212 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6213 BPF_MOV64_IMM(BPF_REG_2, 0),
6214 BPF_MOV64_IMM(BPF_REG_3, 0),
6215 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6216 BPF_EXIT_INSN(),
6217 },
6218 .fixup_map1 = { 3 },
6219 .result = ACCEPT,
6220 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6221 },
6222 {
6223 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6224 .insns = {
6225 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6226 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6228 BPF_LD_MAP_FD(BPF_REG_1, 0),
6229 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6230 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6231 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6232 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6233 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6235 BPF_MOV64_IMM(BPF_REG_3, 0),
6236 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6237 BPF_EXIT_INSN(),
6238 },
6239 .fixup_map1 = { 3 },
6240 .result = ACCEPT,
6241 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6242 },
6243 {
6244 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6245 .insns = {
6246 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6247 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6249 BPF_LD_MAP_FD(BPF_REG_1, 0),
6250 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6251 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6252 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6253 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6254 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
6255 BPF_MOV64_IMM(BPF_REG_3, 0),
6256 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6257 BPF_EXIT_INSN(),
6258 },
6259 .fixup_map1 = { 3 },
6260 .result = ACCEPT,
6261 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6262 },
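	/*
	 * Stack initialization checks for a variable-size helper argument:
	 * the "8 bytes leak" test leaves fp-32 uninitialized inside the
	 * 64-byte window passed to bpf_probe_read() and must be rejected,
	 * while the follow-up test initializes the whole window and passes.
	 */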
6263 {
Gianluca Borello06c1c042017-01-09 10:19:49 -08006264 "helper access to variable memory: 8 bytes leak",
6265 .insns = {
6266 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6268 BPF_MOV64_IMM(BPF_REG_0, 0),
6269 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6270 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6271 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6272 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6273 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6274 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6275 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08006276 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01006277 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6278 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006279 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
6280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6281 BPF_MOV64_IMM(BPF_REG_3, 0),
6282 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6283 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6284 BPF_EXIT_INSN(),
6285 },
6286 .errstr = "invalid indirect read from stack off -64+32 size 64",
6287 .result = REJECT,
6288 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6289 },
6290 {
6291 "helper access to variable memory: 8 bytes no leak (init memory)",
6292 .insns = {
6293 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6294 BPF_MOV64_IMM(BPF_REG_0, 0),
6295 BPF_MOV64_IMM(BPF_REG_0, 0),
6296 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6297 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6298 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6299 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6300 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6301 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6302 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6303 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6304 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6305 BPF_MOV64_IMM(BPF_REG_2, 0),
6306 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
6307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
6308 BPF_MOV64_IMM(BPF_REG_3, 0),
6309 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6310 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6311 BPF_EXIT_INSN(),
6312 },
6313 .result = ACCEPT,
6314 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6315 },
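	/*
	 * Two rejection tests for map value indexing with badly bounded
	 * scalars: masking the index with a negative constant, and the
	 * mod/and/shift sequence of "invalid range check", both leave R0
	 * with a max value outside of the array range.
	 */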
Josef Bacik29200c12017-02-03 16:25:23 -05006316 {
6317 "invalid and of negative number",
6318 .insns = {
6319 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6320 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6321 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6322 BPF_LD_MAP_FD(BPF_REG_1, 0),
6323 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6324 BPF_FUNC_map_lookup_elem),
6325 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Edward Creef65b1842017-08-07 15:27:12 +01006326 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik29200c12017-02-03 16:25:23 -05006327 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
6328 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
6329 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6330 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
6331 offsetof(struct test_val, foo)),
6332 BPF_EXIT_INSN(),
6333 },
6334 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006335 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05006336 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006337 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik29200c12017-02-03 16:25:23 -05006338 },
6339 {
6340 "invalid range check",
6341 .insns = {
6342 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6343 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6344 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6345 BPF_LD_MAP_FD(BPF_REG_1, 0),
6346 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6347 BPF_FUNC_map_lookup_elem),
6348 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
6349 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6350 BPF_MOV64_IMM(BPF_REG_9, 1),
6351 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
6352 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
6353 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
6354 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
6355 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
6356 BPF_MOV32_IMM(BPF_REG_3, 1),
6357 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
6358 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
6359 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
6360 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
6361	BPF_MOV64_IMM(BPF_REG_0, 0),
6362 BPF_EXIT_INSN(),
6363 },
6364 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006365 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05006366 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006367 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006368 },
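	/*
	 * Map-in-map tests: the outer lookup returns a pointer to an inner
	 * map that may only be used as-is and only after a NULL check.
	 * Adding an offset to the inner map pointer, or skipping the NULL
	 * check, must fail with the error strings quoted below.
	 */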
6369 {
6370 "map in map access",
6371 .insns = {
6372 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6373 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6375 BPF_LD_MAP_FD(BPF_REG_1, 0),
6376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6377 BPF_FUNC_map_lookup_elem),
6378 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6379 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6380 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6381 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6382 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6384 BPF_FUNC_map_lookup_elem),
6385	BPF_MOV64_IMM(BPF_REG_0, 0),
6386 BPF_EXIT_INSN(),
6387 },
6388 .fixup_map_in_map = { 3 },
6389 .result = ACCEPT,
6390 },
6391 {
6392 "invalid inner map pointer",
6393 .insns = {
6394 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6395 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6397 BPF_LD_MAP_FD(BPF_REG_1, 0),
6398 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6399 BPF_FUNC_map_lookup_elem),
6400 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6401 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6402 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6404 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6407 BPF_FUNC_map_lookup_elem),
6408	BPF_MOV64_IMM(BPF_REG_0, 0),
6409 BPF_EXIT_INSN(),
6410 },
6411 .fixup_map_in_map = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08006412 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006413 .result = REJECT,
6414 },
6415 {
6416 "forgot null checking on the inner map pointer",
6417 .insns = {
6418 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6419 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6421 BPF_LD_MAP_FD(BPF_REG_1, 0),
6422 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6423 BPF_FUNC_map_lookup_elem),
6424 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6425 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6426 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6427 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6428 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6429 BPF_FUNC_map_lookup_elem),
6430	BPF_MOV64_IMM(BPF_REG_0, 0),
6431 BPF_EXIT_INSN(),
6432 },
6433 .fixup_map_in_map = { 3 },
6434 .errstr = "R1 type=map_value_or_null expected=map_ptr",
6435 .result = REJECT,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006436 },
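	/*
	 * LD_ABS calling convention: the legacy packet-load instructions
	 * clobber the caller-saved registers R1-R5, so reading any of them
	 * afterwards must fail with "!read_ok"; callee-saved R7 survives and
	 * that variant is accepted. The r6/skb reload test additionally
	 * checks that LD_ABS works again once the saved context is restored
	 * into R6 after a helper call.
	 */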
6437 {
6438 "ld_abs: check calling conv, r1",
6439 .insns = {
6440 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6441 BPF_MOV64_IMM(BPF_REG_1, 0),
6442 BPF_LD_ABS(BPF_W, -0x200000),
6443 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6444 BPF_EXIT_INSN(),
6445 },
6446 .errstr = "R1 !read_ok",
6447 .result = REJECT,
6448 },
6449 {
6450 "ld_abs: check calling conv, r2",
6451 .insns = {
6452 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6453 BPF_MOV64_IMM(BPF_REG_2, 0),
6454 BPF_LD_ABS(BPF_W, -0x200000),
6455 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6456 BPF_EXIT_INSN(),
6457 },
6458 .errstr = "R2 !read_ok",
6459 .result = REJECT,
6460 },
6461 {
6462 "ld_abs: check calling conv, r3",
6463 .insns = {
6464 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6465 BPF_MOV64_IMM(BPF_REG_3, 0),
6466 BPF_LD_ABS(BPF_W, -0x200000),
6467 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6468 BPF_EXIT_INSN(),
6469 },
6470 .errstr = "R3 !read_ok",
6471 .result = REJECT,
6472 },
6473 {
6474 "ld_abs: check calling conv, r4",
6475 .insns = {
6476 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6477 BPF_MOV64_IMM(BPF_REG_4, 0),
6478 BPF_LD_ABS(BPF_W, -0x200000),
6479 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6480 BPF_EXIT_INSN(),
6481 },
6482 .errstr = "R4 !read_ok",
6483 .result = REJECT,
6484 },
6485 {
6486 "ld_abs: check calling conv, r5",
6487 .insns = {
6488 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6489 BPF_MOV64_IMM(BPF_REG_5, 0),
6490 BPF_LD_ABS(BPF_W, -0x200000),
6491 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6492 BPF_EXIT_INSN(),
6493 },
6494 .errstr = "R5 !read_ok",
6495 .result = REJECT,
6496 },
6497 {
6498 "ld_abs: check calling conv, r7",
6499 .insns = {
6500 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6501 BPF_MOV64_IMM(BPF_REG_7, 0),
6502 BPF_LD_ABS(BPF_W, -0x200000),
6503 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6504 BPF_EXIT_INSN(),
6505 },
6506 .result = ACCEPT,
6507 },
6508 {
Daniel Borkmann87ab8192017-12-14 21:07:27 +01006509 "ld_abs: tests on r6 and skb data reload helper",
6510 .insns = {
6511 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6512 BPF_LD_ABS(BPF_B, 0),
6513 BPF_LD_ABS(BPF_H, 0),
6514 BPF_LD_ABS(BPF_W, 0),
6515 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
6516 BPF_MOV64_IMM(BPF_REG_6, 0),
6517 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
6518 BPF_MOV64_IMM(BPF_REG_2, 1),
6519 BPF_MOV64_IMM(BPF_REG_3, 2),
6520 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6521 BPF_FUNC_skb_vlan_push),
6522 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
6523 BPF_LD_ABS(BPF_B, 0),
6524 BPF_LD_ABS(BPF_H, 0),
6525 BPF_LD_ABS(BPF_W, 0),
6526 BPF_MOV64_IMM(BPF_REG_0, 42),
6527 BPF_EXIT_INSN(),
6528 },
6529 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6530 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006531 .retval = 42 /* ultimate return value */,
Daniel Borkmann87ab8192017-12-14 21:07:27 +01006532 },
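	/* LD_IND variants of the same calling convention checks as above. */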
6533 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006534 "ld_ind: check calling conv, r1",
6535 .insns = {
6536 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6537 BPF_MOV64_IMM(BPF_REG_1, 1),
6538 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
6539 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6540 BPF_EXIT_INSN(),
6541 },
6542 .errstr = "R1 !read_ok",
6543 .result = REJECT,
6544 },
6545 {
6546 "ld_ind: check calling conv, r2",
6547 .insns = {
6548 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6549 BPF_MOV64_IMM(BPF_REG_2, 1),
6550 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
6551 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6552 BPF_EXIT_INSN(),
6553 },
6554 .errstr = "R2 !read_ok",
6555 .result = REJECT,
6556 },
6557 {
6558 "ld_ind: check calling conv, r3",
6559 .insns = {
6560 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6561 BPF_MOV64_IMM(BPF_REG_3, 1),
6562 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
6563 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6564 BPF_EXIT_INSN(),
6565 },
6566 .errstr = "R3 !read_ok",
6567 .result = REJECT,
6568 },
6569 {
6570 "ld_ind: check calling conv, r4",
6571 .insns = {
6572 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6573 BPF_MOV64_IMM(BPF_REG_4, 1),
6574 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
6575 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6576 BPF_EXIT_INSN(),
6577 },
6578 .errstr = "R4 !read_ok",
6579 .result = REJECT,
6580 },
6581 {
6582 "ld_ind: check calling conv, r5",
6583 .insns = {
6584 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6585 BPF_MOV64_IMM(BPF_REG_5, 1),
6586 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
6587 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6588 BPF_EXIT_INSN(),
6589 },
6590 .errstr = "R5 !read_ok",
6591 .result = REJECT,
6592 },
6593 {
6594 "ld_ind: check calling conv, r7",
6595 .insns = {
6596 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6597 BPF_MOV64_IMM(BPF_REG_7, 1),
6598 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
6599 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6600 BPF_EXIT_INSN(),
6601 },
6602 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006603 .retval = 1,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006604 },
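	/*
	 * Narrow context loads: byte, half and word reads of the 64-bit
	 * sample_period field are permitted for perf event programs, with
	 * the offset adjusted for big-endian hosts via __BYTE_ORDER, while
	 * the narrow loads of skb->data and skb->tc_classid further below
	 * must be rejected as invalid bpf_context access.
	 */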
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006605 {
6606 "check bpf_perf_event_data->sample_period byte load permitted",
6607 .insns = {
6608 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006609#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006610 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6611 offsetof(struct bpf_perf_event_data, sample_period)),
6612#else
6613 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6614 offsetof(struct bpf_perf_event_data, sample_period) + 7),
6615#endif
6616 BPF_EXIT_INSN(),
6617 },
6618 .result = ACCEPT,
6619 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6620 },
6621 {
6622 "check bpf_perf_event_data->sample_period half load permitted",
6623 .insns = {
6624 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006625#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006626 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6627 offsetof(struct bpf_perf_event_data, sample_period)),
6628#else
6629 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6630 offsetof(struct bpf_perf_event_data, sample_period) + 6),
6631#endif
6632 BPF_EXIT_INSN(),
6633 },
6634 .result = ACCEPT,
6635 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6636 },
6637 {
6638 "check bpf_perf_event_data->sample_period word load permitted",
6639 .insns = {
6640 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006641#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006642 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6643 offsetof(struct bpf_perf_event_data, sample_period)),
6644#else
6645 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6646 offsetof(struct bpf_perf_event_data, sample_period) + 4),
6647#endif
6648 BPF_EXIT_INSN(),
6649 },
6650 .result = ACCEPT,
6651 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6652 },
6653 {
6654 "check bpf_perf_event_data->sample_period dword load permitted",
6655 .insns = {
6656 BPF_MOV64_IMM(BPF_REG_0, 0),
6657 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6658 offsetof(struct bpf_perf_event_data, sample_period)),
6659 BPF_EXIT_INSN(),
6660 },
6661 .result = ACCEPT,
6662 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6663 },
6664 {
6665 "check skb->data half load not permitted",
6666 .insns = {
6667 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006668#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006669 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6670 offsetof(struct __sk_buff, data)),
6671#else
6672 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6673 offsetof(struct __sk_buff, data) + 2),
6674#endif
6675 BPF_EXIT_INSN(),
6676 },
6677 .result = REJECT,
6678 .errstr = "invalid bpf_context access",
6679 },
6680 {
6681 "check skb->tc_classid half load not permitted for lwt prog",
6682 .insns = {
6683 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006684#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006685 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6686 offsetof(struct __sk_buff, tc_classid)),
6687#else
6688 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6689 offsetof(struct __sk_buff, tc_classid) + 2),
6690#endif
6691 BPF_EXIT_INSN(),
6692 },
6693 .result = REJECT,
6694 .errstr = "invalid bpf_context access",
6695 .prog_type = BPF_PROG_TYPE_LWT_IN,
6696 },
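	/*
	 * "Mixing signed and unsigned" series: an unsigned comparison
	 * followed by a signed one (or vice versa) must not allow a register
	 * whose signed minimum is still unbounded to be used as a map value
	 * offset. Most variants are rejected with "unbounded min value";
	 * the combinations that do pin both bounds are accepted.
	 */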
Edward Creeb7122962017-07-21 00:00:24 +02006697 {
6698 "bounds checks mixing signed and unsigned, positive bounds",
6699 .insns = {
6700 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6701 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6703 BPF_LD_MAP_FD(BPF_REG_1, 0),
6704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6705 BPF_FUNC_map_lookup_elem),
6706 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6707 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6708 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6709 BPF_MOV64_IMM(BPF_REG_2, 2),
6710 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6711 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6712 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6713 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6714 BPF_MOV64_IMM(BPF_REG_0, 0),
6715 BPF_EXIT_INSN(),
6716 },
6717 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006718 .errstr = "unbounded min value",
Edward Creeb7122962017-07-21 00:00:24 +02006719 .result = REJECT,
Edward Creeb7122962017-07-21 00:00:24 +02006720 },
6721 {
6722 "bounds checks mixing signed and unsigned",
6723 .insns = {
6724 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6725 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6727 BPF_LD_MAP_FD(BPF_REG_1, 0),
6728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6729 BPF_FUNC_map_lookup_elem),
6730 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6731 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6732 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6733 BPF_MOV64_IMM(BPF_REG_2, -1),
6734 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6735 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6736 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6737 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6738 BPF_MOV64_IMM(BPF_REG_0, 0),
6739 BPF_EXIT_INSN(),
6740 },
6741 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006742 .errstr = "unbounded min value",
Edward Creeb7122962017-07-21 00:00:24 +02006743 .result = REJECT,
Edward Creeb7122962017-07-21 00:00:24 +02006744 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006745 {
6746 "bounds checks mixing signed and unsigned, variant 2",
6747 .insns = {
6748 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6749 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6750 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6751 BPF_LD_MAP_FD(BPF_REG_1, 0),
6752 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6753 BPF_FUNC_map_lookup_elem),
6754 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6755 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6756 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6757 BPF_MOV64_IMM(BPF_REG_2, -1),
6758 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6759 BPF_MOV64_IMM(BPF_REG_8, 0),
6760 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6761 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6762 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6763 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6764 BPF_MOV64_IMM(BPF_REG_0, 0),
6765 BPF_EXIT_INSN(),
6766 },
6767 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006768 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006769 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006770 },
6771 {
6772 "bounds checks mixing signed and unsigned, variant 3",
6773 .insns = {
6774 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6775 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6777 BPF_LD_MAP_FD(BPF_REG_1, 0),
6778 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6779 BPF_FUNC_map_lookup_elem),
6780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6781 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6782 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6783 BPF_MOV64_IMM(BPF_REG_2, -1),
6784 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6785 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6786 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6787 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6788 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6789 BPF_MOV64_IMM(BPF_REG_0, 0),
6790 BPF_EXIT_INSN(),
6791 },
6792 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006793 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006794 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006795 },
6796 {
6797 "bounds checks mixing signed and unsigned, variant 4",
6798 .insns = {
6799 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6802 BPF_LD_MAP_FD(BPF_REG_1, 0),
6803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6804 BPF_FUNC_map_lookup_elem),
6805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6806 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6807 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6808 BPF_MOV64_IMM(BPF_REG_2, 1),
6809 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6810 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6811 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6812 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6813 BPF_MOV64_IMM(BPF_REG_0, 0),
6814 BPF_EXIT_INSN(),
6815 },
6816 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006817 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006818 },
6819 {
6820 "bounds checks mixing signed and unsigned, variant 5",
6821 .insns = {
6822 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6823 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6824 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6825 BPF_LD_MAP_FD(BPF_REG_1, 0),
6826 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6827 BPF_FUNC_map_lookup_elem),
6828 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6829 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6830 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6831 BPF_MOV64_IMM(BPF_REG_2, -1),
6832 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6833 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6834 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6835 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6836 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6837 BPF_MOV64_IMM(BPF_REG_0, 0),
6838 BPF_EXIT_INSN(),
6839 },
6840 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006841 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006842 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006843 },
6844 {
6845 "bounds checks mixing signed and unsigned, variant 6",
6846 .insns = {
6847 BPF_MOV64_IMM(BPF_REG_2, 0),
6848 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6850 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6851 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6852 BPF_MOV64_IMM(BPF_REG_6, -1),
6853 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6854 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6855 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6856 BPF_MOV64_IMM(BPF_REG_5, 0),
6857 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6858 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6859 BPF_FUNC_skb_load_bytes),
6860 BPF_MOV64_IMM(BPF_REG_0, 0),
6861 BPF_EXIT_INSN(),
6862 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006863 .errstr = "R4 min value is negative, either use unsigned",
6864 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006865 },
6866 {
6867 "bounds checks mixing signed and unsigned, variant 7",
6868 .insns = {
6869 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6870 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6872 BPF_LD_MAP_FD(BPF_REG_1, 0),
6873 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6874 BPF_FUNC_map_lookup_elem),
6875 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6876 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6877 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6878 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6879 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6880 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6881 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6882 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6883 BPF_MOV64_IMM(BPF_REG_0, 0),
6884 BPF_EXIT_INSN(),
6885 },
6886 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006887 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006888 },
6889 {
6890 "bounds checks mixing signed and unsigned, variant 8",
6891 .insns = {
6892 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6893 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6895 BPF_LD_MAP_FD(BPF_REG_1, 0),
6896 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6897 BPF_FUNC_map_lookup_elem),
Daniel Borkmann86412502017-07-21 00:00:25 +02006898 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6899 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6900 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6901 BPF_MOV64_IMM(BPF_REG_2, -1),
6902 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6903 BPF_MOV64_IMM(BPF_REG_0, 0),
6904 BPF_EXIT_INSN(),
6905 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6906 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6907 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6908 BPF_MOV64_IMM(BPF_REG_0, 0),
6909 BPF_EXIT_INSN(),
6910 },
6911 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006912 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006913 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006914 },
6915 {
Edward Creef65b1842017-08-07 15:27:12 +01006916 "bounds checks mixing signed and unsigned, variant 9",
Daniel Borkmann86412502017-07-21 00:00:25 +02006917 .insns = {
6918 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6919 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6920 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6921 BPF_LD_MAP_FD(BPF_REG_1, 0),
6922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6923 BPF_FUNC_map_lookup_elem),
6924 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6925 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6926 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6927 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6928 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6929 BPF_MOV64_IMM(BPF_REG_0, 0),
6930 BPF_EXIT_INSN(),
6931 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6932 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6933 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6934 BPF_MOV64_IMM(BPF_REG_0, 0),
6935 BPF_EXIT_INSN(),
6936 },
6937 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006938 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006939 },
6940 {
Edward Creef65b1842017-08-07 15:27:12 +01006941 "bounds checks mixing signed and unsigned, variant 10",
Daniel Borkmann86412502017-07-21 00:00:25 +02006942 .insns = {
6943 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6944 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6946 BPF_LD_MAP_FD(BPF_REG_1, 0),
6947 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6948 BPF_FUNC_map_lookup_elem),
6949 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6950 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6951 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6952 BPF_MOV64_IMM(BPF_REG_2, 0),
6953 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6954 BPF_MOV64_IMM(BPF_REG_0, 0),
6955 BPF_EXIT_INSN(),
6956 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6957 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6958 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6959 BPF_MOV64_IMM(BPF_REG_0, 0),
6960 BPF_EXIT_INSN(),
6961 },
6962 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006963 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006964 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006965 },
6966 {
Edward Creef65b1842017-08-07 15:27:12 +01006967 "bounds checks mixing signed and unsigned, variant 11",
Daniel Borkmann86412502017-07-21 00:00:25 +02006968 .insns = {
6969 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6970 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6972 BPF_LD_MAP_FD(BPF_REG_1, 0),
6973 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6974 BPF_FUNC_map_lookup_elem),
6975 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6976 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6977 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6978 BPF_MOV64_IMM(BPF_REG_2, -1),
6979 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6980 /* Dead branch. */
6981 BPF_MOV64_IMM(BPF_REG_0, 0),
6982 BPF_EXIT_INSN(),
6983 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6984 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6985 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6986 BPF_MOV64_IMM(BPF_REG_0, 0),
6987 BPF_EXIT_INSN(),
6988 },
6989 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006990 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006991 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006992 },
6993 {
Edward Creef65b1842017-08-07 15:27:12 +01006994 "bounds checks mixing signed and unsigned, variant 12",
Daniel Borkmann86412502017-07-21 00:00:25 +02006995 .insns = {
6996 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6997 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6999 BPF_LD_MAP_FD(BPF_REG_1, 0),
7000 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7001 BPF_FUNC_map_lookup_elem),
7002 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7003 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7004 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7005 BPF_MOV64_IMM(BPF_REG_2, -6),
7006 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7007 BPF_MOV64_IMM(BPF_REG_0, 0),
7008 BPF_EXIT_INSN(),
7009 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7010 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7011 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7012 BPF_MOV64_IMM(BPF_REG_0, 0),
7013 BPF_EXIT_INSN(),
7014 },
7015 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007016 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007017 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007018 },
7019 {
Edward Creef65b1842017-08-07 15:27:12 +01007020 "bounds checks mixing signed and unsigned, variant 13",
Daniel Borkmann86412502017-07-21 00:00:25 +02007021 .insns = {
7022 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7023 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7025 BPF_LD_MAP_FD(BPF_REG_1, 0),
7026 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7027 BPF_FUNC_map_lookup_elem),
7028 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7029 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7030 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7031 BPF_MOV64_IMM(BPF_REG_2, 2),
7032 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7033 BPF_MOV64_IMM(BPF_REG_7, 1),
7034 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
7035 BPF_MOV64_IMM(BPF_REG_0, 0),
7036 BPF_EXIT_INSN(),
7037 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
7038 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
7039 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
7040 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7041 BPF_MOV64_IMM(BPF_REG_0, 0),
7042 BPF_EXIT_INSN(),
7043 },
7044 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007045 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007046 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007047 },
7048 {
Edward Creef65b1842017-08-07 15:27:12 +01007049 "bounds checks mixing signed and unsigned, variant 14",
Daniel Borkmann86412502017-07-21 00:00:25 +02007050 .insns = {
7051 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
7052 offsetof(struct __sk_buff, mark)),
7053 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7054 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7056 BPF_LD_MAP_FD(BPF_REG_1, 0),
7057 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7058 BPF_FUNC_map_lookup_elem),
7059 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7060 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7061 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7062 BPF_MOV64_IMM(BPF_REG_2, -1),
7063 BPF_MOV64_IMM(BPF_REG_8, 2),
7064 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
7065 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
7066 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7067 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7068 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7069 BPF_MOV64_IMM(BPF_REG_0, 0),
7070 BPF_EXIT_INSN(),
7071 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
7072 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
7073 },
7074 .fixup_map1 = { 4 },
Daniel Borkmann6f161012018-01-18 01:15:21 +01007075 .errstr = "R0 invalid mem access 'inv'",
Daniel Borkmann86412502017-07-21 00:00:25 +02007076 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007077 },
7078 {
Edward Creef65b1842017-08-07 15:27:12 +01007079 "bounds checks mixing signed and unsigned, variant 15",
Daniel Borkmann86412502017-07-21 00:00:25 +02007080 .insns = {
7081 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7082 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7084 BPF_LD_MAP_FD(BPF_REG_1, 0),
7085 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7086 BPF_FUNC_map_lookup_elem),
7087 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7088 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7089 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7090 BPF_MOV64_IMM(BPF_REG_2, -6),
7091 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7092 BPF_MOV64_IMM(BPF_REG_0, 0),
7093 BPF_EXIT_INSN(),
7094 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7095 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
7096 BPF_MOV64_IMM(BPF_REG_0, 0),
7097 BPF_EXIT_INSN(),
7098 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7099 BPF_MOV64_IMM(BPF_REG_0, 0),
7100 BPF_EXIT_INSN(),
7101 },
7102 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007103 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007104 .result = REJECT,
7105 .result_unpriv = REJECT,
7106 },
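	/*
	 * Subtraction of two bounded map-value bytes yields a range that
	 * crosses zero; whether the result is right-shifted first (variant 1)
	 * or added to the pointer directly (variant 2), the access must be
	 * rejected with the error strings given below.
	 */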
Edward Cree545722c2017-07-21 14:36:57 +01007107 {
Edward Creef65b1842017-08-07 15:27:12 +01007108 "subtraction bounds (map value) variant 1",
Edward Cree545722c2017-07-21 14:36:57 +01007109 .insns = {
7110 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7111 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7112 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7113 BPF_LD_MAP_FD(BPF_REG_1, 0),
7114 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7115 BPF_FUNC_map_lookup_elem),
7116 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7117 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7118 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
7119 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7120 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
7121 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7122 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
7123 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7124 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7125 BPF_EXIT_INSN(),
7126 BPF_MOV64_IMM(BPF_REG_0, 0),
7127 BPF_EXIT_INSN(),
7128 },
7129 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007130 .errstr = "R0 max value is outside of the array range",
7131 .result = REJECT,
7132 },
7133 {
7134 "subtraction bounds (map value) variant 2",
7135 .insns = {
7136 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7137 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7139 BPF_LD_MAP_FD(BPF_REG_1, 0),
7140 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7141 BPF_FUNC_map_lookup_elem),
7142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7143 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7144 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
7145 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7146 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
7147 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7148 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7149 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7150 BPF_EXIT_INSN(),
7151 BPF_MOV64_IMM(BPF_REG_0, 0),
7152 BPF_EXIT_INSN(),
7153 },
7154 .fixup_map1 = { 3 },
Edward Cree545722c2017-07-21 14:36:57 +01007155 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
7156 .result = REJECT,
Edward Cree545722c2017-07-21 14:36:57 +01007157 },
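	/*
	 * The remaining bounds tests exercise how 32-bit effects (zero- and
	 * sign-extending MOV, ALU32 truncation, wrapping 32-bit addition,
	 * oversized shift counts) interact with the tracked value ranges
	 * before a map value pointer is dereferenced; only the cases where
	 * the truncated range provably stays inside the value are accepted.
	 */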
Edward Cree69c4e8a2017-08-07 15:29:51 +01007158 {
Jann Horn2255f8d2017-12-18 20:12:01 -08007159 "bounds check based on zero-extended MOV",
7160 .insns = {
7161 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7162 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7163 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7164 BPF_LD_MAP_FD(BPF_REG_1, 0),
7165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7166 BPF_FUNC_map_lookup_elem),
7167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7168 /* r2 = 0x0000'0000'ffff'ffff */
7169 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
7170 /* r2 = 0 */
7171 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7172 /* no-op */
7173 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7174 /* access at offset 0 */
7175 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7176 /* exit */
7177 BPF_MOV64_IMM(BPF_REG_0, 0),
7178 BPF_EXIT_INSN(),
7179 },
7180 .fixup_map1 = { 3 },
7181 .result = ACCEPT
7182 },
7183 {
7184 "bounds check based on sign-extended MOV. test1",
7185 .insns = {
7186 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7189 BPF_LD_MAP_FD(BPF_REG_1, 0),
7190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7191 BPF_FUNC_map_lookup_elem),
7192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7193 /* r2 = 0xffff'ffff'ffff'ffff */
7194 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7195 /* r2 = 0xffff'ffff */
7196 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7197 /* r0 = <oob pointer> */
7198 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7199 /* access to OOB pointer */
7200 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7201 /* exit */
7202 BPF_MOV64_IMM(BPF_REG_0, 0),
7203 BPF_EXIT_INSN(),
7204 },
7205 .fixup_map1 = { 3 },
7206 .errstr = "map_value pointer and 4294967295",
7207 .result = REJECT
7208 },
7209 {
7210 "bounds check based on sign-extended MOV. test2",
7211 .insns = {
7212 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7213 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7215 BPF_LD_MAP_FD(BPF_REG_1, 0),
7216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7217 BPF_FUNC_map_lookup_elem),
7218 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7219 /* r2 = 0xffff'ffff'ffff'ffff */
7220 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7221 /* r2 = 0xfff'ffff */
7222 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
7223 /* r0 = <oob pointer> */
7224 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7225 /* access to OOB pointer */
7226 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7227 /* exit */
7228 BPF_MOV64_IMM(BPF_REG_0, 0),
7229 BPF_EXIT_INSN(),
7230 },
7231 .fixup_map1 = { 3 },
7232 .errstr = "R0 min value is outside of the array range",
7233 .result = REJECT
7234 },
7235 {
7236 "bounds check based on reg_off + var_off + insn_off. test1",
7237 .insns = {
7238 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7239 offsetof(struct __sk_buff, mark)),
7240 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7241 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7243 BPF_LD_MAP_FD(BPF_REG_1, 0),
7244 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7245 BPF_FUNC_map_lookup_elem),
7246 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7247 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
7249 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7250 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7251 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7252 BPF_MOV64_IMM(BPF_REG_0, 0),
7253 BPF_EXIT_INSN(),
7254 },
7255 .fixup_map1 = { 4 },
7256 .errstr = "value_size=8 off=1073741825",
7257 .result = REJECT,
7258 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7259 },
7260 {
7261 "bounds check based on reg_off + var_off + insn_off. test2",
7262 .insns = {
7263 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7264 offsetof(struct __sk_buff, mark)),
7265 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7266 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7268 BPF_LD_MAP_FD(BPF_REG_1, 0),
7269 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7270 BPF_FUNC_map_lookup_elem),
7271 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7272 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7273 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
7274 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7275 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7276 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7277 BPF_MOV64_IMM(BPF_REG_0, 0),
7278 BPF_EXIT_INSN(),
7279 },
7280 .fixup_map1 = { 4 },
7281 .errstr = "value 1073741823",
7282 .result = REJECT,
7283 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7284 },
7285 {
7286 "bounds check after truncation of non-boundary-crossing range",
7287 .insns = {
7288 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7289 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7291 BPF_LD_MAP_FD(BPF_REG_1, 0),
7292 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7293 BPF_FUNC_map_lookup_elem),
7294 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7295 /* r1 = [0x00, 0xff] */
7296 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7297 BPF_MOV64_IMM(BPF_REG_2, 1),
7298 /* r2 = 0x10'0000'0000 */
7299 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
7300 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
7301 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7302 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
7303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7304 /* r1 = [0x00, 0xff] */
7305 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
7306 /* r1 = 0 */
7307 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7308 /* no-op */
7309 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7310 /* access at offset 0 */
7311 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7312 /* exit */
7313 BPF_MOV64_IMM(BPF_REG_0, 0),
7314 BPF_EXIT_INSN(),
7315 },
7316 .fixup_map1 = { 3 },
7317 .result = ACCEPT
7318 },
7319 {
7320 "bounds check after truncation of boundary-crossing range (1)",
7321 .insns = {
7322 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7323 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7325 BPF_LD_MAP_FD(BPF_REG_1, 0),
7326 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7327 BPF_FUNC_map_lookup_elem),
7328 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7329 /* r1 = [0x00, 0xff] */
7330 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7332 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7333 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7334 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7335 * [0x0000'0000, 0x0000'007f]
7336 */
7337 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
7338 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7339 /* r1 = [0x00, 0xff] or
7340 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7341 */
7342 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7343 /* r1 = 0 or
7344 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7345 */
7346 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7347 /* no-op or OOB pointer computation */
7348 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7349 /* potentially OOB access */
7350 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7351 /* exit */
7352 BPF_MOV64_IMM(BPF_REG_0, 0),
7353 BPF_EXIT_INSN(),
7354 },
7355 .fixup_map1 = { 3 },
7356 /* not actually fully unbounded, but the bound is very high */
7357 .errstr = "R0 unbounded memory access",
7358 .result = REJECT
7359 },
7360 {
7361 "bounds check after truncation of boundary-crossing range (2)",
7362 .insns = {
7363 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7364 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7365 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7366 BPF_LD_MAP_FD(BPF_REG_1, 0),
7367 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7368 BPF_FUNC_map_lookup_elem),
7369 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7370 /* r1 = [0x00, 0xff] */
7371 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7372 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7373 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7375 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7376 * [0x0000'0000, 0x0000'007f]
7377 * difference to previous test: truncation via MOV32
7378 * instead of ALU32.
7379 */
7380 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
7381 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7382 /* r1 = [0x00, 0xff] or
7383 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7384 */
7385 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7386 /* r1 = 0 or
7387 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7388 */
7389 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7390 /* no-op or OOB pointer computation */
7391 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7392 /* potentially OOB access */
7393 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7394 /* exit */
7395 BPF_MOV64_IMM(BPF_REG_0, 0),
7396 BPF_EXIT_INSN(),
7397 },
7398 .fixup_map1 = { 3 },
7399 /* not actually fully unbounded, but the bound is very high */
7400 .errstr = "R0 unbounded memory access",
7401 .result = REJECT
7402 },
7403 {
7404 "bounds check after wrapping 32-bit addition",
7405 .insns = {
7406 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7407 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7409 BPF_LD_MAP_FD(BPF_REG_1, 0),
7410 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7411 BPF_FUNC_map_lookup_elem),
7412 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7413 /* r1 = 0x7fff'ffff */
7414 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
7415 /* r1 = 0xffff'fffe */
7416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7417 /* r1 = 0 */
7418 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
7419 /* no-op */
7420 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7421 /* access at offset 0 */
7422 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7423 /* exit */
7424 BPF_MOV64_IMM(BPF_REG_0, 0),
7425 BPF_EXIT_INSN(),
7426 },
7427 .fixup_map1 = { 3 },
7428 .result = ACCEPT
7429 },
7430 {
7431 "bounds check after shift with oversized count operand",
7432 .insns = {
7433 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7436 BPF_LD_MAP_FD(BPF_REG_1, 0),
7437 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7438 BPF_FUNC_map_lookup_elem),
7439 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7440 BPF_MOV64_IMM(BPF_REG_2, 32),
7441 BPF_MOV64_IMM(BPF_REG_1, 1),
7442 /* r1 = (u32)1 << (u32)32 = ? */
7443 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
7444 /* r1 = [0x0000, 0xffff] */
7445 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
7446 /* computes unknown pointer, potentially OOB */
7447 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7448 /* potentially OOB access */
7449 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7450 /* exit */
7451 BPF_MOV64_IMM(BPF_REG_0, 0),
7452 BPF_EXIT_INSN(),
7453 },
7454 .fixup_map1 = { 3 },
7455 .errstr = "R0 max value is outside of the array range",
7456 .result = REJECT
7457 },
7458 {
7459 "bounds check after right shift of maybe-negative number",
7460 .insns = {
7461 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7462 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7464 BPF_LD_MAP_FD(BPF_REG_1, 0),
7465 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7466 BPF_FUNC_map_lookup_elem),
7467 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7468 /* r1 = [0x00, 0xff] */
7469 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7470 /* r1 = [-0x01, 0xfe] */
7471 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
7472 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7473 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7474 /* r1 = 0 or 0xffff'ffff'ffff */
7475 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7476 /* computes unknown pointer, potentially OOB */
7477 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7478 /* potentially OOB access */
7479 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7480 /* exit */
7481 BPF_MOV64_IMM(BPF_REG_0, 0),
7482 BPF_EXIT_INSN(),
7483 },
7484 .fixup_map1 = { 3 },
7485 .errstr = "R0 unbounded memory access",
7486 .result = REJECT
7487 },
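	/* The four "off+size signed 32bit overflow" tests below push a
	 * map_value pointer around by large constant (or constant-folded)
	 * offsets. Going by the errstr patterns, the expectation is that
	 * the verifier rejects the pointer arithmetic or the access before
	 * the offset can overflow the signed 32-bit off+size computation
	 * used for map value bounds.
	 */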
7488 {
7489 "bounds check map access with off+size signed 32bit overflow. test1",
7490 .insns = {
7491 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7492 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7494 BPF_LD_MAP_FD(BPF_REG_1, 0),
7495 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7496 BPF_FUNC_map_lookup_elem),
7497 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7498 BPF_EXIT_INSN(),
7499 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7500 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7501 BPF_JMP_A(0),
7502 BPF_EXIT_INSN(),
7503 },
7504 .fixup_map1 = { 3 },
7505 .errstr = "map_value pointer and 2147483646",
7506 .result = REJECT
7507 },
7508 {
7509 "bounds check map access with off+size signed 32bit overflow. test2",
7510 .insns = {
7511 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7512 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7514 BPF_LD_MAP_FD(BPF_REG_1, 0),
7515 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7516 BPF_FUNC_map_lookup_elem),
7517 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7518 BPF_EXIT_INSN(),
7519 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7522 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7523 BPF_JMP_A(0),
7524 BPF_EXIT_INSN(),
7525 },
7526 .fixup_map1 = { 3 },
7527 .errstr = "pointer offset 1073741822",
7528 .result = REJECT
7529 },
7530 {
7531 "bounds check map access with off+size signed 32bit overflow. test3",
7532 .insns = {
7533 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7534 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7536 BPF_LD_MAP_FD(BPF_REG_1, 0),
7537 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7538 BPF_FUNC_map_lookup_elem),
7539 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7540 BPF_EXIT_INSN(),
7541 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7542 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7543 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7544 BPF_JMP_A(0),
7545 BPF_EXIT_INSN(),
7546 },
7547 .fixup_map1 = { 3 },
7548 .errstr = "pointer offset -1073741822",
7549 .result = REJECT
7550 },
7551 {
7552 "bounds check map access with off+size signed 32bit overflow. test4",
7553 .insns = {
7554 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7555 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7557 BPF_LD_MAP_FD(BPF_REG_1, 0),
7558 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7559 BPF_FUNC_map_lookup_elem),
7560 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7561 BPF_EXIT_INSN(),
7562 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7563 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7564 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7565 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7566 BPF_JMP_A(0),
7567 BPF_EXIT_INSN(),
7568 },
7569 .fixup_map1 = { 3 },
7570 .errstr = "map_value pointer and 1000000000000",
7571 .result = REJECT
7572 },
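	/* The two "pointer/scalar confusion" tests leave either a value
	 * loaded from the map element (a scalar) or the frame pointer in
	 * r0, depending on the branch taken. State pruning must not treat
	 * the scalar state and the pointer state as equivalent, and an
	 * unprivileged loader must not be able to leak the pointer through
	 * the return value (see errstr_unpriv).
	 */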
7573 {
7574 "pointer/scalar confusion in state equality check (way 1)",
7575 .insns = {
7576 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7577 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7579 BPF_LD_MAP_FD(BPF_REG_1, 0),
7580 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7581 BPF_FUNC_map_lookup_elem),
7582 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7583 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7584 BPF_JMP_A(1),
7585 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7586 BPF_JMP_A(0),
7587 BPF_EXIT_INSN(),
7588 },
7589 .fixup_map1 = { 3 },
7590 .result = ACCEPT,
7591 .retval = POINTER_VALUE,
7592 .result_unpriv = REJECT,
7593 .errstr_unpriv = "R0 leaks addr as return value"
7594 },
7595 {
7596 "pointer/scalar confusion in state equality check (way 2)",
7597 .insns = {
7598 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7599 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7601 BPF_LD_MAP_FD(BPF_REG_1, 0),
7602 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7603 BPF_FUNC_map_lookup_elem),
7604 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7605 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7606 BPF_JMP_A(1),
7607 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7608 BPF_EXIT_INSN(),
7609 },
7610 .fixup_map1 = { 3 },
7611 .result = ACCEPT,
7612 .retval = POINTER_VALUE,
7613 .result_unpriv = REJECT,
7614 .errstr_unpriv = "R0 leaks addr as return value"
7615 },
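	/* The following tests feed an unknown but small, 4-byte-aligned
	 * value into a ctx or stack pointer. Variable-offset ctx access and
	 * variable-offset stack access (direct, and indirect via a helper
	 * argument) are all expected to be rejected, even though the offset
	 * range itself would be harmless.
	 */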
7616 {
7617 "variable-offset ctx access",
7618 .insns = {
7619 /* Get an unknown value */
7620 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7621 /* Make it small and 4-byte aligned */
7622 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7623 /* add it to skb. We now have either &skb->len or
7624 * &skb->pkt_type, but we don't know which
7625 */
7626 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7627 /* dereference it */
7628 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7629 BPF_EXIT_INSN(),
7630 },
7631 .errstr = "variable ctx access var_off=(0x0; 0x4)",
7632 .result = REJECT,
7633 .prog_type = BPF_PROG_TYPE_LWT_IN,
7634 },
7635 {
7636 "variable-offset stack access",
7637 .insns = {
7638 /* Fill the top 8 bytes of the stack */
7639 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7640 /* Get an unknown value */
7641 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7642 /* Make it small and 4-byte aligned */
7643 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7644 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7645 /* add it to fp. We now have either fp-4 or fp-8, but
7646 * we don't know which
7647 */
7648 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7649 /* dereference it */
7650 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
7651 BPF_EXIT_INSN(),
7652 },
7653 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7654 .result = REJECT,
7655 .prog_type = BPF_PROG_TYPE_LWT_IN,
7656 },
7657 {
7658 "indirect variable-offset stack access",
7659 .insns = {
7660 /* Fill the top 8 bytes of the stack */
7661 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7662 /* Get an unknown value */
7663 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7664 /* Make it small and 4-byte aligned */
7665 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7666 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7667 /* add it to fp. We now have either fp-4 or fp-8, but
7668 * we don't know which
7669 */
7670 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7671 /* dereference it indirectly */
7672 BPF_LD_MAP_FD(BPF_REG_1, 0),
7673 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7674 BPF_FUNC_map_lookup_elem),
7675 BPF_MOV64_IMM(BPF_REG_0, 0),
7676 BPF_EXIT_INSN(),
7677 },
7678 .fixup_map1 = { 5 },
7679 .errstr = "variable stack read R2",
7680 .result = REJECT,
7681 .prog_type = BPF_PROG_TYPE_LWT_IN,
7682 },
7683 {
7684 "direct stack access with 32-bit wraparound. test1",
7685 .insns = {
7686 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7689 BPF_MOV32_IMM(BPF_REG_0, 0),
7690 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7691 BPF_EXIT_INSN()
7692 },
7693 .errstr = "fp pointer and 2147483647",
7694 .result = REJECT
7695 },
7696 {
7697 "direct stack access with 32-bit wraparound. test2",
7698 .insns = {
7699 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7702 BPF_MOV32_IMM(BPF_REG_0, 0),
7703 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7704 BPF_EXIT_INSN()
7705 },
7706 .errstr = "fp pointer and 1073741823",
7707 .result = REJECT
7708 },
7709 {
7710 "direct stack access with 32-bit wraparound. test3",
7711 .insns = {
7712 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7715 BPF_MOV32_IMM(BPF_REG_0, 0),
7716 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7717 BPF_EXIT_INSN()
7718 },
7719 .errstr = "fp pointer offset 1073741822",
7720 .result = REJECT
7721 },
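	/* In the next test the unsigned compares against 0 are always true,
	 * so the branches teach the verifier nothing about r2. As far as
	 * one can tell from the errstr, the point is that liveness-based
	 * pruning and write screening must still notice the path on which
	 * r0 is never written before the exit reads it.
	 */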
7722 {
7723 "liveness pruning and write screening",
7724 .insns = {
7725 /* Get an unknown value */
7726 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7727 /* branch conditions teach us nothing about R2 */
7728 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7729 BPF_MOV64_IMM(BPF_REG_0, 0),
7730 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7731 BPF_MOV64_IMM(BPF_REG_0, 0),
7732 BPF_EXIT_INSN(),
7733 },
7734 .errstr = "R0 !read_ok",
7735 .result = REJECT,
7736 .prog_type = BPF_PROG_TYPE_LWT_IN,
7737 },
7738 {
7739 "varlen_map_value_access pruning",
7740 .insns = {
7741 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7742 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7744 BPF_LD_MAP_FD(BPF_REG_1, 0),
7745 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7746 BPF_FUNC_map_lookup_elem),
7747 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7748 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7749 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
7750 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
7751 BPF_MOV32_IMM(BPF_REG_1, 0),
7752 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
7753 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7754 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
7755 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7756 offsetof(struct test_val, foo)),
7757 BPF_EXIT_INSN(),
7758 },
7759 .fixup_map2 = { 3 },
7760 .errstr_unpriv = "R0 leaks addr",
7761 .errstr = "R0 unbounded memory access",
7762 .result_unpriv = REJECT,
7763 .result = REJECT,
7764 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7765 },
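	/* The hand-built instruction in the next test encodes BPF_END in
	 * the BPF_ALU64 class. Byte swaps are only defined for the 32-bit
	 * BPF_ALU class, so the resulting encoding must be rejected as an
	 * unknown opcode (0xd7, matching the errstr).
	 */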
7766 {
7767 "invalid 64-bit BPF_END",
7768 .insns = {
7769 BPF_MOV32_IMM(BPF_REG_0, 0),
7770 {
7771 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
7772 .dst_reg = BPF_REG_0,
7773 .src_reg = 0,
7774 .off = 0,
7775 .imm = 32,
7776 },
7777 BPF_EXIT_INSN(),
7778 },
7779 .errstr = "unknown opcode d7",
7780 .result = REJECT,
7781 },
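	/* The "meta access" tests exercise xdp->data_meta, which sits in
	 * front of xdp->data. The rule they appear to enforce is that a
	 * meta pointer may only be dereferenced after it has been
	 * bounds-checked against data (not data_end), mirroring how data is
	 * checked against data_end before packet reads; test5 additionally
	 * shows the old meta pointer becoming unusable after a call to
	 * bpf_xdp_adjust_meta().
	 */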
7782 {
7783 "meta access, test1",
7784 .insns = {
7785 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7786 offsetof(struct xdp_md, data_meta)),
7787 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7788 offsetof(struct xdp_md, data)),
7789 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7790 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7791 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7792 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7793 BPF_MOV64_IMM(BPF_REG_0, 0),
7794 BPF_EXIT_INSN(),
7795 },
7796 .result = ACCEPT,
7797 .prog_type = BPF_PROG_TYPE_XDP,
7798 },
7799 {
7800 "meta access, test2",
7801 .insns = {
7802 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7803 offsetof(struct xdp_md, data_meta)),
7804 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7805 offsetof(struct xdp_md, data)),
7806 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7807 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
7808 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7810 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7811 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7812 BPF_MOV64_IMM(BPF_REG_0, 0),
7813 BPF_EXIT_INSN(),
7814 },
7815 .result = REJECT,
7816 .errstr = "invalid access to packet, off=-8",
7817 .prog_type = BPF_PROG_TYPE_XDP,
7818 },
7819 {
7820 "meta access, test3",
7821 .insns = {
7822 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7823 offsetof(struct xdp_md, data_meta)),
7824 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7825 offsetof(struct xdp_md, data_end)),
7826 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7828 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7829 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7830 BPF_MOV64_IMM(BPF_REG_0, 0),
7831 BPF_EXIT_INSN(),
7832 },
7833 .result = REJECT,
7834 .errstr = "invalid access to packet",
7835 .prog_type = BPF_PROG_TYPE_XDP,
7836 },
7837 {
7838 "meta access, test4",
7839 .insns = {
7840 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7841 offsetof(struct xdp_md, data_meta)),
7842 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7843 offsetof(struct xdp_md, data_end)),
7844 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7845 offsetof(struct xdp_md, data)),
7846 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7847 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7848 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7849 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7850 BPF_MOV64_IMM(BPF_REG_0, 0),
7851 BPF_EXIT_INSN(),
7852 },
7853 .result = REJECT,
7854 .errstr = "invalid access to packet",
7855 .prog_type = BPF_PROG_TYPE_XDP,
7856 },
7857 {
7858 "meta access, test5",
7859 .insns = {
7860 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7861 offsetof(struct xdp_md, data_meta)),
7862 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7863 offsetof(struct xdp_md, data)),
7864 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7866 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
7867 BPF_MOV64_IMM(BPF_REG_2, -8),
7868 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7869 BPF_FUNC_xdp_adjust_meta),
7870 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
7871 BPF_MOV64_IMM(BPF_REG_0, 0),
7872 BPF_EXIT_INSN(),
7873 },
7874 .result = REJECT,
7875 .errstr = "R3 !read_ok",
7876 .prog_type = BPF_PROG_TYPE_XDP,
7877 },
7878 {
7879 "meta access, test6",
7880 .insns = {
7881 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7882 offsetof(struct xdp_md, data_meta)),
7883 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7884 offsetof(struct xdp_md, data)),
7885 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7886 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7887 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7889 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
7890 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7891 BPF_MOV64_IMM(BPF_REG_0, 0),
7892 BPF_EXIT_INSN(),
7893 },
7894 .result = REJECT,
7895 .errstr = "invalid access to packet",
7896 .prog_type = BPF_PROG_TYPE_XDP,
7897 },
7898 {
7899 "meta access, test7",
7900 .insns = {
7901 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7902 offsetof(struct xdp_md, data_meta)),
7903 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7904 offsetof(struct xdp_md, data)),
7905 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7906 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7907 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7909 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7910 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7911 BPF_MOV64_IMM(BPF_REG_0, 0),
7912 BPF_EXIT_INSN(),
7913 },
7914 .result = ACCEPT,
7915 .prog_type = BPF_PROG_TYPE_XDP,
7916 },
7917 {
7918 "meta access, test8",
7919 .insns = {
7920 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7921 offsetof(struct xdp_md, data_meta)),
7922 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7923 offsetof(struct xdp_md, data)),
7924 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7925 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7926 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7927 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7928 BPF_MOV64_IMM(BPF_REG_0, 0),
7929 BPF_EXIT_INSN(),
7930 },
7931 .result = ACCEPT,
7932 .prog_type = BPF_PROG_TYPE_XDP,
7933 },
7934 {
7935 "meta access, test9",
7936 .insns = {
7937 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7938 offsetof(struct xdp_md, data_meta)),
7939 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7940 offsetof(struct xdp_md, data)),
7941 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7944 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7945 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7946 BPF_MOV64_IMM(BPF_REG_0, 0),
7947 BPF_EXIT_INSN(),
7948 },
7949 .result = REJECT,
7950 .errstr = "invalid access to packet",
7951 .prog_type = BPF_PROG_TYPE_XDP,
7952 },
7953 {
7954 "meta access, test10",
7955 .insns = {
7956 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7957 offsetof(struct xdp_md, data_meta)),
7958 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7959 offsetof(struct xdp_md, data)),
7960 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7961 offsetof(struct xdp_md, data_end)),
7962 BPF_MOV64_IMM(BPF_REG_5, 42),
7963 BPF_MOV64_IMM(BPF_REG_6, 24),
7964 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
7965 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
7966 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
7967 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
7968 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
7969 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
7970 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
7971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
7972 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
7973 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
7974 BPF_MOV64_IMM(BPF_REG_0, 0),
7975 BPF_EXIT_INSN(),
7976 },
7977 .result = REJECT,
7978 .errstr = "invalid access to packet",
7979 .prog_type = BPF_PROG_TYPE_XDP,
7980 },
7981 {
7982 "meta access, test11",
7983 .insns = {
7984 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7985 offsetof(struct xdp_md, data_meta)),
7986 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7987 offsetof(struct xdp_md, data)),
7988 BPF_MOV64_IMM(BPF_REG_5, 42),
7989 BPF_MOV64_IMM(BPF_REG_6, 24),
7990 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
7991 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
7992 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
7993 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
7994 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
7995 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
7996 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
7997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
7998 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
7999 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
8000 BPF_MOV64_IMM(BPF_REG_0, 0),
8001 BPF_EXIT_INSN(),
8002 },
8003 .result = ACCEPT,
8004 .prog_type = BPF_PROG_TYPE_XDP,
8005 },
8006 {
8007 "meta access, test12",
8008 .insns = {
8009 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8010 offsetof(struct xdp_md, data_meta)),
8011 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8012 offsetof(struct xdp_md, data)),
8013 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8014 offsetof(struct xdp_md, data_end)),
8015 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8016 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8017 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
8018 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8019 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8020 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8021 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
8022 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8023 BPF_MOV64_IMM(BPF_REG_0, 0),
8024 BPF_EXIT_INSN(),
8025 },
8026 .result = ACCEPT,
8027 .prog_type = BPF_PROG_TYPE_XDP,
8028 },
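	/* The next test adds a constant to the ctx register and then loads
	 * at another non-zero constant offset, which the verifier treats as
	 * ctx+const+const; per the errstr, only a single constant offset
	 * from ctx is tolerated.
	 */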
8029 {
8030 "arithmetic ops make PTR_TO_CTX unusable",
8031 .insns = {
8032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
8033 offsetof(struct __sk_buff, data) -
8034 offsetof(struct __sk_buff, mark)),
8035 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8036 offsetof(struct __sk_buff, mark)),
8037 BPF_EXIT_INSN(),
8038 },
8039 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
8040 .result = REJECT,
8041 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8042 },
8043 {
8044 "pkt_end - pkt_start is allowed",
8045 .insns = {
8046 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8047 offsetof(struct __sk_buff, data_end)),
8048 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8049 offsetof(struct __sk_buff, data)),
8050 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
8051 BPF_EXIT_INSN(),
8052 },
8053 .result = ACCEPT,
8054 .retval = TEST_DATA_LEN,
8055 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8056 },
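	/* The long "XDP pkt read" series starts here. The two "pkt_end
	 * mangling" tests check that pkt_end itself cannot be moved by
	 * pointer arithmetic; the rest walk the comparison matrix between
	 * pkt_data+8 and pkt_end for JGT/JLT/JGE/JLE in both operand
	 * orders. "good access" variants read within the 8 bytes proven
	 * available on the branch where the check holds; "bad access"
	 * variants read outside that window (e.g. a DW load at offset -4)
	 * or read on a path where no bound was established.
	 */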
8057 {
8058 "XDP pkt read, pkt_end mangling, bad access 1",
8059 .insns = {
8060 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8061 offsetof(struct xdp_md, data)),
8062 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8063 offsetof(struct xdp_md, data_end)),
8064 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8065 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
8067 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8068 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8069 BPF_MOV64_IMM(BPF_REG_0, 0),
8070 BPF_EXIT_INSN(),
8071 },
8072 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8073 .result = REJECT,
8074 .prog_type = BPF_PROG_TYPE_XDP,
8075 },
8076 {
8077 "XDP pkt read, pkt_end mangling, bad access 2",
8078 .insns = {
8079 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8080 offsetof(struct xdp_md, data)),
8081 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8082 offsetof(struct xdp_md, data_end)),
8083 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8084 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8085 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
8086 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8087 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8088 BPF_MOV64_IMM(BPF_REG_0, 0),
8089 BPF_EXIT_INSN(),
8090 },
8091 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8092 .result = REJECT,
8093 .prog_type = BPF_PROG_TYPE_XDP,
8094 },
8095 {
8096 "XDP pkt read, pkt_data' > pkt_end, good access",
8097 .insns = {
8098 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8099 offsetof(struct xdp_md, data)),
8100 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8101 offsetof(struct xdp_md, data_end)),
8102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8104 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8105 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8106 BPF_MOV64_IMM(BPF_REG_0, 0),
8107 BPF_EXIT_INSN(),
8108 },
8109 .result = ACCEPT,
8110 .prog_type = BPF_PROG_TYPE_XDP,
8111 },
8112 {
8113 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
8114 .insns = {
8115 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8116 offsetof(struct xdp_md, data)),
8117 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8118 offsetof(struct xdp_md, data_end)),
8119 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8121 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8122 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8123 BPF_MOV64_IMM(BPF_REG_0, 0),
8124 BPF_EXIT_INSN(),
8125 },
8126 .errstr = "R1 offset is outside of the packet",
8127 .result = REJECT,
8128 .prog_type = BPF_PROG_TYPE_XDP,
8129 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8130 },
8131 {
8132 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
8133 .insns = {
8134 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8135 offsetof(struct xdp_md, data)),
8136 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8137 offsetof(struct xdp_md, data_end)),
8138 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8139 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8140 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8141 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8142 BPF_MOV64_IMM(BPF_REG_0, 0),
8143 BPF_EXIT_INSN(),
8144 },
8145 .errstr = "R1 offset is outside of the packet",
8146 .result = REJECT,
8147 .prog_type = BPF_PROG_TYPE_XDP,
8148 },
8149 {
8150 "XDP pkt read, pkt_end > pkt_data', good access",
8151 .insns = {
8152 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8153 offsetof(struct xdp_md, data)),
8154 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8155 offsetof(struct xdp_md, data_end)),
8156 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8157 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8158 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8159 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8160 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8161 BPF_MOV64_IMM(BPF_REG_0, 0),
8162 BPF_EXIT_INSN(),
8163 },
8164 .result = ACCEPT,
8165 .prog_type = BPF_PROG_TYPE_XDP,
8166 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8167 },
8168 {
8169 "XDP pkt read, pkt_end > pkt_data', bad access 1",
8170 .insns = {
8171 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8172 offsetof(struct xdp_md, data)),
8173 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8174 offsetof(struct xdp_md, data_end)),
8175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8177 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8178 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8179 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8180 BPF_MOV64_IMM(BPF_REG_0, 0),
8181 BPF_EXIT_INSN(),
8182 },
8183 .errstr = "R1 offset is outside of the packet",
8184 .result = REJECT,
8185 .prog_type = BPF_PROG_TYPE_XDP,
8186 },
8187 {
8188 "XDP pkt read, pkt_end > pkt_data', bad access 2",
8189 .insns = {
8190 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8191 offsetof(struct xdp_md, data)),
8192 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8193 offsetof(struct xdp_md, data_end)),
8194 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8196 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8197 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8198 BPF_MOV64_IMM(BPF_REG_0, 0),
8199 BPF_EXIT_INSN(),
8200 },
8201 .errstr = "R1 offset is outside of the packet",
8202 .result = REJECT,
8203 .prog_type = BPF_PROG_TYPE_XDP,
8204 },
8205 {
8206 "XDP pkt read, pkt_data' < pkt_end, good access",
8207 .insns = {
8208 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8209 offsetof(struct xdp_md, data)),
8210 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8211 offsetof(struct xdp_md, data_end)),
8212 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8213 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8214 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8215 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8216 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8217 BPF_MOV64_IMM(BPF_REG_0, 0),
8218 BPF_EXIT_INSN(),
8219 },
8220 .result = ACCEPT,
8221 .prog_type = BPF_PROG_TYPE_XDP,
8222 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8223 },
8224 {
8225 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
8226 .insns = {
8227 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8228 offsetof(struct xdp_md, data)),
8229 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8230 offsetof(struct xdp_md, data_end)),
8231 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8233 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8234 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8235 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8236 BPF_MOV64_IMM(BPF_REG_0, 0),
8237 BPF_EXIT_INSN(),
8238 },
8239 .errstr = "R1 offset is outside of the packet",
8240 .result = REJECT,
8241 .prog_type = BPF_PROG_TYPE_XDP,
8242 },
8243 {
8244 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
8245 .insns = {
8246 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8247 offsetof(struct xdp_md, data)),
8248 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8249 offsetof(struct xdp_md, data_end)),
8250 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8251 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8252 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8253 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8254 BPF_MOV64_IMM(BPF_REG_0, 0),
8255 BPF_EXIT_INSN(),
8256 },
8257 .errstr = "R1 offset is outside of the packet",
8258 .result = REJECT,
8259 .prog_type = BPF_PROG_TYPE_XDP,
8260 },
8261 {
8262 "XDP pkt read, pkt_end < pkt_data', good access",
8263 .insns = {
8264 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8265 offsetof(struct xdp_md, data)),
8266 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8267 offsetof(struct xdp_md, data_end)),
8268 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8270 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8271 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8272 BPF_MOV64_IMM(BPF_REG_0, 0),
8273 BPF_EXIT_INSN(),
8274 },
8275 .result = ACCEPT,
8276 .prog_type = BPF_PROG_TYPE_XDP,
8277 },
8278 {
8279 "XDP pkt read, pkt_end < pkt_data', bad access 1",
8280 .insns = {
8281 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8282 offsetof(struct xdp_md, data)),
8283 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8284 offsetof(struct xdp_md, data_end)),
8285 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8287 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8288 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8289 BPF_MOV64_IMM(BPF_REG_0, 0),
8290 BPF_EXIT_INSN(),
8291 },
8292 .errstr = "R1 offset is outside of the packet",
8293 .result = REJECT,
8294 .prog_type = BPF_PROG_TYPE_XDP,
8295 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8296 },
8297 {
8298 "XDP pkt read, pkt_end < pkt_data', bad access 2",
8299 .insns = {
8300 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8301 offsetof(struct xdp_md, data)),
8302 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8303 offsetof(struct xdp_md, data_end)),
8304 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8305 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8306 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8307 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8308 BPF_MOV64_IMM(BPF_REG_0, 0),
8309 BPF_EXIT_INSN(),
8310 },
8311 .errstr = "R1 offset is outside of the packet",
8312 .result = REJECT,
8313 .prog_type = BPF_PROG_TYPE_XDP,
8314 },
8315 {
8316 "XDP pkt read, pkt_data' >= pkt_end, good access",
8317 .insns = {
8318 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8319 offsetof(struct xdp_md, data)),
8320 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8321 offsetof(struct xdp_md, data_end)),
8322 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8323 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8324 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8325 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8326 BPF_MOV64_IMM(BPF_REG_0, 0),
8327 BPF_EXIT_INSN(),
8328 },
8329 .result = ACCEPT,
8330 .prog_type = BPF_PROG_TYPE_XDP,
8331 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8332 },
8333 {
8334 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
8335 .insns = {
8336 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8337 offsetof(struct xdp_md, data)),
8338 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8339 offsetof(struct xdp_md, data_end)),
8340 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8341 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8342 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8343 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8344 BPF_MOV64_IMM(BPF_REG_0, 0),
8345 BPF_EXIT_INSN(),
8346 },
8347 .errstr = "R1 offset is outside of the packet",
8348 .result = REJECT,
8349 .prog_type = BPF_PROG_TYPE_XDP,
8350 },
8351 {
8352 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
8353 .insns = {
8354 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8355 offsetof(struct xdp_md, data)),
8356 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8357 offsetof(struct xdp_md, data_end)),
8358 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8360 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8361 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8362 BPF_MOV64_IMM(BPF_REG_0, 0),
8363 BPF_EXIT_INSN(),
8364 },
8365 .errstr = "R1 offset is outside of the packet",
8366 .result = REJECT,
8367 .prog_type = BPF_PROG_TYPE_XDP,
8368 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8369 },
8370 {
8371 "XDP pkt read, pkt_end >= pkt_data', good access",
8372 .insns = {
8373 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8374 offsetof(struct xdp_md, data)),
8375 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8376 offsetof(struct xdp_md, data_end)),
8377 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8379 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8380 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8381 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8382 BPF_MOV64_IMM(BPF_REG_0, 0),
8383 BPF_EXIT_INSN(),
8384 },
8385 .result = ACCEPT,
8386 .prog_type = BPF_PROG_TYPE_XDP,
8387 },
8388 {
8389 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
8390 .insns = {
8391 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8392 offsetof(struct xdp_md, data)),
8393 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8394 offsetof(struct xdp_md, data_end)),
8395 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8397 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8398 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8399 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8400 BPF_MOV64_IMM(BPF_REG_0, 0),
8401 BPF_EXIT_INSN(),
8402 },
8403 .errstr = "R1 offset is outside of the packet",
8404 .result = REJECT,
8405 .prog_type = BPF_PROG_TYPE_XDP,
8406 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8407 },
8408 {
8409 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
8410 .insns = {
8411 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8412 offsetof(struct xdp_md, data)),
8413 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8414 offsetof(struct xdp_md, data_end)),
8415 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8417 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8418 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8419 BPF_MOV64_IMM(BPF_REG_0, 0),
8420 BPF_EXIT_INSN(),
8421 },
8422 .errstr = "R1 offset is outside of the packet",
8423 .result = REJECT,
8424 .prog_type = BPF_PROG_TYPE_XDP,
8425 },
8426 {
8427 "XDP pkt read, pkt_data' <= pkt_end, good access",
8428 .insns = {
8429 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8430 offsetof(struct xdp_md, data)),
8431 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8432 offsetof(struct xdp_md, data_end)),
8433 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8435 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8436 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8437 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8438 BPF_MOV64_IMM(BPF_REG_0, 0),
8439 BPF_EXIT_INSN(),
8440 },
8441 .result = ACCEPT,
8442 .prog_type = BPF_PROG_TYPE_XDP,
8443 },
8444 {
8445 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
8446 .insns = {
8447 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8448 offsetof(struct xdp_md, data)),
8449 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8450 offsetof(struct xdp_md, data_end)),
8451 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8453 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8454 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8455 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8456 BPF_MOV64_IMM(BPF_REG_0, 0),
8457 BPF_EXIT_INSN(),
8458 },
8459 .errstr = "R1 offset is outside of the packet",
8460 .result = REJECT,
8461 .prog_type = BPF_PROG_TYPE_XDP,
8462 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8463 },
8464 {
8465 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
8466 .insns = {
8467 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8468 offsetof(struct xdp_md, data)),
8469 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8470 offsetof(struct xdp_md, data_end)),
8471 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8473 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8474 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8475 BPF_MOV64_IMM(BPF_REG_0, 0),
8476 BPF_EXIT_INSN(),
8477 },
8478 .errstr = "R1 offset is outside of the packet",
8479 .result = REJECT,
8480 .prog_type = BPF_PROG_TYPE_XDP,
8481 },
8482 {
8483 "XDP pkt read, pkt_end <= pkt_data', good access",
8484 .insns = {
8485 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8486 offsetof(struct xdp_md, data)),
8487 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8488 offsetof(struct xdp_md, data_end)),
8489 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8491 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8492 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8493 BPF_MOV64_IMM(BPF_REG_0, 0),
8494 BPF_EXIT_INSN(),
8495 },
8496 .result = ACCEPT,
8497 .prog_type = BPF_PROG_TYPE_XDP,
8498 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8499 },
8500 {
8501 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
8502 .insns = {
8503 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8504 offsetof(struct xdp_md, data)),
8505 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8506 offsetof(struct xdp_md, data_end)),
8507 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8509 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8510 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8511 BPF_MOV64_IMM(BPF_REG_0, 0),
8512 BPF_EXIT_INSN(),
8513 },
8514 .errstr = "R1 offset is outside of the packet",
8515 .result = REJECT,
8516 .prog_type = BPF_PROG_TYPE_XDP,
8517 },
8518 {
8519 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
8520 .insns = {
8521 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8522 offsetof(struct xdp_md, data)),
8523 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8524 offsetof(struct xdp_md, data_end)),
8525 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8526 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8527 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8528 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8529 BPF_MOV64_IMM(BPF_REG_0, 0),
8530 BPF_EXIT_INSN(),
8531 },
8532 .errstr = "R1 offset is outside of the packet",
8533 .result = REJECT,
8534 .prog_type = BPF_PROG_TYPE_XDP,
8535 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8536 },
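	/* Same comparison matrix as above, repeated with pkt_meta+8 checked
	 * against pkt_data instead of pkt_data+8 against pkt_end.
	 */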
8537 {
8538 "XDP pkt read, pkt_meta' > pkt_data, good access",
8539 .insns = {
8540 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8541 offsetof(struct xdp_md, data_meta)),
8542 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8543 offsetof(struct xdp_md, data)),
8544 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8546 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8547 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8548 BPF_MOV64_IMM(BPF_REG_0, 0),
8549 BPF_EXIT_INSN(),
8550 },
8551 .result = ACCEPT,
8552 .prog_type = BPF_PROG_TYPE_XDP,
8553 },
8554 {
8555 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
8556 .insns = {
8557 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8558 offsetof(struct xdp_md, data_meta)),
8559 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8560 offsetof(struct xdp_md, data)),
8561 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8563 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8564 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8565 BPF_MOV64_IMM(BPF_REG_0, 0),
8566 BPF_EXIT_INSN(),
8567 },
8568 .errstr = "R1 offset is outside of the packet",
8569 .result = REJECT,
8570 .prog_type = BPF_PROG_TYPE_XDP,
8571 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8572 },
8573 {
8574 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
8575 .insns = {
8576 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8577 offsetof(struct xdp_md, data_meta)),
8578 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8579 offsetof(struct xdp_md, data)),
8580 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8581 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8582 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8583 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8584 BPF_MOV64_IMM(BPF_REG_0, 0),
8585 BPF_EXIT_INSN(),
8586 },
8587 .errstr = "R1 offset is outside of the packet",
8588 .result = REJECT,
8589 .prog_type = BPF_PROG_TYPE_XDP,
8590 },
8591 {
8592 "XDP pkt read, pkt_data > pkt_meta', good access",
8593 .insns = {
8594 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8595 offsetof(struct xdp_md, data_meta)),
8596 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8597 offsetof(struct xdp_md, data)),
8598 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8599 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8600 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8601 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8602 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8603 BPF_MOV64_IMM(BPF_REG_0, 0),
8604 BPF_EXIT_INSN(),
8605 },
8606 .result = ACCEPT,
8607 .prog_type = BPF_PROG_TYPE_XDP,
8608 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8609 },
8610 {
8611 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
8612 .insns = {
8613 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8614 offsetof(struct xdp_md, data_meta)),
8615 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8616 offsetof(struct xdp_md, data)),
8617 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8619 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8620 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8621 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8622 BPF_MOV64_IMM(BPF_REG_0, 0),
8623 BPF_EXIT_INSN(),
8624 },
8625 .errstr = "R1 offset is outside of the packet",
8626 .result = REJECT,
8627 .prog_type = BPF_PROG_TYPE_XDP,
8628 },
8629 {
8630 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
8631 .insns = {
8632 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8633 offsetof(struct xdp_md, data_meta)),
8634 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8635 offsetof(struct xdp_md, data)),
8636 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8638 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8639 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8640 BPF_MOV64_IMM(BPF_REG_0, 0),
8641 BPF_EXIT_INSN(),
8642 },
8643 .errstr = "R1 offset is outside of the packet",
8644 .result = REJECT,
8645 .prog_type = BPF_PROG_TYPE_XDP,
8646 },
8647 {
8648 "XDP pkt read, pkt_meta' < pkt_data, good access",
8649 .insns = {
8650 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8651 offsetof(struct xdp_md, data_meta)),
8652 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8653 offsetof(struct xdp_md, data)),
8654 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8655 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8656 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8657 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8658 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8659 BPF_MOV64_IMM(BPF_REG_0, 0),
8660 BPF_EXIT_INSN(),
8661 },
8662 .result = ACCEPT,
8663 .prog_type = BPF_PROG_TYPE_XDP,
8664 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8665 },
8666 {
8667 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
8668 .insns = {
8669 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8670 offsetof(struct xdp_md, data_meta)),
8671 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8672 offsetof(struct xdp_md, data)),
8673 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8675 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8676 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8677 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8678 BPF_MOV64_IMM(BPF_REG_0, 0),
8679 BPF_EXIT_INSN(),
8680 },
8681 .errstr = "R1 offset is outside of the packet",
8682 .result = REJECT,
8683 .prog_type = BPF_PROG_TYPE_XDP,
8684 },
8685 {
8686 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
8687 .insns = {
8688 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8689 offsetof(struct xdp_md, data_meta)),
8690 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8691 offsetof(struct xdp_md, data)),
8692 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8693 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8694 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8695 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8696 BPF_MOV64_IMM(BPF_REG_0, 0),
8697 BPF_EXIT_INSN(),
8698 },
8699 .errstr = "R1 offset is outside of the packet",
8700 .result = REJECT,
8701 .prog_type = BPF_PROG_TYPE_XDP,
8702 },
8703 {
8704 "XDP pkt read, pkt_data < pkt_meta', good access",
8705 .insns = {
8706 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8707 offsetof(struct xdp_md, data_meta)),
8708 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8709 offsetof(struct xdp_md, data)),
8710 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8712 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8713 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8714 BPF_MOV64_IMM(BPF_REG_0, 0),
8715 BPF_EXIT_INSN(),
8716 },
8717 .result = ACCEPT,
8718 .prog_type = BPF_PROG_TYPE_XDP,
8719 },
8720 {
8721 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
8722 .insns = {
8723 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8724 offsetof(struct xdp_md, data_meta)),
8725 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8726 offsetof(struct xdp_md, data)),
8727 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8729 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8730 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8731 BPF_MOV64_IMM(BPF_REG_0, 0),
8732 BPF_EXIT_INSN(),
8733 },
8734 .errstr = "R1 offset is outside of the packet",
8735 .result = REJECT,
8736 .prog_type = BPF_PROG_TYPE_XDP,
8737 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8738 },
8739 {
8740 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
8741 .insns = {
8742 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8743 offsetof(struct xdp_md, data_meta)),
8744 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8745 offsetof(struct xdp_md, data)),
8746 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8747 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8748 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8749 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8750 BPF_MOV64_IMM(BPF_REG_0, 0),
8751 BPF_EXIT_INSN(),
8752 },
8753 .errstr = "R1 offset is outside of the packet",
8754 .result = REJECT,
8755 .prog_type = BPF_PROG_TYPE_XDP,
8756 },
8757 {
8758 "XDP pkt read, pkt_meta' >= pkt_data, good access",
8759 .insns = {
8760 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8761 offsetof(struct xdp_md, data_meta)),
8762 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8763 offsetof(struct xdp_md, data)),
8764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8766 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8767 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8768 BPF_MOV64_IMM(BPF_REG_0, 0),
8769 BPF_EXIT_INSN(),
8770 },
8771 .result = ACCEPT,
8772 .prog_type = BPF_PROG_TYPE_XDP,
8773 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8774 },
8775 {
8776 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
8777 .insns = {
8778 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8779 offsetof(struct xdp_md, data_meta)),
8780 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8781 offsetof(struct xdp_md, data)),
8782 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8784 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8785 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8786 BPF_MOV64_IMM(BPF_REG_0, 0),
8787 BPF_EXIT_INSN(),
8788 },
8789 .errstr = "R1 offset is outside of the packet",
8790 .result = REJECT,
8791 .prog_type = BPF_PROG_TYPE_XDP,
8792 },
8793 {
8794 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
8795 .insns = {
8796 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8797 offsetof(struct xdp_md, data_meta)),
8798 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8799 offsetof(struct xdp_md, data)),
8800 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8802 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8803 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8804 BPF_MOV64_IMM(BPF_REG_0, 0),
8805 BPF_EXIT_INSN(),
8806 },
8807 .errstr = "R1 offset is outside of the packet",
8808 .result = REJECT,
8809 .prog_type = BPF_PROG_TYPE_XDP,
8810 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8811 },
8812 {
8813 "XDP pkt read, pkt_data >= pkt_meta', good access",
8814 .insns = {
8815 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8816 offsetof(struct xdp_md, data_meta)),
8817 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8818 offsetof(struct xdp_md, data)),
8819 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8820 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8821 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8822 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8823 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8824 BPF_MOV64_IMM(BPF_REG_0, 0),
8825 BPF_EXIT_INSN(),
8826 },
8827 .result = ACCEPT,
8828 .prog_type = BPF_PROG_TYPE_XDP,
8829 },
8830 {
8831 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
8832 .insns = {
8833 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8834 offsetof(struct xdp_md, data_meta)),
8835 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8836 offsetof(struct xdp_md, data)),
8837 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8839 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8840 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8841 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8842 BPF_MOV64_IMM(BPF_REG_0, 0),
8843 BPF_EXIT_INSN(),
8844 },
8845 .errstr = "R1 offset is outside of the packet",
8846 .result = REJECT,
8847 .prog_type = BPF_PROG_TYPE_XDP,
8848 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8849 },
8850 {
8851 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
8852 .insns = {
8853 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8854 offsetof(struct xdp_md, data_meta)),
8855 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8856 offsetof(struct xdp_md, data)),
8857 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8858 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8859 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8860 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8861 BPF_MOV64_IMM(BPF_REG_0, 0),
8862 BPF_EXIT_INSN(),
8863 },
8864 .errstr = "R1 offset is outside of the packet",
8865 .result = REJECT,
8866 .prog_type = BPF_PROG_TYPE_XDP,
8867 },
8868 {
8869 "XDP pkt read, pkt_meta' <= pkt_data, good access",
8870 .insns = {
8871 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8872 offsetof(struct xdp_md, data_meta)),
8873 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8874 offsetof(struct xdp_md, data)),
8875 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8877 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8878 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8879 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8880 BPF_MOV64_IMM(BPF_REG_0, 0),
8881 BPF_EXIT_INSN(),
8882 },
8883 .result = ACCEPT,
8884 .prog_type = BPF_PROG_TYPE_XDP,
8885 },
8886 {
8887 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
8888 .insns = {
8889 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8890 offsetof(struct xdp_md, data_meta)),
8891 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8892 offsetof(struct xdp_md, data)),
8893 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8895 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8896 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8897 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8898 BPF_MOV64_IMM(BPF_REG_0, 0),
8899 BPF_EXIT_INSN(),
8900 },
8901 .errstr = "R1 offset is outside of the packet",
8902 .result = REJECT,
8903 .prog_type = BPF_PROG_TYPE_XDP,
8904 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8905 },
8906 {
8907 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
8908 .insns = {
8909 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8910 offsetof(struct xdp_md, data_meta)),
8911 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8912 offsetof(struct xdp_md, data)),
8913 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8914 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8915 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8916 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8917 BPF_MOV64_IMM(BPF_REG_0, 0),
8918 BPF_EXIT_INSN(),
8919 },
8920 .errstr = "R1 offset is outside of the packet",
8921 .result = REJECT,
8922 .prog_type = BPF_PROG_TYPE_XDP,
8923 },
8924 {
8925 "XDP pkt read, pkt_data <= pkt_meta', good access",
8926 .insns = {
8927 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8928 offsetof(struct xdp_md, data_meta)),
8929 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8930 offsetof(struct xdp_md, data)),
8931 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8933 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8934 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8935 BPF_MOV64_IMM(BPF_REG_0, 0),
8936 BPF_EXIT_INSN(),
8937 },
8938 .result = ACCEPT,
8939 .prog_type = BPF_PROG_TYPE_XDP,
8940 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8941 },
8942 {
8943 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
8944 .insns = {
8945 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8946 offsetof(struct xdp_md, data_meta)),
8947 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8948 offsetof(struct xdp_md, data)),
8949 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8951 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8952 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8953 BPF_MOV64_IMM(BPF_REG_0, 0),
8954 BPF_EXIT_INSN(),
8955 },
8956 .errstr = "R1 offset is outside of the packet",
8957 .result = REJECT,
8958 .prog_type = BPF_PROG_TYPE_XDP,
8959 },
8960 {
8961 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
8962 .insns = {
8963 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8964 offsetof(struct xdp_md, data_meta)),
8965 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8966 offsetof(struct xdp_md, data)),
8967 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8969 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8970 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8971 BPF_MOV64_IMM(BPF_REG_0, 0),
8972 BPF_EXIT_INSN(),
8973 },
8974 .errstr = "R1 offset is outside of the packet",
8975 .result = REJECT,
8976 .prog_type = BPF_PROG_TYPE_XDP,
8977 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8978 },
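	/* The "check deducing bounds from const" tests set r0 to a
	 * constant, branch on signed compares that may or may not narrow
	 * its bounds, and then mix r0 into arithmetic with the ctx pointer
	 * in r1. Each case expects the error quoted in its errstr:
	 * subtracting a pointer from a scalar, dereferencing a modified ctx
	 * pointer, or ctx math with a register whose minimum value is
	 * unbounded.
	 */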
8979 {
8980 "check deducing bounds from const, 1",
8981 .insns = {
8982 BPF_MOV64_IMM(BPF_REG_0, 1),
8983 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
8984 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8985 BPF_EXIT_INSN(),
8986 },
8987 .result = REJECT,
8988 .errstr = "R0 tried to subtract pointer from scalar",
8989 },
8990 {
8991 "check deducing bounds from const, 2",
8992 .insns = {
8993 BPF_MOV64_IMM(BPF_REG_0, 1),
8994 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
8995 BPF_EXIT_INSN(),
8996 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
8997 BPF_EXIT_INSN(),
8998 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
8999 BPF_EXIT_INSN(),
9000 },
9001 .result = ACCEPT,
9002 .retval = 1,
9003 },
9004 {
9005 "check deducing bounds from const, 3",
9006 .insns = {
9007 BPF_MOV64_IMM(BPF_REG_0, 0),
9008 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9009 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9010 BPF_EXIT_INSN(),
9011 },
9012 .result = REJECT,
9013 .errstr = "R0 tried to subtract pointer from scalar",
9014 },
9015 {
9016 "check deducing bounds from const, 4",
9017 .insns = {
9018 BPF_MOV64_IMM(BPF_REG_0, 0),
9019 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
9020 BPF_EXIT_INSN(),
9021 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9022 BPF_EXIT_INSN(),
9023 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9024 BPF_EXIT_INSN(),
9025 },
9026 .result = ACCEPT,
9027 },
9028 {
9029 "check deducing bounds from const, 5",
9030 .insns = {
9031 BPF_MOV64_IMM(BPF_REG_0, 0),
9032 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9033 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9034 BPF_EXIT_INSN(),
9035 },
9036 .result = REJECT,
9037 .errstr = "R0 tried to subtract pointer from scalar",
9038 },
9039 {
9040 "check deducing bounds from const, 6",
9041 .insns = {
9042 BPF_MOV64_IMM(BPF_REG_0, 0),
9043 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9044 BPF_EXIT_INSN(),
9045 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9046 BPF_EXIT_INSN(),
9047 },
9048 .result = REJECT,
9049 .errstr = "R0 tried to subtract pointer from scalar",
9050 },
9051 {
9052 "check deducing bounds from const, 7",
9053 .insns = {
9054 BPF_MOV64_IMM(BPF_REG_0, ~0),
9055 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9056 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9057 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9058 offsetof(struct __sk_buff, mark)),
9059 BPF_EXIT_INSN(),
9060 },
9061 .result = REJECT,
9062 .errstr = "dereference of modified ctx ptr",
9063 },
9064 {
9065 "check deducing bounds from const, 8",
9066 .insns = {
9067 BPF_MOV64_IMM(BPF_REG_0, ~0),
9068 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9069 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
9070 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9071 offsetof(struct __sk_buff, mark)),
9072 BPF_EXIT_INSN(),
9073 },
9074 .result = REJECT,
9075 .errstr = "dereference of modified ctx ptr",
9076 },
9077 {
9078 "check deducing bounds from const, 9",
9079 .insns = {
9080 BPF_MOV64_IMM(BPF_REG_0, 0),
9081 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9082 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9083 BPF_EXIT_INSN(),
9084 },
9085 .result = REJECT,
9086 .errstr = "R0 tried to subtract pointer from scalar",
9087 },
9088 {
9089 "check deducing bounds from const, 10",
9090 .insns = {
9091 BPF_MOV64_IMM(BPF_REG_0, 0),
9092 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9093 /* Marks reg as unknown. */
9094 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
9095 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9096 BPF_EXIT_INSN(),
9097 },
9098 .result = REJECT,
9099 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
9100 },
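	/* For BPF_PROG_TYPE_CGROUP_SOCK the "bpf_exit with invalid return
	 * code" tests only accept exits where R0 is provably 0 or 1 at
	 * bpf_exit; unknown scalars, wider masks, the constant 2 and a ctx
	 * pointer are all rejected, as spelled out in the errstrs.
	 */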
9101 {
9102 "bpf_exit with invalid return code. test1",
9103 .insns = {
9104 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9105 BPF_EXIT_INSN(),
9106 },
9107 .errstr = "R0 has value (0x0; 0xffffffff)",
9108 .result = REJECT,
9109 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9110 },
9111 {
9112 "bpf_exit with invalid return code. test2",
9113 .insns = {
9114 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9115 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
9116 BPF_EXIT_INSN(),
9117 },
9118 .result = ACCEPT,
9119 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9120 },
9121 {
9122 "bpf_exit with invalid return code. test3",
9123 .insns = {
9124 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9125 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
9126 BPF_EXIT_INSN(),
9127 },
9128 .errstr = "R0 has value (0x0; 0x3)",
9129 .result = REJECT,
9130 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9131 },
9132 {
9133 "bpf_exit with invalid return code. test4",
9134 .insns = {
9135 BPF_MOV64_IMM(BPF_REG_0, 1),
9136 BPF_EXIT_INSN(),
9137 },
9138 .result = ACCEPT,
9139 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9140 },
9141 {
9142 "bpf_exit with invalid return code. test5",
9143 .insns = {
9144 BPF_MOV64_IMM(BPF_REG_0, 2),
9145 BPF_EXIT_INSN(),
9146 },
9147 .errstr = "R0 has value (0x2; 0x0)",
9148 .result = REJECT,
9149 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9150 },
9151 {
9152 "bpf_exit with invalid return code. test6",
9153 .insns = {
9154 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9155 BPF_EXIT_INSN(),
9156 },
9157 .errstr = "R0 is not a known value (ctx)",
9158 .result = REJECT,
9159 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9160 },
9161 {
9162 "bpf_exit with invalid return code. test7",
9163 .insns = {
9164 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9165 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
9166 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
9167 BPF_EXIT_INSN(),
9168 },
9169 .errstr = "R0 has unknown scalar value",
9170 .result = REJECT,
9171 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9172 },
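	/* The "calls: ..." tests cover bpf-to-bpf function calls. A call to
	 * another BPF function is encoded as BPF_RAW_INSN(BPF_JMP | BPF_CALL,
	 * 0, 1, 0, imm): src_reg == 1 (BPF_PSEUDO_CALL) distinguishes it from
	 * a helper call, and imm is the offset of the callee relative to the
	 * instruction following the call (in "basic sanity" the call at
	 * insn 0 with imm 2 targets insn 3). Other uses of the src/off
	 * fields are rejected further below as "BPF_CALL uses reserved
	 * fields".
	 */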
9173	{
9174 "calls: basic sanity",
9175 .insns = {
9176 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9177 BPF_MOV64_IMM(BPF_REG_0, 1),
9178 BPF_EXIT_INSN(),
9179 BPF_MOV64_IMM(BPF_REG_0, 2),
9180 BPF_EXIT_INSN(),
9181 },
9182 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9183 .result = ACCEPT,
9184 },
9185 {
9186	"calls: not on unprivileged",
9187 .insns = {
9188 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9189 BPF_MOV64_IMM(BPF_REG_0, 1),
9190 BPF_EXIT_INSN(),
9191 BPF_MOV64_IMM(BPF_REG_0, 2),
9192 BPF_EXIT_INSN(),
9193 },
9194 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
9195 .result_unpriv = REJECT,
9196 .result = ACCEPT,
9197	.retval = 1,
9198	},
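	/* Division by zero inside a subprog must not abort or bypass
	 * verification: execution continues past the division (the
	 * destination is zeroed rather than trapping), which is what the
	 * expected retval of 1 below relies on.
	 */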
9199 {
9200	"calls: div by 0 in subprog",
9201 .insns = {
9202 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9203 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9204 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9205 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9206 offsetof(struct __sk_buff, data_end)),
9207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9209 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9210 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9211 BPF_MOV64_IMM(BPF_REG_0, 1),
9212 BPF_EXIT_INSN(),
9213 BPF_MOV32_IMM(BPF_REG_2, 0),
9214 BPF_MOV32_IMM(BPF_REG_3, 1),
9215 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
9216 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9217 offsetof(struct __sk_buff, data)),
9218 BPF_EXIT_INSN(),
9219 },
9220 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9221 .result = ACCEPT,
9222 .retval = 1,
9223 },
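	/* A callee has to leave the same kind of value in R0 on every return
	 * path. In the two "multiple ret types" tests one path returns a
	 * packet pointer while the other returns a scalar (or a value
	 * derived from a map lookup), so the caller's subsequent use of R0
	 * is rejected.
	 */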
9224 {
9225 "calls: multiple ret types in subprog 1",
9226 .insns = {
9227 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9228 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9229 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9230 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9231 offsetof(struct __sk_buff, data_end)),
9232 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9234 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9235 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9236 BPF_MOV64_IMM(BPF_REG_0, 1),
9237 BPF_EXIT_INSN(),
9238 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9239 offsetof(struct __sk_buff, data)),
9240 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9241 BPF_MOV32_IMM(BPF_REG_0, 42),
9242 BPF_EXIT_INSN(),
9243 },
9244 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9245 .result = REJECT,
9246 .errstr = "R0 invalid mem access 'inv'",
9247 },
9248 {
9249 "calls: multiple ret types in subprog 2",
9250 .insns = {
9251 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9252 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9253 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9254 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9255 offsetof(struct __sk_buff, data_end)),
9256 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9257 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9258 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9259 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9260 BPF_MOV64_IMM(BPF_REG_0, 1),
9261 BPF_EXIT_INSN(),
9262 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9263 offsetof(struct __sk_buff, data)),
9264 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9265 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
9266 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9267 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9268 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9269 BPF_LD_MAP_FD(BPF_REG_1, 0),
9270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9271 BPF_FUNC_map_lookup_elem),
9272 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9273 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
9274 offsetof(struct __sk_buff, data)),
9275 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
9276 BPF_EXIT_INSN(),
9277 },
9278 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9279 .fixup_map1 = { 16 },
9280 .result = REJECT,
9281 .errstr = "R0 min value is outside of the array range",
9282 },
9283 {
9284	"calls: overlapping caller/callee",
9285 .insns = {
9286 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
9287 BPF_MOV64_IMM(BPF_REG_0, 1),
9288 BPF_EXIT_INSN(),
9289 },
9290 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9291 .errstr = "last insn is not an exit or jmp",
9292 .result = REJECT,
9293 },
9294 {
9295 "calls: wrong recursive calls",
9296 .insns = {
9297 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9298 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9299 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9302 BPF_MOV64_IMM(BPF_REG_0, 1),
9303 BPF_EXIT_INSN(),
9304 },
9305 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9306 .errstr = "jump out of range",
9307 .result = REJECT,
9308 },
9309 {
9310 "calls: wrong src reg",
9311 .insns = {
9312 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
9313 BPF_MOV64_IMM(BPF_REG_0, 1),
9314 BPF_EXIT_INSN(),
9315 },
9316 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9317 .errstr = "BPF_CALL uses reserved fields",
9318 .result = REJECT,
9319 },
9320 {
9321 "calls: wrong off value",
9322 .insns = {
9323 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
9324 BPF_MOV64_IMM(BPF_REG_0, 1),
9325 BPF_EXIT_INSN(),
9326 BPF_MOV64_IMM(BPF_REG_0, 2),
9327 BPF_EXIT_INSN(),
9328 },
9329 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9330 .errstr = "BPF_CALL uses reserved fields",
9331 .result = REJECT,
9332 },
9333 {
9334 "calls: jump back loop",
9335 .insns = {
9336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9337 BPF_MOV64_IMM(BPF_REG_0, 1),
9338 BPF_EXIT_INSN(),
9339 },
9340 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9341 .errstr = "back-edge from insn 0 to 0",
9342 .result = REJECT,
9343 },
9344 {
9345 "calls: conditional call",
9346 .insns = {
9347 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9348 offsetof(struct __sk_buff, mark)),
9349 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9350 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9351 BPF_MOV64_IMM(BPF_REG_0, 1),
9352 BPF_EXIT_INSN(),
9353 BPF_MOV64_IMM(BPF_REG_0, 2),
9354 BPF_EXIT_INSN(),
9355 },
9356 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9357 .errstr = "jump out of range",
9358 .result = REJECT,
9359 },
9360 {
9361 "calls: conditional call 2",
9362 .insns = {
9363 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9364 offsetof(struct __sk_buff, mark)),
9365 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9366 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9367 BPF_MOV64_IMM(BPF_REG_0, 1),
9368 BPF_EXIT_INSN(),
9369 BPF_MOV64_IMM(BPF_REG_0, 2),
9370 BPF_EXIT_INSN(),
9371 BPF_MOV64_IMM(BPF_REG_0, 3),
9372 BPF_EXIT_INSN(),
9373 },
9374 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9375 .result = ACCEPT,
9376 },
9377 {
9378 "calls: conditional call 3",
9379 .insns = {
9380 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9381 offsetof(struct __sk_buff, mark)),
9382 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9383 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9384 BPF_MOV64_IMM(BPF_REG_0, 1),
9385 BPF_EXIT_INSN(),
9386 BPF_MOV64_IMM(BPF_REG_0, 1),
9387 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9388 BPF_MOV64_IMM(BPF_REG_0, 3),
9389 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9390 },
9391 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9392 .errstr = "back-edge from insn",
9393 .result = REJECT,
9394 },
9395 {
9396 "calls: conditional call 4",
9397 .insns = {
9398 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9399 offsetof(struct __sk_buff, mark)),
9400 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9401 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9402 BPF_MOV64_IMM(BPF_REG_0, 1),
9403 BPF_EXIT_INSN(),
9404 BPF_MOV64_IMM(BPF_REG_0, 1),
9405 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
9406 BPF_MOV64_IMM(BPF_REG_0, 3),
9407 BPF_EXIT_INSN(),
9408 },
9409 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9410 .result = ACCEPT,
9411 },
9412 {
9413 "calls: conditional call 5",
9414 .insns = {
9415 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9416 offsetof(struct __sk_buff, mark)),
9417 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9418 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9419 BPF_MOV64_IMM(BPF_REG_0, 1),
9420 BPF_EXIT_INSN(),
9421 BPF_MOV64_IMM(BPF_REG_0, 1),
9422 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9423 BPF_MOV64_IMM(BPF_REG_0, 3),
9424 BPF_EXIT_INSN(),
9425 },
9426 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9427 .errstr = "back-edge from insn",
9428 .result = REJECT,
9429 },
9430 {
9431 "calls: conditional call 6",
9432 .insns = {
9433 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9434 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
9435 BPF_EXIT_INSN(),
9436 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9437 offsetof(struct __sk_buff, mark)),
9438 BPF_EXIT_INSN(),
9439 },
9440 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9441 .errstr = "back-edge from insn",
9442 .result = REJECT,
9443 },
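	/* Pseudo calls follow the same calling convention as helper calls:
	 * R1-R5 pass arguments and are scratched by the call, R6-R9 are
	 * preserved, and whatever the callee leaves in R0 becomes the
	 * caller's return value. Returning without initializing R0, or
	 * reading an argument register the caller never set, is caught as
	 * "!read_ok" below.
	 */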
9444 {
9445	"calls: using r0 returned by callee",
9446 .insns = {
9447 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9448 BPF_EXIT_INSN(),
9449 BPF_MOV64_IMM(BPF_REG_0, 2),
9450 BPF_EXIT_INSN(),
9451 },
9452 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9453 .result = ACCEPT,
9454 },
9455 {
9456	"calls: using uninit r0 from callee",
9457 .insns = {
9458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9459 BPF_EXIT_INSN(),
9460 BPF_EXIT_INSN(),
9461 },
9462 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9463 .errstr = "!read_ok",
9464 .result = REJECT,
9465 },
9466 {
9467	"calls: callee is using r1",
9468 .insns = {
9469 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9470 BPF_EXIT_INSN(),
9471 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9472 offsetof(struct __sk_buff, len)),
9473 BPF_EXIT_INSN(),
9474 },
9475 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
9476 .result = ACCEPT,
9477	.retval = TEST_DATA_LEN,
9478	},
9479 {
9480 "calls: callee using args1",
9481 .insns = {
9482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9483 BPF_EXIT_INSN(),
9484 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9485 BPF_EXIT_INSN(),
9486 },
9487 .errstr_unpriv = "allowed for root only",
9488 .result_unpriv = REJECT,
9489 .result = ACCEPT,
9490	.retval = POINTER_VALUE,
9491	},
9492 {
9493 "calls: callee using wrong args2",
9494 .insns = {
9495 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9496 BPF_EXIT_INSN(),
9497 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9498 BPF_EXIT_INSN(),
9499 },
9500 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9501 .errstr = "R2 !read_ok",
9502 .result = REJECT,
9503 },
9504 {
9505 "calls: callee using two args",
9506 .insns = {
9507 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9508 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
9509 offsetof(struct __sk_buff, len)),
9510 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
9511 offsetof(struct __sk_buff, len)),
9512 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9513 BPF_EXIT_INSN(),
9514 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9515 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9516 BPF_EXIT_INSN(),
9517 },
9518 .errstr_unpriv = "allowed for root only",
9519 .result_unpriv = REJECT,
9520 .result = ACCEPT,
9521	.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
9522	},
9523 {
9524 "calls: callee changing pkt pointers",
9525 .insns = {
9526 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
9527 offsetof(struct xdp_md, data)),
9528 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
9529 offsetof(struct xdp_md, data_end)),
9530 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
9531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
9532 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
9533 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9534 /* clear_all_pkt_pointers() has to walk all frames
9535 * to make sure that pkt pointers in the caller
9536 * are cleared when callee is calling a helper that
9537 * adjusts packet size
9538 */
9539 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
9540 BPF_MOV32_IMM(BPF_REG_0, 0),
9541 BPF_EXIT_INSN(),
9542 BPF_MOV64_IMM(BPF_REG_2, 0),
9543 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9544 BPF_FUNC_xdp_adjust_head),
9545 BPF_EXIT_INSN(),
9546 },
9547 .result = REJECT,
9548 .errstr = "R6 invalid mem access 'inv'",
9549 .prog_type = BPF_PROG_TYPE_XDP,
9550 },
9551 {
9552 "calls: two calls with args",
9553 .insns = {
9554 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9555 BPF_EXIT_INSN(),
9556 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9557 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9558 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9559 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9560 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9561 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9562 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9563 BPF_EXIT_INSN(),
9564 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9565 offsetof(struct __sk_buff, len)),
9566 BPF_EXIT_INSN(),
9567 },
9568 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9569 .result = ACCEPT,
9570	.retval = TEST_DATA_LEN + TEST_DATA_LEN,
9571	},
9572 {
9573	"calls: calls with stack arith",
9574 .insns = {
9575 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9576 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9577 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9578 BPF_EXIT_INSN(),
9579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9580 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9581 BPF_EXIT_INSN(),
9582 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9583 BPF_MOV64_IMM(BPF_REG_0, 42),
9584 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9585 BPF_EXIT_INSN(),
9586 },
9587 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9588 .result = ACCEPT,
9589	.retval = 42,
9590	},
9591 {
9592 "calls: calls with misaligned stack access",
9593 .insns = {
9594 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9596 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9597 BPF_EXIT_INSN(),
9598 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
9599 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9600 BPF_EXIT_INSN(),
9601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9602 BPF_MOV64_IMM(BPF_REG_0, 42),
9603 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9604 BPF_EXIT_INSN(),
9605 },
9606 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9607 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
9608 .errstr = "misaligned stack access",
9609 .result = REJECT,
9610 },
9611 {
9612 "calls: calls control flow, jump test",
9613 .insns = {
9614 BPF_MOV64_IMM(BPF_REG_0, 42),
9615 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9616 BPF_MOV64_IMM(BPF_REG_0, 43),
9617 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9618 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
9619 BPF_EXIT_INSN(),
9620 },
9621 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9622 .result = ACCEPT,
9623	.retval = 43,
9624	},
9625 {
9626 "calls: calls control flow, jump test 2",
9627 .insns = {
9628 BPF_MOV64_IMM(BPF_REG_0, 42),
9629 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9630 BPF_MOV64_IMM(BPF_REG_0, 43),
9631 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9632 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9633 BPF_EXIT_INSN(),
9634 },
9635 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9636 .errstr = "jump out of range from insn 1 to 4",
9637 .result = REJECT,
9638 },
9639 {
9640	"calls: two calls with bad jump",
9641 .insns = {
9642 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9643 BPF_EXIT_INSN(),
9644 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9645 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9646 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9647 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9648 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9649 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9650 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9651 BPF_EXIT_INSN(),
9652 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9653 offsetof(struct __sk_buff, len)),
9654 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
9655 BPF_EXIT_INSN(),
9656 },
9657 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9658 .errstr = "jump out of range from insn 11 to 9",
9659 .result = REJECT,
9660 },
9661 {
9662 "calls: recursive call. test1",
9663 .insns = {
9664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9665 BPF_EXIT_INSN(),
9666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9667 BPF_EXIT_INSN(),
9668 },
9669 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9670 .errstr = "back-edge",
9671 .result = REJECT,
9672 },
9673 {
9674 "calls: recursive call. test2",
9675 .insns = {
9676 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9677 BPF_EXIT_INSN(),
9678 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9679 BPF_EXIT_INSN(),
9680 },
9681 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9682 .errstr = "back-edge",
9683 .result = REJECT,
9684 },
9685 {
9686 "calls: unreachable code",
9687 .insns = {
9688 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9689 BPF_EXIT_INSN(),
9690 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9691 BPF_EXIT_INSN(),
9692 BPF_MOV64_IMM(BPF_REG_0, 0),
9693 BPF_EXIT_INSN(),
9694 BPF_MOV64_IMM(BPF_REG_0, 0),
9695 BPF_EXIT_INSN(),
9696 },
9697 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9698 .errstr = "unreachable insn 6",
9699 .result = REJECT,
9700 },
9701 {
9702 "calls: invalid call",
9703 .insns = {
9704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9705 BPF_EXIT_INSN(),
9706 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
9707 BPF_EXIT_INSN(),
9708 },
9709 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9710 .errstr = "invalid destination",
9711 .result = REJECT,
9712 },
9713 {
9714	"calls: invalid call 2",
9715 .insns = {
9716 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9717 BPF_EXIT_INSN(),
9718 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
9719 BPF_EXIT_INSN(),
9720 },
9721 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9722 .errstr = "invalid destination",
9723 .result = REJECT,
9724 },
9725 {
9726	"calls: jumping across function bodies. test1",
9727 .insns = {
9728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9729 BPF_MOV64_IMM(BPF_REG_0, 0),
9730 BPF_EXIT_INSN(),
9731 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
9732 BPF_EXIT_INSN(),
9733 },
9734 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9735 .errstr = "jump out of range",
9736 .result = REJECT,
9737 },
9738 {
9739 "calls: jumping across function bodies. test2",
9740 .insns = {
9741 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
9742 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9743 BPF_MOV64_IMM(BPF_REG_0, 0),
9744 BPF_EXIT_INSN(),
9745 BPF_EXIT_INSN(),
9746 },
9747 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9748 .errstr = "jump out of range",
9749 .result = REJECT,
9750 },
9751 {
9752 "calls: call without exit",
9753 .insns = {
9754 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9755 BPF_EXIT_INSN(),
9756 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9757 BPF_EXIT_INSN(),
9758 BPF_MOV64_IMM(BPF_REG_0, 0),
9759 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
9760 },
9761 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9762 .errstr = "not an exit",
9763 .result = REJECT,
9764 },
9765 {
9766 "calls: call into middle of ld_imm64",
9767 .insns = {
9768 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9769 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9770 BPF_MOV64_IMM(BPF_REG_0, 0),
9771 BPF_EXIT_INSN(),
9772 BPF_LD_IMM64(BPF_REG_0, 0),
9773 BPF_EXIT_INSN(),
9774 },
9775 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9776 .errstr = "last insn",
9777 .result = REJECT,
9778 },
9779 {
9780 "calls: call into middle of other call",
9781 .insns = {
9782 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9783 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9784 BPF_MOV64_IMM(BPF_REG_0, 0),
9785 BPF_EXIT_INSN(),
9786 BPF_MOV64_IMM(BPF_REG_0, 0),
9787 BPF_MOV64_IMM(BPF_REG_0, 0),
9788 BPF_EXIT_INSN(),
9789 },
9790 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9791 .errstr = "last insn",
9792 .result = REJECT,
9793 },
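	/* BPF_LD_ABS/IND implicitly operate on the skb and assume its data
	 * stays put. Mixing them with a callee that invokes a data-changing
	 * helper such as bpf_skb_vlan_push is therefore refused.
	 */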
9794 {
9795	"calls: ld_abs with changing ctx data in callee",
9796 .insns = {
9797 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9798 BPF_LD_ABS(BPF_B, 0),
9799 BPF_LD_ABS(BPF_H, 0),
9800 BPF_LD_ABS(BPF_W, 0),
9801 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
9802 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
9803 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
9804 BPF_LD_ABS(BPF_B, 0),
9805 BPF_LD_ABS(BPF_H, 0),
9806 BPF_LD_ABS(BPF_W, 0),
9807 BPF_EXIT_INSN(),
9808 BPF_MOV64_IMM(BPF_REG_2, 1),
9809 BPF_MOV64_IMM(BPF_REG_3, 2),
9810 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9811 BPF_FUNC_skb_vlan_push),
9812 BPF_EXIT_INSN(),
9813 },
9814 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9815 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
9816 .result = REJECT,
9817 },
9818 {
9819	"calls: two calls with bad fallthrough",
9820 .insns = {
9821 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9822 BPF_EXIT_INSN(),
9823 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9825 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9826 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9827 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9828 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9829 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9830 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
9831 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9832 offsetof(struct __sk_buff, len)),
9833 BPF_EXIT_INSN(),
9834 },
9835 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9836 .errstr = "not an exit",
9837 .result = REJECT,
9838 },
9839 {
9840 "calls: two calls with stack read",
9841 .insns = {
9842 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9843 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9846 BPF_EXIT_INSN(),
9847 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9848 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9849 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9850 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9851 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9852 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9853 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9854 BPF_EXIT_INSN(),
9855 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9856 BPF_EXIT_INSN(),
9857 },
9858 .prog_type = BPF_PROG_TYPE_XDP,
9859 .result = ACCEPT,
9860 },
9861 {
9862 "calls: two calls with stack write",
9863 .insns = {
9864 /* main prog */
9865 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9866 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9867 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9868 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9870 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9871 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
9872 BPF_EXIT_INSN(),
9873
9874 /* subprog 1 */
9875 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9876 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9877 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
9878 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
9879 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9880 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9881 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
9882 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
9883 /* write into stack frame of main prog */
9884 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9885 BPF_EXIT_INSN(),
9886
9887 /* subprog 2 */
9888 /* read from stack frame of main prog */
9889 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9890 BPF_EXIT_INSN(),
9891 },
9892 .prog_type = BPF_PROG_TYPE_XDP,
9893 .result = ACCEPT,
9894 },
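	/* Stack limits are enforced across bpf-to-bpf calls: the per-frame
	 * stack sizes are summed along the deepest call chain and the
	 * combined total may not exceed the 512-byte limit, as the
	 * "combined stack size" rejections below demonstrate.
	 */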
9895 {
9896	"calls: stack overflow using two frames (pre-call access)",
9897 .insns = {
9898 /* prog 1 */
9899 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9900 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
9901 BPF_EXIT_INSN(),
9902
9903 /* prog 2 */
9904 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9905 BPF_MOV64_IMM(BPF_REG_0, 0),
9906 BPF_EXIT_INSN(),
9907 },
9908 .prog_type = BPF_PROG_TYPE_XDP,
9909 .errstr = "combined stack size",
9910 .result = REJECT,
9911 },
9912 {
9913 "calls: stack overflow using two frames (post-call access)",
9914 .insns = {
9915 /* prog 1 */
9916 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
9917 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9918 BPF_EXIT_INSN(),
9919
9920 /* prog 2 */
9921 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9922 BPF_MOV64_IMM(BPF_REG_0, 0),
9923 BPF_EXIT_INSN(),
9924 },
9925 .prog_type = BPF_PROG_TYPE_XDP,
9926 .errstr = "combined stack size",
9927 .result = REJECT,
9928 },
9929 {
9930	"calls: stack depth check using three frames. test1",
9931 .insns = {
9932 /* main */
9933 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
9934 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
9935 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
9936 BPF_MOV64_IMM(BPF_REG_0, 0),
9937 BPF_EXIT_INSN(),
9938 /* A */
9939 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
9940 BPF_EXIT_INSN(),
9941 /* B */
9942 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
9943 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
9944 BPF_EXIT_INSN(),
9945 },
9946 .prog_type = BPF_PROG_TYPE_XDP,
9947 /* stack_main=32, stack_A=256, stack_B=64
9948 * and max(main+A, main+A+B) < 512
9949 */
9950 .result = ACCEPT,
9951 },
9952 {
9953 "calls: stack depth check using three frames. test2",
9954 .insns = {
9955 /* main */
9956 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
9957 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
9958 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
9959 BPF_MOV64_IMM(BPF_REG_0, 0),
9960 BPF_EXIT_INSN(),
9961 /* A */
9962 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
9963 BPF_EXIT_INSN(),
9964 /* B */
9965 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
9966 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
9967 BPF_EXIT_INSN(),
9968 },
9969 .prog_type = BPF_PROG_TYPE_XDP,
9970 /* stack_main=32, stack_A=64, stack_B=256
9971 * and max(main+A, main+A+B) < 512
9972 */
9973 .result = ACCEPT,
9974 },
9975 {
9976 "calls: stack depth check using three frames. test3",
9977 .insns = {
9978 /* main */
9979 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9980 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
9981 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9982 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
9983 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
9984 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
9985 BPF_MOV64_IMM(BPF_REG_0, 0),
9986 BPF_EXIT_INSN(),
9987 /* A */
9988 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
9989 BPF_EXIT_INSN(),
9990 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
9991 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
9992 /* B */
9993 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
9994 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
9995 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
9996 BPF_EXIT_INSN(),
9997 },
9998 .prog_type = BPF_PROG_TYPE_XDP,
9999 /* stack_main=64, stack_A=224, stack_B=256
10000 * and max(main+A, main+A+B) > 512
10001 */
10002 .errstr = "combined stack",
10003 .result = REJECT,
10004 },
10005 {
10006 "calls: stack depth check using three frames. test4",
10007 /* void main(void) {
10008 * func1(0);
10009 * func1(1);
10010 * func2(1);
10011 * }
10012 * void func1(int alloc_or_recurse) {
10013 * if (alloc_or_recurse) {
10014 * frame_pointer[-300] = 1;
10015 * } else {
10016 * func2(alloc_or_recurse);
10017 * }
10018 * }
10019 * void func2(int alloc_or_recurse) {
10020 * if (alloc_or_recurse) {
10021 * frame_pointer[-300] = 1;
10022 * }
10023 * }
10024 */
10025 .insns = {
10026 /* main */
10027 BPF_MOV64_IMM(BPF_REG_1, 0),
10028 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10029 BPF_MOV64_IMM(BPF_REG_1, 1),
10030 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10031 BPF_MOV64_IMM(BPF_REG_1, 1),
10032 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
10033 BPF_MOV64_IMM(BPF_REG_0, 0),
10034 BPF_EXIT_INSN(),
10035 /* A */
10036 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
10037 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10038 BPF_EXIT_INSN(),
10039 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10040 BPF_EXIT_INSN(),
10041 /* B */
10042 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10043 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10044 BPF_EXIT_INSN(),
10045 },
10046 .prog_type = BPF_PROG_TYPE_XDP,
10047 .result = REJECT,
10048 .errstr = "combined stack",
10049 },
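	/* Besides total stack size, call nesting depth is bounded as well:
	 * test5 chains calls through eight subprograms with no stack usage
	 * at all and is still rejected with "call stack".
	 */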
10050 {
10051	"calls: stack depth check using three frames. test5",
10052 .insns = {
10053 /* main */
10054 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
10055 BPF_EXIT_INSN(),
10056 /* A */
10057 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10058 BPF_EXIT_INSN(),
10059 /* B */
10060 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
10061 BPF_EXIT_INSN(),
10062 /* C */
10063 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
10064 BPF_EXIT_INSN(),
10065 /* D */
10066 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
10067 BPF_EXIT_INSN(),
10068 /* E */
10069 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
10070 BPF_EXIT_INSN(),
10071 /* F */
10072 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
10073 BPF_EXIT_INSN(),
10074 /* G */
10075 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
10076 BPF_EXIT_INSN(),
10077 /* H */
10078 BPF_MOV64_IMM(BPF_REG_0, 0),
10079 BPF_EXIT_INSN(),
10080 },
10081 .prog_type = BPF_PROG_TYPE_XDP,
10082 .errstr = "call stack",
10083 .result = REJECT,
10084 },
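	/* The next tests cover sharing stack directly between frames: a
	 * caller may hand the callee a pointer into its own stack and let it
	 * read or write scalars through it, but storing a pointer value
	 * through such a slot is refused here ("cannot spill"), and a callee
	 * may not return a pointer into its own, about to be released, stack
	 * frame ("cannot return stack pointer").
	 */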
10085 {
10086	"calls: spill into caller stack frame",
10087 .insns = {
10088 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10089 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10091 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10092 BPF_EXIT_INSN(),
10093 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
10094 BPF_MOV64_IMM(BPF_REG_0, 0),
10095 BPF_EXIT_INSN(),
10096 },
10097 .prog_type = BPF_PROG_TYPE_XDP,
10098 .errstr = "cannot spill",
10099 .result = REJECT,
10100 },
10101 {
10102	"calls: write into caller stack frame",
10103 .insns = {
10104 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10106 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10107 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10108 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10109 BPF_EXIT_INSN(),
10110 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
10111 BPF_MOV64_IMM(BPF_REG_0, 0),
10112 BPF_EXIT_INSN(),
10113 },
10114 .prog_type = BPF_PROG_TYPE_XDP,
10115 .result = ACCEPT,
10116	.retval = 42,
10117	},
10118 {
10119 "calls: write into callee stack frame",
10120 .insns = {
10121 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10122 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
10123 BPF_EXIT_INSN(),
10124 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
10125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
10126 BPF_EXIT_INSN(),
10127 },
10128 .prog_type = BPF_PROG_TYPE_XDP,
10129 .errstr = "cannot return stack pointer",
10130 .result = REJECT,
10131 },
10132 {
10133	"calls: two calls with stack write and void return",
10134 .insns = {
10135 /* main prog */
10136 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10137 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10139 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10141 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10142 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10143 BPF_EXIT_INSN(),
10144
10145 /* subprog 1 */
10146 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10147 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10148 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10149 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10151 BPF_EXIT_INSN(),
10152
10153 /* subprog 2 */
10154 /* write into stack frame of main prog */
10155 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
10156 BPF_EXIT_INSN(), /* void return */
10157 },
10158 .prog_type = BPF_PROG_TYPE_XDP,
10159 .result = ACCEPT,
10160 },
10161 {
10162 "calls: ambiguous return value",
10163 .insns = {
10164 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10166 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10167 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10168 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10169 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10170 BPF_EXIT_INSN(),
10171 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10172 BPF_MOV64_IMM(BPF_REG_0, 0),
10173 BPF_EXIT_INSN(),
10174 },
10175 .errstr_unpriv = "allowed for root only",
10176 .result_unpriv = REJECT,
10177 .errstr = "R0 !read_ok",
10178 .result = REJECT,
10179 },
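	/* In the following tests a callee obtains a map value pointer via
	 * bpf_map_lookup_elem() and stores it (or NULL) into a stack slot of
	 * an earlier frame. The verifier has to track per path what each
	 * slot holds, so the caller may only dereference the reloaded
	 * pointer on paths where it was actually written and is known to be
	 * non-NULL.
	 */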
10180 {
10181 "calls: two calls that return map_value",
10182 .insns = {
10183 /* main prog */
10184 /* pass fp-16, fp-8 into a function */
10185 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10189 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10190
10191 /* fetch map_value_ptr from the stack of this function */
10192 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
10193 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10194 /* write into map value */
10195 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10196	/* fetch second map_value_ptr from the stack */
10197 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10198 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10199 /* write into map value */
10200 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10201 BPF_MOV64_IMM(BPF_REG_0, 0),
10202 BPF_EXIT_INSN(),
10203
10204 /* subprog 1 */
10205 /* call 3rd function twice */
10206 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10207 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10208 /* first time with fp-8 */
10209 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10210 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10211 /* second time with fp-16 */
10212 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10213 BPF_EXIT_INSN(),
10214
10215 /* subprog 2 */
10216 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10217 /* lookup from map */
10218 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10219 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10221 BPF_LD_MAP_FD(BPF_REG_1, 0),
10222 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10223 BPF_FUNC_map_lookup_elem),
10224 /* write map_value_ptr into stack frame of main prog */
10225 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10226 BPF_MOV64_IMM(BPF_REG_0, 0),
10227 BPF_EXIT_INSN(), /* return 0 */
10228 },
10229 .prog_type = BPF_PROG_TYPE_XDP,
10230 .fixup_map1 = { 23 },
10231 .result = ACCEPT,
10232 },
10233 {
10234 "calls: two calls that return map_value with bool condition",
10235 .insns = {
10236 /* main prog */
10237 /* pass fp-16, fp-8 into a function */
10238 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10239 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10240 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10242 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10243 BPF_MOV64_IMM(BPF_REG_0, 0),
10244 BPF_EXIT_INSN(),
10245
10246 /* subprog 1 */
10247 /* call 3rd function twice */
10248 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10249 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10250 /* first time with fp-8 */
10251 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10252 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10253 /* fetch map_value_ptr from the stack of this function */
10254 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10255 /* write into map value */
10256 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10257 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10258 /* second time with fp-16 */
10259 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10260 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10261	/* fetch second map_value_ptr from the stack */
10262 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10263 /* write into map value */
10264 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10265 BPF_EXIT_INSN(),
10266
10267 /* subprog 2 */
10268 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10269 /* lookup from map */
10270 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10273 BPF_LD_MAP_FD(BPF_REG_1, 0),
10274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10275 BPF_FUNC_map_lookup_elem),
10276 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10277 BPF_MOV64_IMM(BPF_REG_0, 0),
10278 BPF_EXIT_INSN(), /* return 0 */
10279 /* write map_value_ptr into stack frame of main prog */
10280 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10281 BPF_MOV64_IMM(BPF_REG_0, 1),
10282 BPF_EXIT_INSN(), /* return 1 */
10283 },
10284 .prog_type = BPF_PROG_TYPE_XDP,
10285 .fixup_map1 = { 23 },
10286 .result = ACCEPT,
10287 },
10288 {
10289 "calls: two calls that return map_value with incorrect bool check",
10290 .insns = {
10291 /* main prog */
10292 /* pass fp-16, fp-8 into a function */
10293 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10295 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10296 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10297 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10298 BPF_MOV64_IMM(BPF_REG_0, 0),
10299 BPF_EXIT_INSN(),
10300
10301 /* subprog 1 */
10302 /* call 3rd function twice */
10303 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10304 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10305 /* first time with fp-8 */
10306 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10307 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10308 /* fetch map_value_ptr from the stack of this function */
10309 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10310 /* write into map value */
10311 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10312 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10313 /* second time with fp-16 */
10314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10315 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10316	/* fetch second map_value_ptr from the stack */
10317 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10318 /* write into map value */
10319 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10320 BPF_EXIT_INSN(),
10321
10322 /* subprog 2 */
10323 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10324 /* lookup from map */
10325 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10326 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10328 BPF_LD_MAP_FD(BPF_REG_1, 0),
10329 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10330 BPF_FUNC_map_lookup_elem),
10331 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10332 BPF_MOV64_IMM(BPF_REG_0, 0),
10333 BPF_EXIT_INSN(), /* return 0 */
10334 /* write map_value_ptr into stack frame of main prog */
10335 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10336 BPF_MOV64_IMM(BPF_REG_0, 1),
10337 BPF_EXIT_INSN(), /* return 1 */
10338 },
10339 .prog_type = BPF_PROG_TYPE_XDP,
10340 .fixup_map1 = { 23 },
10341 .result = REJECT,
10342 .errstr = "invalid read from stack off -16+0 size 8",
10343 },
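	/* The "arg=ptr_stack_of_caller" tests pass stack pointers plus 0/1
	 * validity flags down two call levels. Map value bounds are still
	 * enforced at that depth: the only difference between test1 and
	 * test2 is an 8-byte write at offset 2 of an 8-byte value, which is
	 * rejected.
	 */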
10344 {
10345 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
10346 .insns = {
10347 /* main prog */
10348 /* pass fp-16, fp-8 into a function */
10349 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10351 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10353 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10354 BPF_MOV64_IMM(BPF_REG_0, 0),
10355 BPF_EXIT_INSN(),
10356
10357 /* subprog 1 */
10358 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10359 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10360 /* 1st lookup from map */
10361 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10362 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10364 BPF_LD_MAP_FD(BPF_REG_1, 0),
10365 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10366 BPF_FUNC_map_lookup_elem),
10367 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10368 BPF_MOV64_IMM(BPF_REG_8, 0),
10369 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10370 /* write map_value_ptr into stack frame of main prog at fp-8 */
10371 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10372 BPF_MOV64_IMM(BPF_REG_8, 1),
10373
10374 /* 2nd lookup from map */
10375 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10377 BPF_LD_MAP_FD(BPF_REG_1, 0),
10378 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10379 BPF_FUNC_map_lookup_elem),
10380 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10381 BPF_MOV64_IMM(BPF_REG_9, 0),
10382 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10383 /* write map_value_ptr into stack frame of main prog at fp-16 */
10384 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10385 BPF_MOV64_IMM(BPF_REG_9, 1),
10386
10387 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10388 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10389 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10390 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10391 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10392 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10393 BPF_EXIT_INSN(),
10394
10395 /* subprog 2 */
10396 /* if arg2 == 1 do *arg1 = 0 */
10397 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10398 /* fetch map_value_ptr from the stack of this function */
10399 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10400 /* write into map value */
10401 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10402
10403 /* if arg4 == 1 do *arg3 = 0 */
10404 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10405 /* fetch map_value_ptr from the stack of this function */
10406 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10407 /* write into map value */
10408 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10409 BPF_EXIT_INSN(),
10410 },
10411 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10412 .fixup_map1 = { 12, 22 },
10413 .result = REJECT,
10414 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10415 },
10416 {
10417 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
10418 .insns = {
10419 /* main prog */
10420 /* pass fp-16, fp-8 into a function */
10421 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10422 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10423 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10425 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10426 BPF_MOV64_IMM(BPF_REG_0, 0),
10427 BPF_EXIT_INSN(),
10428
10429 /* subprog 1 */
10430 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10431 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10432 /* 1st lookup from map */
10433 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10436 BPF_LD_MAP_FD(BPF_REG_1, 0),
10437 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10438 BPF_FUNC_map_lookup_elem),
10439 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10440 BPF_MOV64_IMM(BPF_REG_8, 0),
10441 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10442 /* write map_value_ptr into stack frame of main prog at fp-8 */
10443 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10444 BPF_MOV64_IMM(BPF_REG_8, 1),
10445
10446 /* 2nd lookup from map */
10447 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10449 BPF_LD_MAP_FD(BPF_REG_1, 0),
10450 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10451 BPF_FUNC_map_lookup_elem),
10452 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10453 BPF_MOV64_IMM(BPF_REG_9, 0),
10454 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10455 /* write map_value_ptr into stack frame of main prog at fp-16 */
10456 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10457 BPF_MOV64_IMM(BPF_REG_9, 1),
10458
10459 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10460 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10461 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10462 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10463 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10464 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10465 BPF_EXIT_INSN(),
10466
10467 /* subprog 2 */
10468 /* if arg2 == 1 do *arg1 = 0 */
10469 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10470 /* fetch map_value_ptr from the stack of this function */
10471 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10472 /* write into map value */
10473 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10474
10475 /* if arg4 == 1 do *arg3 = 0 */
10476 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10477 /* fetch map_value_ptr from the stack of this function */
10478 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10479 /* write into map value */
10480 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10481 BPF_EXIT_INSN(),
10482 },
10483 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10484 .fixup_map1 = { 12, 22 },
10485 .result = ACCEPT,
10486 },
10487 {
10488 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
10489 .insns = {
10490 /* main prog */
10491 /* pass fp-16, fp-8 into a function */
10492 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10494 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10496 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
10497 BPF_MOV64_IMM(BPF_REG_0, 0),
10498 BPF_EXIT_INSN(),
10499
10500 /* subprog 1 */
10501 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10502 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10503 /* 1st lookup from map */
10504 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
10505 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10507 BPF_LD_MAP_FD(BPF_REG_1, 0),
10508 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10509 BPF_FUNC_map_lookup_elem),
10510 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10511 BPF_MOV64_IMM(BPF_REG_8, 0),
10512 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10513 /* write map_value_ptr into stack frame of main prog at fp-8 */
10514 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10515 BPF_MOV64_IMM(BPF_REG_8, 1),
10516
10517 /* 2nd lookup from map */
10518 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10519 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10520 BPF_LD_MAP_FD(BPF_REG_1, 0),
10521 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10522 BPF_FUNC_map_lookup_elem),
10523 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10524 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
10525 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10526 /* write map_value_ptr into stack frame of main prog at fp-16 */
10527 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10528 BPF_MOV64_IMM(BPF_REG_9, 1),
10529
10530 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10531 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
10532 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10533 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10534 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10535 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
10536 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
10537
10538 /* subprog 2 */
10539 /* if arg2 == 1 do *arg1 = 0 */
10540 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10541 /* fetch map_value_ptr from the stack of this function */
10542 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10543 /* write into map value */
10544 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10545
10546 /* if arg4 == 1 do *arg3 = 0 */
10547 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10548 /* fetch map_value_ptr from the stack of this function */
10549 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10550 /* write into map value */
10551 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10552 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
10553 },
10554 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10555 .fixup_map1 = { 12, 22 },
10556 .result = REJECT,
10557 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10558 },
10559 {
10560 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
10561 .insns = {
10562 /* main prog */
10563 /* pass fp-16, fp-8 into a function */
10564 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10565 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10566 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10569 BPF_MOV64_IMM(BPF_REG_0, 0),
10570 BPF_EXIT_INSN(),
10571
10572 /* subprog 1 */
10573 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10574 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10575 /* 1st lookup from map */
10576 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10577 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10579 BPF_LD_MAP_FD(BPF_REG_1, 0),
10580 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10581 BPF_FUNC_map_lookup_elem),
10582 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10583 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10584 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10585 BPF_MOV64_IMM(BPF_REG_8, 0),
10586 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10587 BPF_MOV64_IMM(BPF_REG_8, 1),
10588
10589 /* 2nd lookup from map */
10590 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10591 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10592 BPF_LD_MAP_FD(BPF_REG_1, 0),
10593 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10594 BPF_FUNC_map_lookup_elem),
10595 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10596 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10597 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10598 BPF_MOV64_IMM(BPF_REG_9, 0),
10599 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10600 BPF_MOV64_IMM(BPF_REG_9, 1),
10601
10602 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10603 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10604 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10605 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10606 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10607 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10608 BPF_EXIT_INSN(),
10609
10610 /* subprog 2 */
10611 /* if arg2 == 1 do *arg1 = 0 */
10612 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10613 /* fetch map_value_ptr from the stack of this function */
10614 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10615 /* write into map value */
10616 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10617
10618 /* if arg4 == 1 do *arg3 = 0 */
10619 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10620 /* fetch map_value_ptr from the stack of this function */
10621 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10622 /* write into map value */
10623 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10624 BPF_EXIT_INSN(),
10625 },
10626 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10627 .fixup_map1 = { 12, 22 },
10628 .result = ACCEPT,
10629 },
10630 {
10631 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
10632 .insns = {
10633 /* main prog */
10634 /* pass fp-16, fp-8 into a function */
10635 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10636 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10637 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10639 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10640 BPF_MOV64_IMM(BPF_REG_0, 0),
10641 BPF_EXIT_INSN(),
10642
10643 /* subprog 1 */
10644 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10645 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10646 /* 1st lookup from map */
10647 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10648 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10649 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10650 BPF_LD_MAP_FD(BPF_REG_1, 0),
10651 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10652 BPF_FUNC_map_lookup_elem),
10653 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10654 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10655 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10656 BPF_MOV64_IMM(BPF_REG_8, 0),
10657 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10658 BPF_MOV64_IMM(BPF_REG_8, 1),
10659
10660 /* 2nd lookup from map */
10661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10663 BPF_LD_MAP_FD(BPF_REG_1, 0),
10664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10665 BPF_FUNC_map_lookup_elem),
10666 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10667 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10668 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10669 BPF_MOV64_IMM(BPF_REG_9, 0),
10670 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10671 BPF_MOV64_IMM(BPF_REG_9, 1),
10672
10673 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10674 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10675 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10676 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10677 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10678 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10679 BPF_EXIT_INSN(),
10680
10681 /* subprog 2 */
10682 /* if arg2 == 1 do *arg1 = 0 */
10683 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10684 /* fetch map_value_ptr from the stack of this function */
10685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10686 /* write into map value */
10687 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10688
10689 /* if arg4 == 0 do *arg3 = 0 */
10690 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
10691 /* fetch map_value_ptr from the stack of this function */
10692 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10693 /* write into map value */
10694 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10695 BPF_EXIT_INSN(),
10696 },
10697 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10698 .fixup_map1 = { 12, 22 },
10699 .result = REJECT,
10700 .errstr = "R0 invalid mem access 'inv'",
10701 },
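	/* Finally, the "pkt_ptr spill" tests store packet pointers from a
	 * callee into the caller's stack. The variants differ in whether the
	 * range check done in the callee still covers the spilled pointer at
	 * the point where it is used again; when the verifier cannot prove
	 * that, the access is rejected.
	 */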
10702 {
10703 "calls: pkt_ptr spill into caller stack",
10704 .insns = {
10705 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10707 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10708 BPF_EXIT_INSN(),
10709
10710 /* subprog 1 */
10711 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10712 offsetof(struct __sk_buff, data)),
10713 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10714 offsetof(struct __sk_buff, data_end)),
10715 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10716 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10717 /* spill unchecked pkt_ptr into stack of caller */
10718 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10719 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10720 /* now the pkt range is verified, read pkt_ptr from stack */
10721 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10722 /* write 4 bytes into packet */
10723 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10724 BPF_EXIT_INSN(),
10725 },
10726 .result = ACCEPT,
10727 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10728	.retval = POINTER_VALUE,
10729	},
10730	{
10731	"calls: pkt_ptr spill into caller stack 2",
10732 .insns = {
10733 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10735 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10736 /* Marking is still kept, but not in all cases safe. */
10737 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10738 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10739 BPF_EXIT_INSN(),
10740
10741 /* subprog 1 */
10742 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10743 offsetof(struct __sk_buff, data)),
10744 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10745 offsetof(struct __sk_buff, data_end)),
10746 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10747 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10748 /* spill unchecked pkt_ptr into stack of caller */
10749 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10750 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10751 /* now the pkt range is verified, read pkt_ptr from stack */
10752 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10753 /* write 4 bytes into packet */
10754 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10755 BPF_EXIT_INSN(),
10756 },
10757 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10758 .errstr = "invalid access to packet",
10759 .result = REJECT,
10760 },
	{
		"calls: pkt_ptr spill into caller stack 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			/* Marking is still kept and safe here. */
			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),

			/* subprog 1 */
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			/* spill unchecked pkt_ptr into stack of caller */
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
			BPF_MOV64_IMM(BPF_REG_5, 1),
			/* now the pkt range is verified, read pkt_ptr from stack */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
			/* write 4 bytes into packet */
			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"calls: pkt_ptr spill into caller stack 4",
		.insns = {
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			/* Check marking propagated. */
			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),

			/* subprog 1 */
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			/* spill unchecked pkt_ptr into stack of caller */
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_5, 1),
			/* don't read back pkt_ptr from stack here */
			/* write 4 bytes into packet */
			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"calls: pkt_ptr spill into caller stack 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
			BPF_EXIT_INSN(),

			/* subprog 1 */
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
			/* spill checked pkt_ptr into stack of caller */
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_5, 1),
			/* don't read back pkt_ptr from stack here */
			/* write 4 bytes into packet */
			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "same insn cannot be used with different",
		.result = REJECT,
	},
	{
		"calls: pkt_ptr spill into caller stack 6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
			BPF_EXIT_INSN(),

			/* subprog 1 */
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
			/* spill checked pkt_ptr into stack of caller */
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_5, 1),
			/* don't read back pkt_ptr from stack here */
			/* write 4 bytes into packet */
			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "R4 invalid mem access",
		.result = REJECT,
	},
	{
		"calls: pkt_ptr spill into caller stack 7",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
			BPF_EXIT_INSN(),

			/* subprog 1 */
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
			/* spill checked pkt_ptr into stack of caller */
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_5, 1),
			/* don't read back pkt_ptr from stack here */
			/* write 4 bytes into packet */
			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "R4 invalid mem access",
		.result = REJECT,
	},
	{
		"calls: pkt_ptr spill into caller stack 8",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
			BPF_EXIT_INSN(),

			/* subprog 1 */
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
			/* spill checked pkt_ptr into stack of caller */
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_5, 1),
			/* don't read back pkt_ptr from stack here */
			/* write 4 bytes into packet */
			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
	},
	{
		"calls: pkt_ptr spill into caller stack 9",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
			BPF_EXIT_INSN(),

			/* subprog 1 */
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			/* spill unchecked pkt_ptr into stack of caller */
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_5, 1),
			/* don't read back pkt_ptr from stack here */
			/* write 4 bytes into packet */
			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "invalid access to packet",
		.result = REJECT,
	},
	{
		"calls: caller stack init to zero or map_value_or_null",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
			/* fetch map_value_or_null or const_zero from stack */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			/* store into map_value */
			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),

			/* subprog 1 */
			/* if (ctx == 0) return; */
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
			/* else bpf_map_lookup() and *(fp - 8) = r0 */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 13 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"calls: stack init to zero and pruning",
		.insns = {
			/* first make allocated_stack 16 bytes */
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
			/* now fork the execution such that the false branch
			 * of the JGT insn will be verified second and it skips
			 * the zero init of the fp-8 stack slot. If stack
			 * liveness marking is missing the live_read marks from
			 * processing the map_lookup call, then pruning will
			 * incorrectly assume that the fp-8 stack slot was
			 * unused in the fall-through branch and will accept
			 * the program incorrectly.
			 */
			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 6 },
		.errstr = "invalid indirect read from stack off -8+0 size 8",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
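	/* The "search pruning" tests below verify that state pruning does not
	 * skip a branch which uses the stack or map values differently from
	 * an already-verified sibling branch.
	 */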
	{
		"search pruning: all branches should be verified (nop operation)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_JMP_A(1),
			BPF_MOV64_IMM(BPF_REG_4, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
			BPF_MOV64_IMM(BPF_REG_6, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R6 invalid mem access 'inv'",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		"search pruning: all branches should be verified (invalid stack access)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
			BPF_JMP_A(1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "invalid read from stack off -16+0 size 8",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
};

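/* The insns array of each test is fixed size (MAX_INSNS); determine the
 * effective program length by scanning backwards for the last instruction
 * that is not all-zero padding.
 */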
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

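/* Create a BPF_F_NO_PREALLOC hash map with an 8-byte key and the requested
 * value size; used by do_test_fixup() to satisfy fixup_map1/fixup_map2
 * references.
 */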
static int create_map(uint32_t size_value, uint32_t max_elem)
{
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
			    size_value, max_elem, BPF_F_NO_PREALLOC);
	if (fd < 0)
		printf("Failed to create hash map '%s'!\n", strerror(errno));

	return fd;
}

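/* Create a small prog array for tests referencing fixup_prog; only the map
 * fd is patched into the test program, the array itself stays empty.
 */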
static int create_prog_array(void)
{
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
			    sizeof(int), 4, 0);
	if (fd < 0)
		printf("Failed to create prog array '%s'!\n", strerror(errno));

	return fd;
}

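/* Create an ARRAY_OF_MAPS whose inner map is a one-element array; the inner
 * map fd is only needed at creation time and is closed again before
 * returning the outer map fd.
 */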
static int create_map_in_map(void)
{
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1, 0);
	if (inner_map_fd < 0) {
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
					     sizeof(int), inner_map_fd, 1, 0);
	if (outer_map_fd < 0)
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));

	close(inner_map_fd);

	return outer_map_fd;
}

static char bpf_vlog[32768];

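/* Patch the test program in place: each fixup_* array lists instruction
 * indices of BPF_LD_MAP_FD(reg, 0) placeholders whose imm field is
 * overwritten with the fd of a freshly created map of the matching type.
 * For example, a test with .fixup_map1 = { 3 } expects the map fd load to
 * start at instruction 3.
 */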
static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
			  int *map_fds)
{
	int *fixup_map1 = test->fixup_map1;
	int *fixup_map2 = test->fixup_map2;
	int *fixup_prog = test->fixup_prog;
	int *fixup_map_in_map = test->fixup_map_in_map;

	/* Allocating HTs with 1 elem is fine here, since we only test
	 * the verifier and never do a runtime lookup, so the only thing
	 * that really matters is the value size in this case.
	 */
	if (*fixup_map1) {
		map_fds[0] = create_map(sizeof(long long), 1);
		do {
			prog[*fixup_map1].imm = map_fds[0];
			fixup_map1++;
		} while (*fixup_map1);
	}

	if (*fixup_map2) {
		map_fds[1] = create_map(sizeof(struct test_val), 1);
		do {
			prog[*fixup_map2].imm = map_fds[1];
			fixup_map2++;
		} while (*fixup_map2);
	}

	if (*fixup_prog) {
		map_fds[2] = create_prog_array();
		do {
			prog[*fixup_prog].imm = map_fds[2];
			fixup_prog++;
		} while (*fixup_prog);
	}

	if (*fixup_map_in_map) {
		map_fds[3] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[3];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}
}

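/* Run one test case: apply the map fixups, load the program through
 * bpf_verify_program(), compare the verifier outcome and log against the
 * expected result/error string, and, if the program loaded successfully,
 * execute it once via bpf_prog_test_run() to check the expected retval.
 */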
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, reject_from_alignment;
	struct bpf_insn *prog = test->insns;
	int prog_len = probe_filter_length(prog);
	char data_in[TEST_DATA_LEN] = {};
	int prog_type = test->prog_type;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	uint32_t retval;
	int i, err;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	do_test_fixup(test, prog, map_fds);

	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len,
				     test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	reject_from_alignment = fd_prog < 0 &&
				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
				strstr(bpf_vlog, "Unknown alignment.");
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (reject_from_alignment) {
		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
		       strerror(errno));
		goto fail_log;
	}
#endif
	if (expected_ret == ACCEPT) {
		if (fd_prog < 0 && !reject_from_alignment) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
			printf("FAIL\nUnexpected error message!\n");
			goto fail_log;
		}
	}

	if (fd_prog >= 0) {
		err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
					NULL, NULL, &retval, NULL);
		if (err && errno != 524 /* ENOTSUPP */ && errno != EPERM) {
			printf("Unexpected bpf_prog_test_run error\n");
			goto fail_log;
		}
		if (!err && retval != test->retval &&
		    test->retval != POINTER_VALUE) {
			printf("FAIL retval %d != %d\n", retval, test->retval);
			goto fail_log;
		}
	}
	(*passes)++;
	printf("OK%s\n", reject_from_alignment ?
	       " (NOTE: reject due to unknown alignment)" : "");
close_fds:
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}

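/* Check whether CAP_SYS_ADMIN is present in the effective capability set;
 * the result decides whether only the unprivileged test variants can run.
 */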
static bool is_admin(void)
{
	cap_t caps;
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("cap_get_flag");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	if (cap_free(caps))
		perror("cap_free");
	return (sysadmin == CAP_SET);
}

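/* Raise or drop CAP_SYS_ADMIN in the effective set so that a single root
 * invocation can exercise both the privileged and the unprivileged
 * verifier paths.
 */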
static int set_admin(bool admin)
{
	cap_t caps;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
			 admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag");
		goto out;
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}

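/* Iterate over tests[from..to): tests without an explicit prog_type (i.e.
 * socket filters) are additionally run with CAP_SYS_ADMIN dropped ("#N/u"),
 * while all tests are run privileged ("#N/p") when the caller is root.
 */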
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Program types that are not supported by non-root
		 * are skipped right away.
		 */
		if (!test->prog_type) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (!unpriv) {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

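/* Entry point. With no arguments all tests run; one argument selects a
 * single test and two arguments select an inclusive range, e.g. (assuming
 * the binary is built as test_verifier):
 *
 *	./test_verifier		# run all tests
 *	./test_verifier 5	# run only test #5
 *	./test_verifier 5 10	# run tests #5 through #10
 *
 * Without CAP_SYS_ADMIN, RLIMIT_MEMLOCK is capped at 1 MB and only the
 * unprivileged subset of tests is exercised.
 */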
int main(int argc, char **argv)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
	struct rlimit rlim = { 1 << 20, 1 << 20 };
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to = t + 1;
		}
	}

	setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
	return do_test(unpriv, from, to);
}