/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>

#include <bpf/bpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "../../../include/linux/filter.h"

#define MAX_INSNS	BPF_MAXINSNS
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	13
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;

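/*
 * One entry per verifier test case: the instructions to load, the indices of
 * instructions whose immediates get patched with the fd of each auxiliary map
 * type created for the test (one fixup array per map type), the expected
 * verifier error strings for privileged and unprivileged loads, the expected
 * load results, and the expected return value when the program is test-run.
 */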
struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t retval;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
	__u8 data[TEST_DATA_LEN];
	void (*fill_helper)(struct bpf_test *self);
};

/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct other_val {
	long long foo;
	long long bar;
};

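/*
 * Fill helper: emits PUSH_CNT {LD_ABS, skb_vlan_push} pairs followed by the
 * same number of {LD_ABS, skb_vlan_pop} pairs, repeats that block five times,
 * then pads the program out to BPF_MAXINSNS with MOVs and a final exit.
 */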
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
#define PUSH_CNT 51
	unsigned int len = BPF_MAXINSNS;
	struct bpf_insn *insn = self->insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push),
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop),
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
		i++;
	}
	if (++k < 5)
		goto loop;

	for (; i < len - 1; i++)
		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 1] = BPF_EXIT_INSN();
}

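/*
 * Fill helper: a single conditional jump over a long run of LD_ABS
 * instructions, filling the program up to BPF_MAXINSNS.
 */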
static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->insns;
	unsigned int len = BPF_MAXINSNS;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
}

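/*
 * Fill helper: XORs pseudo-random 64-bit immediates (BPF_LD_IMM64) into R0
 * until the program is self->retval instructions long, then folds the upper
 * 32 bits of the accumulated value into the lower ones and stores the
 * expected 32-bit result back into self->retval for the harness to compare.
 */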
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
	struct bpf_insn *insn = self->insns;
	uint64_t res = 0;
	int i = 0;

	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

		res ^= val;
		insn[i++] = tmp[0];
		insn[i++] = tmp[1];
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	res ^= (res >> 32);
	self->retval = (uint32_t)res;
}

/* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
#define BPF_SK_LOOKUP							\
	/* struct bpf_sock_tuple tuple = {} */				\
	BPF_MOV64_IMM(BPF_REG_2, 0),					\
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
	BPF_MOV64_IMM(BPF_REG_4, 0),					\
	BPF_MOV64_IMM(BPF_REG_5, 0),					\
	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)

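/* The verifier test cases themselves. */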
static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = -3,
	},
	{
		"DIV32 by 0, zero check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV32 by 0, zero check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV64 by 0, zero check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD32 by 0, zero check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD32 by 0, zero check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD64 by 0, zero check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV32 by 0, zero check ok, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 2),
			BPF_MOV32_IMM(BPF_REG_2, 16),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 8,
	},
	{
		"DIV32 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV32 by 0, zero check 2, cls",
		.insns = {
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 by 0, zero check, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"MOD32 by 0, zero check ok, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 3),
			BPF_MOV32_IMM(BPF_REG_2, 5),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 2,
	},
	{
		"MOD32 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD32 by 0, zero check 2, cls",
		.insns = {
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD64 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 2),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 2,
	},
	{
		"MOD64 by 0, zero check 2, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, -1),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = -1,
	},
	/* Just make sure that JITs used udiv/umod as otherwise we get
	 * an exception from INT_MIN/-1 overflow similarly as with div
	 * by zero.
	 */
	{
		"DIV32 overflow, check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, -1),
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV32 overflow, check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 overflow, check 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, -1),
			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 overflow, check 2",
		.insns = {
			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"MOD32 overflow, check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, -1),
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = INT_MIN,
	},
	{
		"MOD32 overflow, check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = INT_MIN,
	},
	{
		"MOD64 overflow, check 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, -1),
			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD64 overflow, check 2",
		.insns = {
			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"xor32 zero extend check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_2, -1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
			BPF_MOV32_IMM(BPF_REG_0, 2),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"empty prog",
		.insns = {
		},
		.errstr = "unknown opcode 00",
		.result = REJECT,
	},
	{
		"only exit insn",
		.insns = {
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test6 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test7 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"test8 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "uses reserved fields",
		.result = REJECT,
	},
	{
		"test9 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 1, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test10 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test11 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test12 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"test13 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"arsh32 on imm",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "unknown opcode c4",
	},
	{
		"arsh32 on reg",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 5),
			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "unknown opcode cc",
	},
	{
		"arsh64 on imm",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"arsh64 on reg",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 5),
			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "not an exit",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit in all branches",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode 8d",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func unknown#1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 2 },
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"invalid fp arithmetic",
		/* If this gets ever changed, make sure JITs can deal with it. */
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 subtraction from stack pointer",
		.result = REJECT,
	},
	{
		"non-invalid fp arithmetic",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"invalid argument register",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 !read_ok",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"non-invalid argument register",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
			/* should be able to access R0 = *(R2 + 8) */
			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R0 leaks addr",
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.retval = POINTER_VALUE,
	},
	{
		"check valid spill/fill, skb mark",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = ACCEPT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.errstr = "corrupted spill",
		.result = REJECT,
	},
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode 00",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode ff",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode ff",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned stack access",
		.result = REJECT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "misaligned value access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 24 },
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.retval = -ENOENT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, napi_id)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 4 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
		},
		.fixup_map_hash_8b = { 6 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
		},
		.fixup_map_hash_8b = { 7 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001450 {
John Fastabend41bc94f2017-08-15 22:33:56 -07001451 "invalid access __sk_buff family",
1452 .insns = {
1453 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1454 offsetof(struct __sk_buff, family)),
1455 BPF_EXIT_INSN(),
1456 },
1457 .errstr = "invalid bpf_context access",
1458 .result = REJECT,
1459 },
1460 {
1461 "invalid access __sk_buff remote_ip4",
1462 .insns = {
1463 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1464 offsetof(struct __sk_buff, remote_ip4)),
1465 BPF_EXIT_INSN(),
1466 },
1467 .errstr = "invalid bpf_context access",
1468 .result = REJECT,
1469 },
1470 {
1471 "invalid access __sk_buff local_ip4",
1472 .insns = {
1473 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1474 offsetof(struct __sk_buff, local_ip4)),
1475 BPF_EXIT_INSN(),
1476 },
1477 .errstr = "invalid bpf_context access",
1478 .result = REJECT,
1479 },
1480 {
1481 "invalid access __sk_buff remote_ip6",
1482 .insns = {
1483 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1484 offsetof(struct __sk_buff, remote_ip6)),
1485 BPF_EXIT_INSN(),
1486 },
1487 .errstr = "invalid bpf_context access",
1488 .result = REJECT,
1489 },
1490 {
1491 "invalid access __sk_buff local_ip6",
1492 .insns = {
1493 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1494 offsetof(struct __sk_buff, local_ip6)),
1495 BPF_EXIT_INSN(),
1496 },
1497 .errstr = "invalid bpf_context access",
1498 .result = REJECT,
1499 },
1500 {
1501 "invalid access __sk_buff remote_port",
1502 .insns = {
1503 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1504 offsetof(struct __sk_buff, remote_port)),
1505 BPF_EXIT_INSN(),
1506 },
1507 .errstr = "invalid bpf_context access",
1508 .result = REJECT,
1509 },
1510 {
1511 "invalid access __sk_buff remote_port",
1512 .insns = {
1513 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1514 offsetof(struct __sk_buff, local_port)),
1515 BPF_EXIT_INSN(),
1516 },
1517 .errstr = "invalid bpf_context access",
1518 .result = REJECT,
1519 },
1520 {
1521 "valid access __sk_buff family",
1522 .insns = {
1523 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1524 offsetof(struct __sk_buff, family)),
1525 BPF_EXIT_INSN(),
1526 },
1527 .result = ACCEPT,
1528 .prog_type = BPF_PROG_TYPE_SK_SKB,
1529 },
1530 {
1531 "valid access __sk_buff remote_ip4",
1532 .insns = {
1533 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1534 offsetof(struct __sk_buff, remote_ip4)),
1535 BPF_EXIT_INSN(),
1536 },
1537 .result = ACCEPT,
1538 .prog_type = BPF_PROG_TYPE_SK_SKB,
1539 },
1540 {
1541 "valid access __sk_buff local_ip4",
1542 .insns = {
1543 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1544 offsetof(struct __sk_buff, local_ip4)),
1545 BPF_EXIT_INSN(),
1546 },
1547 .result = ACCEPT,
1548 .prog_type = BPF_PROG_TYPE_SK_SKB,
1549 },
1550 {
1551 "valid access __sk_buff remote_ip6",
1552 .insns = {
1553 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1554 offsetof(struct __sk_buff, remote_ip6[0])),
1555 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1556 offsetof(struct __sk_buff, remote_ip6[1])),
1557 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1558 offsetof(struct __sk_buff, remote_ip6[2])),
1559 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1560 offsetof(struct __sk_buff, remote_ip6[3])),
1561 BPF_EXIT_INSN(),
1562 },
1563 .result = ACCEPT,
1564 .prog_type = BPF_PROG_TYPE_SK_SKB,
1565 },
1566 {
1567 "valid access __sk_buff local_ip6",
1568 .insns = {
1569 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1570 offsetof(struct __sk_buff, local_ip6[0])),
1571 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1572 offsetof(struct __sk_buff, local_ip6[1])),
1573 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1574 offsetof(struct __sk_buff, local_ip6[2])),
1575 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1576 offsetof(struct __sk_buff, local_ip6[3])),
1577 BPF_EXIT_INSN(),
1578 },
1579 .result = ACCEPT,
1580 .prog_type = BPF_PROG_TYPE_SK_SKB,
1581 },
1582 {
1583 "valid access __sk_buff remote_port",
1584 .insns = {
1585 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1586 offsetof(struct __sk_buff, remote_port)),
1587 BPF_EXIT_INSN(),
1588 },
1589 .result = ACCEPT,
1590 .prog_type = BPF_PROG_TYPE_SK_SKB,
1591 },
1592 {
1593 "valid access __sk_buff remote_port",
1594 .insns = {
1595 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1596 offsetof(struct __sk_buff, local_port)),
1597 BPF_EXIT_INSN(),
1598 },
1599 .result = ACCEPT,
1600 .prog_type = BPF_PROG_TYPE_SK_SKB,
1601 },
1602 {
John Fastabended850542017-08-28 07:11:24 -07001603 "invalid access of tc_classid for SK_SKB",
1604 .insns = {
1605 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1606 offsetof(struct __sk_buff, tc_classid)),
1607 BPF_EXIT_INSN(),
1608 },
1609 .result = REJECT,
1610 .prog_type = BPF_PROG_TYPE_SK_SKB,
1611 .errstr = "invalid bpf_context access",
1612 },
1613 {
John Fastabendf7e9cb12017-10-18 07:10:58 -07001614 "invalid access of skb->mark for SK_SKB",
1615 .insns = {
1616 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1617 offsetof(struct __sk_buff, mark)),
1618 BPF_EXIT_INSN(),
1619 },
1620 .result = REJECT,
1621 .prog_type = BPF_PROG_TYPE_SK_SKB,
1622 .errstr = "invalid bpf_context access",
1623 },
1624 {
1625 "check skb->mark is not writeable by SK_SKB",
John Fastabended850542017-08-28 07:11:24 -07001626 .insns = {
1627 BPF_MOV64_IMM(BPF_REG_0, 0),
1628 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1629 offsetof(struct __sk_buff, mark)),
1630 BPF_EXIT_INSN(),
1631 },
John Fastabendf7e9cb12017-10-18 07:10:58 -07001632 .result = REJECT,
John Fastabended850542017-08-28 07:11:24 -07001633 .prog_type = BPF_PROG_TYPE_SK_SKB,
John Fastabendf7e9cb12017-10-18 07:10:58 -07001634 .errstr = "invalid bpf_context access",
John Fastabended850542017-08-28 07:11:24 -07001635 },
1636 {
1637 "check skb->tc_index is writeable by SK_SKB",
1638 .insns = {
1639 BPF_MOV64_IMM(BPF_REG_0, 0),
1640 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1641 offsetof(struct __sk_buff, tc_index)),
1642 BPF_EXIT_INSN(),
1643 },
1644 .result = ACCEPT,
1645 .prog_type = BPF_PROG_TYPE_SK_SKB,
1646 },
1647 {
1648 "check skb->priority is writeable by SK_SKB",
1649 .insns = {
1650 BPF_MOV64_IMM(BPF_REG_0, 0),
1651 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1652 offsetof(struct __sk_buff, priority)),
1653 BPF_EXIT_INSN(),
1654 },
1655 .result = ACCEPT,
1656 .prog_type = BPF_PROG_TYPE_SK_SKB,
1657 },
1658 {
1659 "direct packet read for SK_SKB",
1660 .insns = {
1661 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1662 offsetof(struct __sk_buff, data)),
1663 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1664 offsetof(struct __sk_buff, data_end)),
1665 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1666 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1667 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1668 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1669 BPF_MOV64_IMM(BPF_REG_0, 0),
1670 BPF_EXIT_INSN(),
1671 },
1672 .result = ACCEPT,
1673 .prog_type = BPF_PROG_TYPE_SK_SKB,
1674 },
1675 {
1676 "direct packet write for SK_SKB",
1677 .insns = {
1678 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1679 offsetof(struct __sk_buff, data)),
1680 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1681 offsetof(struct __sk_buff, data_end)),
1682 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1684 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1685 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1686 BPF_MOV64_IMM(BPF_REG_0, 0),
1687 BPF_EXIT_INSN(),
1688 },
1689 .result = ACCEPT,
1690 .prog_type = BPF_PROG_TYPE_SK_SKB,
1691 },
1692 {
1693 "overlapping checks for direct packet access SK_SKB",
1694 .insns = {
1695 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1696 offsetof(struct __sk_buff, data)),
1697 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1698 offsetof(struct __sk_buff, data_end)),
1699 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1701 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1702 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1703 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1704 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1705 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1706 BPF_MOV64_IMM(BPF_REG_0, 0),
1707 BPF_EXIT_INSN(),
1708 },
1709 .result = ACCEPT,
1710 .prog_type = BPF_PROG_TYPE_SK_SKB,
1711 },
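	/*
	 * BPF_PROG_TYPE_SK_MSG programs see a struct sk_msg_md context; the
	 * tests below probe which of its fields may be read, and at what
	 * size and offset.
	 */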
1712 {
John Fastabend4da0dca2018-05-17 14:17:03 -07001713 "valid access family in SK_MSG",
1714 .insns = {
1715 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1716 offsetof(struct sk_msg_md, family)),
1717 BPF_EXIT_INSN(),
1718 },
1719 .result = ACCEPT,
1720 .prog_type = BPF_PROG_TYPE_SK_MSG,
1721 },
1722 {
1723 "valid access remote_ip4 in SK_MSG",
1724 .insns = {
1725 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1726 offsetof(struct sk_msg_md, remote_ip4)),
1727 BPF_EXIT_INSN(),
1728 },
1729 .result = ACCEPT,
1730 .prog_type = BPF_PROG_TYPE_SK_MSG,
1731 },
1732 {
1733 "valid access local_ip4 in SK_MSG",
1734 .insns = {
1735 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1736 offsetof(struct sk_msg_md, local_ip4)),
1737 BPF_EXIT_INSN(),
1738 },
1739 .result = ACCEPT,
1740 .prog_type = BPF_PROG_TYPE_SK_MSG,
1741 },
1742 {
1743 "valid access remote_port in SK_MSG",
1744 .insns = {
1745 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1746 offsetof(struct sk_msg_md, remote_port)),
1747 BPF_EXIT_INSN(),
1748 },
1749 .result = ACCEPT,
1750 .prog_type = BPF_PROG_TYPE_SK_MSG,
1751 },
1752 {
1753 "valid access local_port in SK_MSG",
1754 .insns = {
1755 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1756 offsetof(struct sk_msg_md, local_port)),
1757 BPF_EXIT_INSN(),
1758 },
1759 .result = ACCEPT,
1760 .prog_type = BPF_PROG_TYPE_SK_MSG,
1761 },
1762 {
1763 "valid access remote_ip6 in SK_MSG",
1764 .insns = {
1765 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1766 offsetof(struct sk_msg_md, remote_ip6[0])),
1767 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1768 offsetof(struct sk_msg_md, remote_ip6[1])),
1769 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1770 offsetof(struct sk_msg_md, remote_ip6[2])),
1771 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1772 offsetof(struct sk_msg_md, remote_ip6[3])),
1773 BPF_EXIT_INSN(),
1774 },
1775 .result = ACCEPT,
1776	.prog_type = BPF_PROG_TYPE_SK_MSG,
1777 },
1778 {
1779 "valid access local_ip6 in SK_MSG",
1780 .insns = {
1781 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1782 offsetof(struct sk_msg_md, local_ip6[0])),
1783 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1784 offsetof(struct sk_msg_md, local_ip6[1])),
1785 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1786 offsetof(struct sk_msg_md, local_ip6[2])),
1787 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1788 offsetof(struct sk_msg_md, local_ip6[3])),
1789 BPF_EXIT_INSN(),
1790 },
1791 .result = ACCEPT,
1792	.prog_type = BPF_PROG_TYPE_SK_MSG,
1793 },
1794 {
1795 "invalid 64B read of family in SK_MSG",
1796 .insns = {
1797 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1798 offsetof(struct sk_msg_md, family)),
1799 BPF_EXIT_INSN(),
1800 },
1801 .errstr = "invalid bpf_context access",
1802 .result = REJECT,
1803 .prog_type = BPF_PROG_TYPE_SK_MSG,
1804 },
1805 {
1806 "invalid read past end of SK_MSG",
1807 .insns = {
1808 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1809 offsetof(struct sk_msg_md, local_port) + 4),
1810 BPF_EXIT_INSN(),
1811 },
1812 .errstr = "R0 !read_ok",
1813 .result = REJECT,
1814 .prog_type = BPF_PROG_TYPE_SK_MSG,
1815 },
1816 {
1817 "invalid read offset in SK_MSG",
1818 .insns = {
1819 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1820 offsetof(struct sk_msg_md, family) + 1),
1821 BPF_EXIT_INSN(),
1822 },
1823 .errstr = "invalid bpf_context access",
1824 .result = REJECT,
1825 .prog_type = BPF_PROG_TYPE_SK_MSG,
1826 },
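	/*
	 * Unlike __sk_buff, sk_msg_md exposes data and data_end as 64-bit
	 * pointers, so the direct packet access tests below load them with
	 * BPF_DW before performing the usual bounds check.
	 */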
1827 {
John Fastabend1acc60b2018-03-18 12:57:36 -07001828 "direct packet read for SK_MSG",
1829 .insns = {
1830 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1831 offsetof(struct sk_msg_md, data)),
1832 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1833 offsetof(struct sk_msg_md, data_end)),
1834 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1835 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1836 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1837 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1838 BPF_MOV64_IMM(BPF_REG_0, 0),
1839 BPF_EXIT_INSN(),
1840 },
1841 .result = ACCEPT,
1842 .prog_type = BPF_PROG_TYPE_SK_MSG,
1843 },
1844 {
1845 "direct packet write for SK_MSG",
1846 .insns = {
1847 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1848 offsetof(struct sk_msg_md, data)),
1849 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1850 offsetof(struct sk_msg_md, data_end)),
1851 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1853 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1854 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1855 BPF_MOV64_IMM(BPF_REG_0, 0),
1856 BPF_EXIT_INSN(),
1857 },
1858 .result = ACCEPT,
1859 .prog_type = BPF_PROG_TYPE_SK_MSG,
1860 },
1861 {
1862 "overlapping checks for direct packet access SK_MSG",
1863 .insns = {
1864 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1865 offsetof(struct sk_msg_md, data)),
1866 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1867 offsetof(struct sk_msg_md, data_end)),
1868 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1870 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1871 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1873 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1874 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1875 BPF_MOV64_IMM(BPF_REG_0, 0),
1876 BPF_EXIT_INSN(),
1877 },
1878 .result = ACCEPT,
1879 .prog_type = BPF_PROG_TYPE_SK_MSG,
1880 },
1881 {
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001882 "check skb->mark is not writeable by sockets",
1883 .insns = {
1884 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1885 offsetof(struct __sk_buff, mark)),
1886 BPF_EXIT_INSN(),
1887 },
1888 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001889 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001890 .result = REJECT,
1891 },
1892 {
1893 "check skb->tc_index is not writeable by sockets",
1894 .insns = {
1895 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1896 offsetof(struct __sk_buff, tc_index)),
1897 BPF_EXIT_INSN(),
1898 },
1899 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001900 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001901 .result = REJECT,
1902 },
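	/*
	 * skb->cb[] is scratch space the program may both read and write;
	 * the tests below cover byte, half, word and double-word accesses,
	 * plus misaligned and out-of-bounds cases.
	 */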
1903 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001904 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001905 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001906 BPF_MOV64_IMM(BPF_REG_0, 0),
1907 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1908 offsetof(struct __sk_buff, cb[0])),
1909 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1910 offsetof(struct __sk_buff, cb[0]) + 1),
1911 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1912 offsetof(struct __sk_buff, cb[0]) + 2),
1913 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1914 offsetof(struct __sk_buff, cb[0]) + 3),
1915 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1916 offsetof(struct __sk_buff, cb[1])),
1917 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1918 offsetof(struct __sk_buff, cb[1]) + 1),
1919 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1920 offsetof(struct __sk_buff, cb[1]) + 2),
1921 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1922 offsetof(struct __sk_buff, cb[1]) + 3),
1923 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1924 offsetof(struct __sk_buff, cb[2])),
1925 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1926 offsetof(struct __sk_buff, cb[2]) + 1),
1927 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1928 offsetof(struct __sk_buff, cb[2]) + 2),
1929 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1930 offsetof(struct __sk_buff, cb[2]) + 3),
1931 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1932 offsetof(struct __sk_buff, cb[3])),
1933 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1934 offsetof(struct __sk_buff, cb[3]) + 1),
1935 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1936 offsetof(struct __sk_buff, cb[3]) + 2),
1937 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1938 offsetof(struct __sk_buff, cb[3]) + 3),
1939 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1940 offsetof(struct __sk_buff, cb[4])),
1941 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1942 offsetof(struct __sk_buff, cb[4]) + 1),
1943 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1944 offsetof(struct __sk_buff, cb[4]) + 2),
1945 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1946 offsetof(struct __sk_buff, cb[4]) + 3),
1947 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1948 offsetof(struct __sk_buff, cb[0])),
1949 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1950 offsetof(struct __sk_buff, cb[0]) + 1),
1951 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1952 offsetof(struct __sk_buff, cb[0]) + 2),
1953 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1954 offsetof(struct __sk_buff, cb[0]) + 3),
1955 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1956 offsetof(struct __sk_buff, cb[1])),
1957 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1958 offsetof(struct __sk_buff, cb[1]) + 1),
1959 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1960 offsetof(struct __sk_buff, cb[1]) + 2),
1961 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1962 offsetof(struct __sk_buff, cb[1]) + 3),
1963 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1964 offsetof(struct __sk_buff, cb[2])),
1965 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1966 offsetof(struct __sk_buff, cb[2]) + 1),
1967 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1968 offsetof(struct __sk_buff, cb[2]) + 2),
1969 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1970 offsetof(struct __sk_buff, cb[2]) + 3),
1971 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1972 offsetof(struct __sk_buff, cb[3])),
1973 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1974 offsetof(struct __sk_buff, cb[3]) + 1),
1975 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1976 offsetof(struct __sk_buff, cb[3]) + 2),
1977 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1978 offsetof(struct __sk_buff, cb[3]) + 3),
1979 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1980 offsetof(struct __sk_buff, cb[4])),
1981 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1982 offsetof(struct __sk_buff, cb[4]) + 1),
1983 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1984 offsetof(struct __sk_buff, cb[4]) + 2),
1985 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1986 offsetof(struct __sk_buff, cb[4]) + 3),
1987 BPF_EXIT_INSN(),
1988 },
1989 .result = ACCEPT,
1990 },
1991 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001992 "__sk_buff->hash, offset 0, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001993 .insns = {
1994 BPF_MOV64_IMM(BPF_REG_0, 0),
1995 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001996 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001997 BPF_EXIT_INSN(),
1998 },
1999 .errstr = "invalid bpf_context access",
2000 .result = REJECT,
2001 },
2002 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002003 "__sk_buff->tc_index, offset 3, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002004 .insns = {
2005 BPF_MOV64_IMM(BPF_REG_0, 0),
2006 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07002007 offsetof(struct __sk_buff, tc_index) + 3),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002008 BPF_EXIT_INSN(),
2009 },
2010 .errstr = "invalid bpf_context access",
2011 .result = REJECT,
2012 },
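	/*
	 * skb->hash is a 32-bit context field; narrow loads are only
	 * accepted when they cover its low-order bytes, so the permitted
	 * offset depends on host endianness (hence the __BYTE_ORDER
	 * conditionals in the tests below).
	 */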
2013 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002014 "check skb->hash byte load permitted",
2015 .insns = {
2016 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02002017#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002018 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2019 offsetof(struct __sk_buff, hash)),
2020#else
2021 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2022 offsetof(struct __sk_buff, hash) + 3),
2023#endif
2024 BPF_EXIT_INSN(),
2025 },
2026 .result = ACCEPT,
2027 },
2028 {
2029 "check skb->hash byte load not permitted 1",
2030 .insns = {
2031 BPF_MOV64_IMM(BPF_REG_0, 0),
2032 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2033 offsetof(struct __sk_buff, hash) + 1),
2034 BPF_EXIT_INSN(),
2035 },
2036 .errstr = "invalid bpf_context access",
2037 .result = REJECT,
2038 },
2039 {
2040 "check skb->hash byte load not permitted 2",
2041 .insns = {
2042 BPF_MOV64_IMM(BPF_REG_0, 0),
2043 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2044 offsetof(struct __sk_buff, hash) + 2),
2045 BPF_EXIT_INSN(),
2046 },
2047 .errstr = "invalid bpf_context access",
2048 .result = REJECT,
2049 },
2050 {
2051 "check skb->hash byte load not permitted 3",
2052 .insns = {
2053 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02002054#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002055 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2056 offsetof(struct __sk_buff, hash) + 3),
2057#else
2058 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2059 offsetof(struct __sk_buff, hash)),
2060#endif
2061 BPF_EXIT_INSN(),
2062 },
2063 .errstr = "invalid bpf_context access",
2064 .result = REJECT,
2065 },
2066 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01002067 "check cb access: byte, wrong type",
2068 .insns = {
2069 BPF_MOV64_IMM(BPF_REG_0, 0),
2070 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002071 offsetof(struct __sk_buff, cb[0])),
2072 BPF_EXIT_INSN(),
2073 },
2074 .errstr = "invalid bpf_context access",
2075 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002076 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2077 },
2078 {
2079 "check cb access: half",
2080 .insns = {
2081 BPF_MOV64_IMM(BPF_REG_0, 0),
2082 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2083 offsetof(struct __sk_buff, cb[0])),
2084 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2085 offsetof(struct __sk_buff, cb[0]) + 2),
2086 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2087 offsetof(struct __sk_buff, cb[1])),
2088 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2089 offsetof(struct __sk_buff, cb[1]) + 2),
2090 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2091 offsetof(struct __sk_buff, cb[2])),
2092 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2093 offsetof(struct __sk_buff, cb[2]) + 2),
2094 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2095 offsetof(struct __sk_buff, cb[3])),
2096 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2097 offsetof(struct __sk_buff, cb[3]) + 2),
2098 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2099 offsetof(struct __sk_buff, cb[4])),
2100 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2101 offsetof(struct __sk_buff, cb[4]) + 2),
2102 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2103 offsetof(struct __sk_buff, cb[0])),
2104 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2105 offsetof(struct __sk_buff, cb[0]) + 2),
2106 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2107 offsetof(struct __sk_buff, cb[1])),
2108 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2109 offsetof(struct __sk_buff, cb[1]) + 2),
2110 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2111 offsetof(struct __sk_buff, cb[2])),
2112 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2113 offsetof(struct __sk_buff, cb[2]) + 2),
2114 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2115 offsetof(struct __sk_buff, cb[3])),
2116 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2117 offsetof(struct __sk_buff, cb[3]) + 2),
2118 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2119 offsetof(struct __sk_buff, cb[4])),
2120 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2121 offsetof(struct __sk_buff, cb[4]) + 2),
2122 BPF_EXIT_INSN(),
2123 },
2124 .result = ACCEPT,
2125 },
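	/*
	 * Misaligned cb[] accesses are only rejected when strict alignment
	 * checking is requested at load time, which is what the
	 * F_LOAD_WITH_STRICT_ALIGNMENT test flag arranges.
	 */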
2126 {
2127 "check cb access: half, unaligned",
2128 .insns = {
2129 BPF_MOV64_IMM(BPF_REG_0, 0),
2130 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2131 offsetof(struct __sk_buff, cb[0]) + 1),
2132 BPF_EXIT_INSN(),
2133 },
Edward Creef65b1842017-08-07 15:27:12 +01002134 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002135 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002136 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002137 },
2138 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002139 "check __sk_buff->hash, offset 0, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002140 .insns = {
2141 BPF_MOV64_IMM(BPF_REG_0, 0),
2142 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07002143 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002144 BPF_EXIT_INSN(),
2145 },
2146 .errstr = "invalid bpf_context access",
2147 .result = REJECT,
2148 },
2149 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002150 "check __sk_buff->tc_index, offset 2, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002151 .insns = {
2152 BPF_MOV64_IMM(BPF_REG_0, 0),
2153 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07002154 offsetof(struct __sk_buff, tc_index) + 2),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002155 BPF_EXIT_INSN(),
2156 },
2157 .errstr = "invalid bpf_context access",
2158 .result = REJECT,
2159 },
2160 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002161 "check skb->hash half load permitted",
2162 .insns = {
2163 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02002164#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002165 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2166 offsetof(struct __sk_buff, hash)),
2167#else
2168 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2169 offsetof(struct __sk_buff, hash) + 2),
2170#endif
2171 BPF_EXIT_INSN(),
2172 },
2173 .result = ACCEPT,
2174 },
2175 {
2176 "check skb->hash half load not permitted",
2177 .insns = {
2178 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02002179#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002180 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2181 offsetof(struct __sk_buff, hash) + 2),
2182#else
2183 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2184 offsetof(struct __sk_buff, hash)),
2185#endif
2186 BPF_EXIT_INSN(),
2187 },
2188 .errstr = "invalid bpf_context access",
2189 .result = REJECT,
2190 },
2191 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01002192 "check cb access: half, wrong type",
2193 .insns = {
2194 BPF_MOV64_IMM(BPF_REG_0, 0),
2195 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2196 offsetof(struct __sk_buff, cb[0])),
2197 BPF_EXIT_INSN(),
2198 },
2199 .errstr = "invalid bpf_context access",
2200 .result = REJECT,
2201 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2202 },
2203 {
2204 "check cb access: word",
2205 .insns = {
2206 BPF_MOV64_IMM(BPF_REG_0, 0),
2207 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2208 offsetof(struct __sk_buff, cb[0])),
2209 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2210 offsetof(struct __sk_buff, cb[1])),
2211 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2212 offsetof(struct __sk_buff, cb[2])),
2213 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2214 offsetof(struct __sk_buff, cb[3])),
2215 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2216 offsetof(struct __sk_buff, cb[4])),
2217 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2218 offsetof(struct __sk_buff, cb[0])),
2219 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2220 offsetof(struct __sk_buff, cb[1])),
2221 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2222 offsetof(struct __sk_buff, cb[2])),
2223 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2224 offsetof(struct __sk_buff, cb[3])),
2225 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2226 offsetof(struct __sk_buff, cb[4])),
2227 BPF_EXIT_INSN(),
2228 },
2229 .result = ACCEPT,
2230 },
2231 {
2232 "check cb access: word, unaligned 1",
2233 .insns = {
2234 BPF_MOV64_IMM(BPF_REG_0, 0),
2235 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2236 offsetof(struct __sk_buff, cb[0]) + 2),
2237 BPF_EXIT_INSN(),
2238 },
Edward Creef65b1842017-08-07 15:27:12 +01002239 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002240 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002241 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002242 },
2243 {
2244 "check cb access: word, unaligned 2",
2245 .insns = {
2246 BPF_MOV64_IMM(BPF_REG_0, 0),
2247 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2248 offsetof(struct __sk_buff, cb[4]) + 1),
2249 BPF_EXIT_INSN(),
2250 },
Edward Creef65b1842017-08-07 15:27:12 +01002251 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002252 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002253 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002254 },
2255 {
2256 "check cb access: word, unaligned 3",
2257 .insns = {
2258 BPF_MOV64_IMM(BPF_REG_0, 0),
2259 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2260 offsetof(struct __sk_buff, cb[4]) + 2),
2261 BPF_EXIT_INSN(),
2262 },
Edward Creef65b1842017-08-07 15:27:12 +01002263 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002264 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002265 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002266 },
2267 {
2268 "check cb access: word, unaligned 4",
2269 .insns = {
2270 BPF_MOV64_IMM(BPF_REG_0, 0),
2271 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2272 offsetof(struct __sk_buff, cb[4]) + 3),
2273 BPF_EXIT_INSN(),
2274 },
Edward Creef65b1842017-08-07 15:27:12 +01002275 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002276 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002277 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002278 },
2279 {
2280 "check cb access: double",
2281 .insns = {
2282 BPF_MOV64_IMM(BPF_REG_0, 0),
2283 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2284 offsetof(struct __sk_buff, cb[0])),
2285 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2286 offsetof(struct __sk_buff, cb[2])),
2287 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2288 offsetof(struct __sk_buff, cb[0])),
2289 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2290 offsetof(struct __sk_buff, cb[2])),
2291 BPF_EXIT_INSN(),
2292 },
2293 .result = ACCEPT,
2294 },
2295 {
2296 "check cb access: double, unaligned 1",
2297 .insns = {
2298 BPF_MOV64_IMM(BPF_REG_0, 0),
2299 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2300 offsetof(struct __sk_buff, cb[1])),
2301 BPF_EXIT_INSN(),
2302 },
Edward Creef65b1842017-08-07 15:27:12 +01002303 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002304 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002305 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002306 },
2307 {
2308 "check cb access: double, unaligned 2",
2309 .insns = {
2310 BPF_MOV64_IMM(BPF_REG_0, 0),
2311 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2312 offsetof(struct __sk_buff, cb[3])),
2313 BPF_EXIT_INSN(),
2314 },
Edward Creef65b1842017-08-07 15:27:12 +01002315 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002316 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002317 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002318 },
2319 {
2320 "check cb access: double, oob 1",
2321 .insns = {
2322 BPF_MOV64_IMM(BPF_REG_0, 0),
2323 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2324 offsetof(struct __sk_buff, cb[4])),
2325 BPF_EXIT_INSN(),
2326 },
2327 .errstr = "invalid bpf_context access",
2328 .result = REJECT,
2329 },
2330 {
2331 "check cb access: double, oob 2",
2332 .insns = {
2333 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002334 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2335 offsetof(struct __sk_buff, cb[4])),
2336 BPF_EXIT_INSN(),
2337 },
2338 .errstr = "invalid bpf_context access",
2339 .result = REJECT,
2340 },
2341 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002342 "check __sk_buff->ifindex dw store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002343 .insns = {
2344 BPF_MOV64_IMM(BPF_REG_0, 0),
Yonghong Song31fd8582017-06-13 15:52:13 -07002345 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2346 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002347 BPF_EXIT_INSN(),
2348 },
2349 .errstr = "invalid bpf_context access",
2350 .result = REJECT,
2351 },
2352 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002353 "check __sk_buff->ifindex dw load not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002354 .insns = {
2355 BPF_MOV64_IMM(BPF_REG_0, 0),
2356 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
Yonghong Song31fd8582017-06-13 15:52:13 -07002357 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002358 BPF_EXIT_INSN(),
2359 },
2360 .errstr = "invalid bpf_context access",
2361 .result = REJECT,
2362 },
2363 {
2364 "check cb access: double, wrong type",
2365 .insns = {
2366 BPF_MOV64_IMM(BPF_REG_0, 0),
2367 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2368 offsetof(struct __sk_buff, cb[0])),
2369 BPF_EXIT_INSN(),
2370 },
2371 .errstr = "invalid bpf_context access",
2372 .result = REJECT,
2373 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002374 },
2375 {
2376 "check out of range skb->cb access",
2377 .insns = {
2378 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002379 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002380 BPF_EXIT_INSN(),
2381 },
2382 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002383 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002384 .result = REJECT,
2385 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2386 },
2387 {
2388 "write skb fields from socket prog",
2389 .insns = {
2390 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2391 offsetof(struct __sk_buff, cb[4])),
2392 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2393 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2394 offsetof(struct __sk_buff, mark)),
2395 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2396 offsetof(struct __sk_buff, tc_index)),
2397 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2398 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2399 offsetof(struct __sk_buff, cb[0])),
2400 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2401 offsetof(struct __sk_buff, cb[2])),
2402 BPF_EXIT_INSN(),
2403 },
2404 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002405 .errstr_unpriv = "R1 leaks addr",
2406 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002407 },
2408 {
2409 "write skb fields from tc_cls_act prog",
2410 .insns = {
2411 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2412 offsetof(struct __sk_buff, cb[0])),
2413 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2414 offsetof(struct __sk_buff, mark)),
2415 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2416 offsetof(struct __sk_buff, tc_index)),
2417 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2418 offsetof(struct __sk_buff, tc_index)),
2419 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2420 offsetof(struct __sk_buff, cb[3])),
2421 BPF_EXIT_INSN(),
2422 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002423 .errstr_unpriv = "",
2424 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002425 .result = ACCEPT,
2426 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2427 },
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002428 {
2429 "PTR_TO_STACK store/load",
2430 .insns = {
2431 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2433 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2434 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2435 BPF_EXIT_INSN(),
2436 },
2437 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002438 .retval = 0xfaceb00c,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002439 },
2440 {
2441 "PTR_TO_STACK store/load - bad alignment on off",
2442 .insns = {
2443 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2444 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2445 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2446 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2447 BPF_EXIT_INSN(),
2448 },
2449 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002450 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002451 },
2452 {
2453 "PTR_TO_STACK store/load - bad alignment on reg",
2454 .insns = {
2455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2457 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2458 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2459 BPF_EXIT_INSN(),
2460 },
2461 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002462 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002463 },
2464 {
2465 "PTR_TO_STACK store/load - out of bounds low",
2466 .insns = {
2467 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2468 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2469 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2470 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2471 BPF_EXIT_INSN(),
2472 },
2473 .result = REJECT,
2474 .errstr = "invalid stack off=-79992 size=8",
2475 },
2476 {
2477 "PTR_TO_STACK store/load - out of bounds high",
2478 .insns = {
2479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2481 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2482 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2483 BPF_EXIT_INSN(),
2484 },
2485 .result = REJECT,
2486 .errstr = "invalid stack off=0 size=8",
2487 },
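	/*
	 * Tests prefixed "unpriv:" exercise the extra restrictions applied
	 * to unprivileged loads: .result_unpriv and .errstr_unpriv give the
	 * expected outcome without privileges, while .result/.errstr apply
	 * to the privileged run.
	 */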
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002488 {
2489 "unpriv: return pointer",
2490 .insns = {
2491 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2492 BPF_EXIT_INSN(),
2493 },
2494 .result = ACCEPT,
2495 .result_unpriv = REJECT,
2496 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002497 .retval = POINTER_VALUE,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002498 },
2499 {
2500 "unpriv: add const to pointer",
2501 .insns = {
2502 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2503 BPF_MOV64_IMM(BPF_REG_0, 0),
2504 BPF_EXIT_INSN(),
2505 },
2506 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002507 },
2508 {
2509 "unpriv: add pointer to pointer",
2510 .insns = {
2511 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2512 BPF_MOV64_IMM(BPF_REG_0, 0),
2513 BPF_EXIT_INSN(),
2514 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08002515 .result = REJECT,
2516 .errstr = "R1 pointer += pointer",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002517 },
2518 {
2519 "unpriv: neg pointer",
2520 .insns = {
2521 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2522 BPF_MOV64_IMM(BPF_REG_0, 0),
2523 BPF_EXIT_INSN(),
2524 },
2525 .result = ACCEPT,
2526 .result_unpriv = REJECT,
2527 .errstr_unpriv = "R1 pointer arithmetic",
2528 },
2529 {
2530 "unpriv: cmp pointer with const",
2531 .insns = {
2532 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2533 BPF_MOV64_IMM(BPF_REG_0, 0),
2534 BPF_EXIT_INSN(),
2535 },
2536 .result = ACCEPT,
2537 .result_unpriv = REJECT,
2538 .errstr_unpriv = "R1 pointer comparison",
2539 },
2540 {
2541 "unpriv: cmp pointer with pointer",
2542 .insns = {
2543 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2544 BPF_MOV64_IMM(BPF_REG_0, 0),
2545 BPF_EXIT_INSN(),
2546 },
2547 .result = ACCEPT,
2548 .result_unpriv = REJECT,
2549 .errstr_unpriv = "R10 pointer comparison",
2550 },
2551 {
2552 "unpriv: check that printk is disallowed",
2553 .insns = {
2554 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2555 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2557 BPF_MOV64_IMM(BPF_REG_2, 8),
2558 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002559 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2560 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002561 BPF_MOV64_IMM(BPF_REG_0, 0),
2562 BPF_EXIT_INSN(),
2563 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01002564 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002565 .result_unpriv = REJECT,
2566 .result = ACCEPT,
2567 },
2568 {
2569 "unpriv: pass pointer to helper function",
2570 .insns = {
2571 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2572 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2573 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2574 BPF_LD_MAP_FD(BPF_REG_1, 0),
2575 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2576 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002577 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2578 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002579 BPF_MOV64_IMM(BPF_REG_0, 0),
2580 BPF_EXIT_INSN(),
2581 },
Prashant Bhole908142e2018-10-09 10:04:53 +09002582 .fixup_map_hash_8b = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002583 .errstr_unpriv = "R4 leaks addr",
2584 .result_unpriv = REJECT,
2585 .result = ACCEPT,
2586 },
2587 {
2588 "unpriv: indirectly pass pointer on stack to helper function",
2589 .insns = {
2590 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2591 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2593 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2595 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002596 BPF_MOV64_IMM(BPF_REG_0, 0),
2597 BPF_EXIT_INSN(),
2598 },
Prashant Bhole908142e2018-10-09 10:04:53 +09002599 .fixup_map_hash_8b = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002600 .errstr = "invalid indirect read from stack off -8+0 size 8",
2601 .result = REJECT,
2602 },
2603 {
2604 "unpriv: mangle pointer on stack 1",
2605 .insns = {
2606 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2607 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2608 BPF_MOV64_IMM(BPF_REG_0, 0),
2609 BPF_EXIT_INSN(),
2610 },
2611 .errstr_unpriv = "attempt to corrupt spilled",
2612 .result_unpriv = REJECT,
2613 .result = ACCEPT,
2614 },
2615 {
2616 "unpriv: mangle pointer on stack 2",
2617 .insns = {
2618 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2619 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2620 BPF_MOV64_IMM(BPF_REG_0, 0),
2621 BPF_EXIT_INSN(),
2622 },
2623 .errstr_unpriv = "attempt to corrupt spilled",
2624 .result_unpriv = REJECT,
2625 .result = ACCEPT,
2626 },
2627 {
2628 "unpriv: read pointer from stack in small chunks",
2629 .insns = {
2630 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2631 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2632 BPF_MOV64_IMM(BPF_REG_0, 0),
2633 BPF_EXIT_INSN(),
2634 },
2635 .errstr = "invalid size",
2636 .result = REJECT,
2637 },
2638 {
2639 "unpriv: write pointer into ctx",
2640 .insns = {
2641 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2642 BPF_MOV64_IMM(BPF_REG_0, 0),
2643 BPF_EXIT_INSN(),
2644 },
2645 .errstr_unpriv = "R1 leaks addr",
2646 .result_unpriv = REJECT,
2647 .errstr = "invalid bpf_context access",
2648 .result = REJECT,
2649 },
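	/*
	 * Spill/fill tests: a context pointer may be spilled to the stack
	 * and filled back, but a single load/store instruction that ends up
	 * operating on pointers of different types along different paths is
	 * rejected ("same insn cannot be used with different pointers").
	 */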
2650 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002651 "unpriv: spill/fill of ctx",
2652 .insns = {
2653 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2654 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2655 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2656 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2657 BPF_MOV64_IMM(BPF_REG_0, 0),
2658 BPF_EXIT_INSN(),
2659 },
2660 .result = ACCEPT,
2661 },
2662 {
2663 "unpriv: spill/fill of ctx 2",
2664 .insns = {
2665 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2666 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2667 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2668 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002669 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2670 BPF_FUNC_get_hash_recalc),
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002671 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002672 BPF_EXIT_INSN(),
2673 },
2674 .result = ACCEPT,
2675 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2676 },
2677 {
2678 "unpriv: spill/fill of ctx 3",
2679 .insns = {
2680 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2681 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2682 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2683 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2684 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002685 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2686 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002687 BPF_EXIT_INSN(),
2688 },
2689 .result = REJECT,
2690 .errstr = "R1 type=fp expected=ctx",
2691 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2692 },
2693 {
2694 "unpriv: spill/fill of ctx 4",
2695 .insns = {
2696 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2698 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2699 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002700 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2701 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002702 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002703 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2704 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002705 BPF_EXIT_INSN(),
2706 },
2707 .result = REJECT,
2708 .errstr = "R1 type=inv expected=ctx",
2709 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2710 },
2711 {
2712 "unpriv: spill/fill of different pointers stx",
2713 .insns = {
2714 BPF_MOV64_IMM(BPF_REG_3, 42),
2715 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2716 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2717 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2718 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2719 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2720 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2721 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2722 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2723 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2724 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2725 offsetof(struct __sk_buff, mark)),
2726 BPF_MOV64_IMM(BPF_REG_0, 0),
2727 BPF_EXIT_INSN(),
2728 },
2729 .result = REJECT,
2730 .errstr = "same insn cannot be used with different pointers",
2731 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2732 },
2733 {
Joe Stringerb584ab82018-10-02 13:35:38 -07002734 "unpriv: spill/fill of different pointers stx - ctx and sock",
2735 .insns = {
2736 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2737 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2738 BPF_SK_LOOKUP,
2739 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2740 /* u64 foo; */
2741 /* void *target = &foo; */
2742 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2744 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2745 /* if (skb == NULL) *target = sock; */
2746 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2747 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2748 /* else *target = skb; */
2749 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2750 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2751 /* struct __sk_buff *skb = *target; */
2752 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2753 /* skb->mark = 42; */
2754 BPF_MOV64_IMM(BPF_REG_3, 42),
2755 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2756 offsetof(struct __sk_buff, mark)),
2757 /* if (sk) bpf_sk_release(sk) */
2758 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2759 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2760 BPF_MOV64_IMM(BPF_REG_0, 0),
2761 BPF_EXIT_INSN(),
2762 },
2763 .result = REJECT,
2764 .errstr = "type=ctx expected=sock",
2765 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2766 },
2767 {
2768 "unpriv: spill/fill of different pointers stx - leak sock",
2769 .insns = {
2770 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2771 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2772 BPF_SK_LOOKUP,
2773 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2774 /* u64 foo; */
2775 /* void *target = &foo; */
2776 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2778 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2779 /* if (skb == NULL) *target = sock; */
2780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2781 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2782 /* else *target = skb; */
2783 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2784 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2785 /* struct __sk_buff *skb = *target; */
2786 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2787 /* skb->mark = 42; */
2788 BPF_MOV64_IMM(BPF_REG_3, 42),
2789 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2790 offsetof(struct __sk_buff, mark)),
2791 BPF_EXIT_INSN(),
2792 },
2793 .result = REJECT,
2794 //.errstr = "same insn cannot be used with different pointers",
2795 .errstr = "Unreleased reference",
2796 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2797 },
2798 {
2799 "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2800 .insns = {
2801 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2802 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2803 BPF_SK_LOOKUP,
2804 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2805 /* u64 foo; */
2806 /* void *target = &foo; */
2807 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2808 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2809 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2810 /* if (skb) *target = skb */
2811 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2812 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2813 /* else *target = sock */
2814 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2815 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2816 /* struct bpf_sock *sk = *target; */
2817 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2818 /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2819 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2820 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2821 offsetof(struct bpf_sock, mark)),
2822 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2823 BPF_MOV64_IMM(BPF_REG_0, 0),
2824 BPF_EXIT_INSN(),
2825 },
2826 .result = REJECT,
2827 .errstr = "same insn cannot be used with different pointers",
2828 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2829 },
2830 {
2831 "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2832 .insns = {
2833 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2834 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2835 BPF_SK_LOOKUP,
2836 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2837 /* u64 foo; */
2838 /* void *target = &foo; */
2839 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2841 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2842 /* if (skb) *target = skb */
2843 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2844 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2845 /* else *target = sock */
2846 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2847 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2848 /* struct bpf_sock *sk = *target; */
2849 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2850 /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2851 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2852 BPF_MOV64_IMM(BPF_REG_3, 42),
2853 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2854 offsetof(struct bpf_sock, mark)),
2855 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2856 BPF_MOV64_IMM(BPF_REG_0, 0),
2857 BPF_EXIT_INSN(),
2858 },
2859 .result = REJECT,
2860 //.errstr = "same insn cannot be used with different pointers",
2861 .errstr = "cannot write into socket",
2862 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2863 },
2864 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002865 "unpriv: spill/fill of different pointers ldx",
2866 .insns = {
2867 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2869 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2870 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2872 -(__s32)offsetof(struct bpf_perf_event_data,
2873 sample_period) - 8),
2874 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2875 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2876 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2877 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2878 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2879 offsetof(struct bpf_perf_event_data,
2880 sample_period)),
2881 BPF_MOV64_IMM(BPF_REG_0, 0),
2882 BPF_EXIT_INSN(),
2883 },
2884 .result = REJECT,
2885 .errstr = "same insn cannot be used with different pointers",
2886 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2887 },
2888 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002889 "unpriv: write pointer into map elem value",
2890 .insns = {
2891 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2892 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2893 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2894 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002895 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2896 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002897 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2898 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2899 BPF_EXIT_INSN(),
2900 },
Prashant Bhole908142e2018-10-09 10:04:53 +09002901 .fixup_map_hash_8b = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002902 .errstr_unpriv = "R0 leaks addr",
2903 .result_unpriv = REJECT,
2904 .result = ACCEPT,
2905 },
2906 {
2907 "unpriv: partial copy of pointer",
2908 .insns = {
2909 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2910 BPF_MOV64_IMM(BPF_REG_0, 0),
2911 BPF_EXIT_INSN(),
2912 },
2913 .errstr_unpriv = "R10 partial copy",
2914 .result_unpriv = REJECT,
2915 .result = ACCEPT,
2916 },
2917 {
2918 "unpriv: pass pointer to tail_call",
2919 .insns = {
2920 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2921 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2923 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002924 BPF_MOV64_IMM(BPF_REG_0, 0),
2925 BPF_EXIT_INSN(),
2926 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02002927 .fixup_prog1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002928 .errstr_unpriv = "R3 leaks addr into helper",
2929 .result_unpriv = REJECT,
2930 .result = ACCEPT,
2931 },
2932 {
2933 "unpriv: cmp map pointer with zero",
2934 .insns = {
2935 BPF_MOV64_IMM(BPF_REG_1, 0),
2936 BPF_LD_MAP_FD(BPF_REG_1, 0),
2937 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2938 BPF_MOV64_IMM(BPF_REG_0, 0),
2939 BPF_EXIT_INSN(),
2940 },
Prashant Bhole908142e2018-10-09 10:04:53 +09002941 .fixup_map_hash_8b = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002942 .errstr_unpriv = "R1 pointer comparison",
2943 .result_unpriv = REJECT,
2944 .result = ACCEPT,
2945 },
2946 {
2947 "unpriv: write into frame pointer",
2948 .insns = {
2949 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2950 BPF_MOV64_IMM(BPF_REG_0, 0),
2951 BPF_EXIT_INSN(),
2952 },
2953 .errstr = "frame pointer is read only",
2954 .result = REJECT,
2955 },
2956 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002957 "unpriv: spill/fill frame pointer",
2958 .insns = {
2959 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2961 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2962 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2963 BPF_MOV64_IMM(BPF_REG_0, 0),
2964 BPF_EXIT_INSN(),
2965 },
2966 .errstr = "frame pointer is read only",
2967 .result = REJECT,
2968 },
2969 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002970 "unpriv: cmp of frame pointer",
2971 .insns = {
2972 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2973 BPF_MOV64_IMM(BPF_REG_0, 0),
2974 BPF_EXIT_INSN(),
2975 },
2976 .errstr_unpriv = "R10 pointer comparison",
2977 .result_unpriv = REJECT,
2978 .result = ACCEPT,
2979 },
2980 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002981 "unpriv: adding of fp",
2982 .insns = {
2983 BPF_MOV64_IMM(BPF_REG_0, 0),
2984 BPF_MOV64_IMM(BPF_REG_1, 0),
2985 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2986 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2987 BPF_EXIT_INSN(),
2988 },
Edward Creef65b1842017-08-07 15:27:12 +01002989 .result = ACCEPT,
Daniel Borkmann728a8532017-04-27 01:39:32 +02002990 },
2991 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002992 "unpriv: cmp of stack pointer",
2993 .insns = {
2994 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2995 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2996 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2997 BPF_MOV64_IMM(BPF_REG_0, 0),
2998 BPF_EXIT_INSN(),
2999 },
3000 .errstr_unpriv = "R2 pointer comparison",
3001 .result_unpriv = REJECT,
3002 .result = ACCEPT,
3003 },
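	/*
	 * The runtime/jit tail_call tests are executed and their return
	 * value checked (.retval): .fixup_prog1 patches a prog-array map fd
	 * into the BPF_LD_MAP_FD slot, and an out-of-range or empty index
	 * makes bpf_tail_call() fall through to the following instruction.
	 */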
3004 {
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003005 "runtime/jit: tail_call within bounds, prog once",
3006 .insns = {
3007 BPF_MOV64_IMM(BPF_REG_3, 0),
3008 BPF_LD_MAP_FD(BPF_REG_2, 0),
3009 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3010 BPF_FUNC_tail_call),
3011 BPF_MOV64_IMM(BPF_REG_0, 1),
3012 BPF_EXIT_INSN(),
3013 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003014 .fixup_prog1 = { 1 },
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003015 .result = ACCEPT,
3016 .retval = 42,
3017 },
3018 {
3019 "runtime/jit: tail_call within bounds, prog loop",
3020 .insns = {
3021 BPF_MOV64_IMM(BPF_REG_3, 1),
3022 BPF_LD_MAP_FD(BPF_REG_2, 0),
3023 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3024 BPF_FUNC_tail_call),
3025 BPF_MOV64_IMM(BPF_REG_0, 1),
3026 BPF_EXIT_INSN(),
3027 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003028 .fixup_prog1 = { 1 },
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003029 .result = ACCEPT,
3030 .retval = 41,
3031 },
3032 {
3033 "runtime/jit: tail_call within bounds, no prog",
3034 .insns = {
3035 BPF_MOV64_IMM(BPF_REG_3, 2),
3036 BPF_LD_MAP_FD(BPF_REG_2, 0),
3037 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3038 BPF_FUNC_tail_call),
3039 BPF_MOV64_IMM(BPF_REG_0, 1),
3040 BPF_EXIT_INSN(),
3041 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003042 .fixup_prog1 = { 1 },
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003043 .result = ACCEPT,
3044 .retval = 1,
3045 },
3046 {
3047 "runtime/jit: tail_call out of bounds",
3048 .insns = {
3049 BPF_MOV64_IMM(BPF_REG_3, 256),
3050 BPF_LD_MAP_FD(BPF_REG_2, 0),
3051 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3052 BPF_FUNC_tail_call),
3053 BPF_MOV64_IMM(BPF_REG_0, 2),
3054 BPF_EXIT_INSN(),
3055 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003056 .fixup_prog1 = { 1 },
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003057 .result = ACCEPT,
3058 .retval = 2,
3059 },
3060 {
Daniel Borkmann16338a92018-02-23 01:03:43 +01003061 "runtime/jit: pass negative index to tail_call",
3062 .insns = {
3063 BPF_MOV64_IMM(BPF_REG_3, -1),
3064 BPF_LD_MAP_FD(BPF_REG_2, 0),
3065 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3066 BPF_FUNC_tail_call),
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003067 BPF_MOV64_IMM(BPF_REG_0, 2),
Daniel Borkmann16338a92018-02-23 01:03:43 +01003068 BPF_EXIT_INSN(),
3069 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003070 .fixup_prog1 = { 1 },
Daniel Borkmann16338a92018-02-23 01:03:43 +01003071 .result = ACCEPT,
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003072 .retval = 2,
Daniel Borkmann16338a92018-02-23 01:03:43 +01003073 },
3074 {
3075 "runtime/jit: pass > 32bit index to tail_call",
3076 .insns = {
3077 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3078 BPF_LD_MAP_FD(BPF_REG_2, 0),
3079 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3080 BPF_FUNC_tail_call),
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003081 BPF_MOV64_IMM(BPF_REG_0, 2),
Daniel Borkmann16338a92018-02-23 01:03:43 +01003082 BPF_EXIT_INSN(),
3083 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003084 .fixup_prog1 = { 2 },
Daniel Borkmann16338a92018-02-23 01:03:43 +01003085 .result = ACCEPT,
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003086 .retval = 42,
Daniel Borkmann16338a92018-02-23 01:03:43 +01003087 },
3088 {
Yonghong Song332270f2017-04-29 22:52:42 -07003089 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07003090 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07003091 BPF_MOV64_IMM(BPF_REG_1, 4),
3092 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3093 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3095 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3096 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3097 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3098 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3099 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3101 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07003102 BPF_MOV64_IMM(BPF_REG_0, 0),
3103 BPF_EXIT_INSN(),
3104 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07003105 .result = ACCEPT,
3106 },
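	/*
	 * raw_stack tests pass a pointer into the program's stack to
	 * bpf_skb_load_bytes() and check length validation, initialization
	 * requirements, and how spilled registers around the destination
	 * buffer are treated.
	 */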
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003107 {
3108 "raw_stack: no skb_load_bytes",
3109 .insns = {
3110 BPF_MOV64_IMM(BPF_REG_2, 4),
3111 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3112 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3113 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3114 BPF_MOV64_IMM(BPF_REG_4, 8),
3115 /* Call to skb_load_bytes() omitted. */
3116 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3117 BPF_EXIT_INSN(),
3118 },
3119 .result = REJECT,
3120 .errstr = "invalid read from stack off -8+0 size 8",
3121 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3122 },
3123 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003124 "raw_stack: skb_load_bytes, negative len",
3125 .insns = {
3126 BPF_MOV64_IMM(BPF_REG_2, 4),
3127 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3128 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3129 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3130 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003131 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3132 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003133 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3134 BPF_EXIT_INSN(),
3135 },
3136 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003137 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003138 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3139 },
3140 {
3141 "raw_stack: skb_load_bytes, negative len 2",
3142 .insns = {
3143 BPF_MOV64_IMM(BPF_REG_2, 4),
3144 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3146 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3147 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003148 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3149 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003150 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3151 BPF_EXIT_INSN(),
3152 },
3153 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003154 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003155 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3156 },
3157 {
3158 "raw_stack: skb_load_bytes, zero len",
3159 .insns = {
3160 BPF_MOV64_IMM(BPF_REG_2, 4),
3161 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3163 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3164 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3166 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003167 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3168 BPF_EXIT_INSN(),
3169 },
3170 .result = REJECT,
3171 .errstr = "invalid stack type R3",
3172 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3173 },
3174 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003175 "raw_stack: skb_load_bytes, no init",
3176 .insns = {
3177 BPF_MOV64_IMM(BPF_REG_2, 4),
3178 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3180 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3181 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3183 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003184 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3185 BPF_EXIT_INSN(),
3186 },
3187 .result = ACCEPT,
3188 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3189 },
3190 {
3191 "raw_stack: skb_load_bytes, init",
3192 .insns = {
3193 BPF_MOV64_IMM(BPF_REG_2, 4),
3194 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3196 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3197 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3198 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003199 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3200 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003201 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3202 BPF_EXIT_INSN(),
3203 },
3204 .result = ACCEPT,
3205 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3206 },
3207 {
3208 "raw_stack: skb_load_bytes, spilled regs around bounds",
3209 .insns = {
3210 BPF_MOV64_IMM(BPF_REG_2, 4),
3211 BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
3212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003213 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3214 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003215 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3216 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003217 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3218 BPF_FUNC_skb_load_bytes),
3219 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3220 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003221 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3222 offsetof(struct __sk_buff, mark)),
3223 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3224 offsetof(struct __sk_buff, priority)),
3225 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3226 BPF_EXIT_INSN(),
3227 },
3228 .result = ACCEPT,
3229 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3230 },
3231 {
3232 "raw_stack: skb_load_bytes, spilled regs corruption",
3233 .insns = {
3234 BPF_MOV64_IMM(BPF_REG_2, 4),
3235 BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
3236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003237 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003238 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3239 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3241 BPF_FUNC_skb_load_bytes),
3242 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003243 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3244 offsetof(struct __sk_buff, mark)),
3245 BPF_EXIT_INSN(),
3246 },
3247 .result = REJECT,
3248 .errstr = "R0 invalid mem access 'inv'",
3249 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3250 },
3251 {
3252 "raw_stack: skb_load_bytes, spilled regs corruption 2",
3253 .insns = {
3254 BPF_MOV64_IMM(BPF_REG_2, 4),
3255 BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
3256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003257 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3258 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3259 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003260 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3261 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003262 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3263 BPF_FUNC_skb_load_bytes),
3264 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3265 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3266 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003267 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3268 offsetof(struct __sk_buff, mark)),
3269 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3270 offsetof(struct __sk_buff, priority)),
3271 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3272 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3273 offsetof(struct __sk_buff, pkt_type)),
3274 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3275 BPF_EXIT_INSN(),
3276 },
3277 .result = REJECT,
3278 .errstr = "R3 invalid mem access 'inv'",
3279 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3280 },
3281 {
3282 "raw_stack: skb_load_bytes, spilled regs + data",
3283 .insns = {
3284 BPF_MOV64_IMM(BPF_REG_2, 4),
3285 BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
3286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003287 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3288 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3289 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003290 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3291 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003292 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3293 BPF_FUNC_skb_load_bytes),
3294 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3295 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3296 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003297 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3298 offsetof(struct __sk_buff, mark)),
3299 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3300 offsetof(struct __sk_buff, priority)),
3301 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3302 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3303 BPF_EXIT_INSN(),
3304 },
3305 .result = ACCEPT,
3306 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3307 },
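/* The "invalid access" tests pass stack buffer offsets or lengths that fall
 * outside the 512 byte BPF stack (or are unbounded); each must be rejected,
 * while a full 512 byte access ("large access") is still accepted.
 */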
3308 {
3309 "raw_stack: skb_load_bytes, invalid access 1",
3310 .insns = {
3311 BPF_MOV64_IMM(BPF_REG_2, 4),
3312 BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
3313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3314 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3315 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003316 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3317 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003318 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3319 BPF_EXIT_INSN(),
3320 },
3321 .result = REJECT,
3322 .errstr = "invalid stack type R3 off=-513 access_size=8",
3323 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3324 },
3325 {
3326 "raw_stack: skb_load_bytes, invalid access 2",
3327 .insns = {
3328 BPF_MOV64_IMM(BPF_REG_2, 4),
3329 BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
3330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3331 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3332 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003333 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3334 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003335 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3336 BPF_EXIT_INSN(),
3337 },
3338 .result = REJECT,
3339 .errstr = "invalid stack type R3 off=-1 access_size=8",
3340 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3341 },
3342 {
3343 "raw_stack: skb_load_bytes, invalid access 3",
3344 .insns = {
3345 BPF_MOV64_IMM(BPF_REG_2, 4),
3346 BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
3347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3348 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3349 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003350 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3351 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003352 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3353 BPF_EXIT_INSN(),
3354 },
3355 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003356 .errstr = "R4 min value is negative",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003357 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3358 },
3359 {
3360 "raw_stack: skb_load_bytes, invalid access 4",
3361 .insns = {
3362 BPF_MOV64_IMM(BPF_REG_2, 4),
3363 BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
3364 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3365 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3366 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003367 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3368 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003369 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3370 BPF_EXIT_INSN(),
3371 },
3372 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003373 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003374 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3375 },
3376 {
3377 "raw_stack: skb_load_bytes, invalid access 5",
3378 .insns = {
3379 BPF_MOV64_IMM(BPF_REG_2, 4),
3380 BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
3381 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3382 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3383 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003384 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3385 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003386 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3387 BPF_EXIT_INSN(),
3388 },
3389 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003390 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003391 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3392 },
3393 {
3394 "raw_stack: skb_load_bytes, invalid access 6",
3395 .insns = {
3396 BPF_MOV64_IMM(BPF_REG_2, 4),
3397 BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
3398 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3399 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3400 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003401 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3402 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003403 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3404 BPF_EXIT_INSN(),
3405 },
3406 .result = REJECT,
3407 .errstr = "invalid stack type R3 off=-512 access_size=0",
3408 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3409 },
3410 {
3411 "raw_stack: skb_load_bytes, large access",
3412 .insns = {
3413 BPF_MOV64_IMM(BPF_REG_2, 4),
3414 BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
3415 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3416 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3417 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003418 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3419 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003420 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3421 BPF_EXIT_INSN(),
3422 },
3423 .result = ACCEPT,
3424 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3425 },
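/* Stores into the context pointer in R1, whether done via BPF_ST or via
 * BPF_XADD, are not allowed and must be rejected by the verifier.
 */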
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003426 {
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01003427 "context stores via ST",
3428 .insns = {
3429 BPF_MOV64_IMM(BPF_REG_0, 0),
3430 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3431 BPF_EXIT_INSN(),
3432 },
Joe Stringer9d2be442018-10-02 13:35:31 -07003433 .errstr = "BPF_ST stores into R1 ctx is not allowed",
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01003434 .result = REJECT,
3435 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3436 },
3437 {
3438 "context stores via XADD",
3439 .insns = {
3440 BPF_MOV64_IMM(BPF_REG_0, 0),
3441 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3442 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3443 BPF_EXIT_INSN(),
3444 },
Joe Stringer9d2be442018-10-02 13:35:31 -07003445 .errstr = "BPF_XADD stores into R1 ctx is not allowed",
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01003446 .result = REJECT,
3447 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3448 },
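/* "direct packet access" tests: skb->data may only be dereferenced after a
 * successful bounds check against skb->data_end.
 */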
3449 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003450 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003451 .insns = {
3452 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3453 offsetof(struct __sk_buff, data)),
3454 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3455 offsetof(struct __sk_buff, data_end)),
3456 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3458 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3459 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3460 BPF_MOV64_IMM(BPF_REG_0, 0),
3461 BPF_EXIT_INSN(),
3462 },
3463 .result = ACCEPT,
3464 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3465 },
3466 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003467 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003468 .insns = {
3469 BPF_MOV64_IMM(BPF_REG_0, 1),
3470 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3471 offsetof(struct __sk_buff, data_end)),
3472 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3473 offsetof(struct __sk_buff, data)),
3474 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3476 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3477 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3478 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3479 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3480 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3481 offsetof(struct __sk_buff, data)),
3482 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08003483 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3484 offsetof(struct __sk_buff, len)),
Edward Cree1f9ab382017-08-07 15:29:11 +01003485 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3486 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003487 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3488 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3489 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3490 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3491 offsetof(struct __sk_buff, data_end)),
3492 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3493 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3494 BPF_MOV64_IMM(BPF_REG_0, 0),
3495 BPF_EXIT_INSN(),
3496 },
3497 .result = ACCEPT,
3498 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3499 },
3500 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003501 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003502 .insns = {
3503 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3504 offsetof(struct __sk_buff, data)),
3505 BPF_MOV64_IMM(BPF_REG_0, 0),
3506 BPF_EXIT_INSN(),
3507 },
3508 .errstr = "invalid bpf_context access off=76",
3509 .result = REJECT,
3510 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3511 },
3512 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003513 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003514 .insns = {
3515 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3516 offsetof(struct __sk_buff, data)),
3517 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3518 offsetof(struct __sk_buff, data_end)),
3519 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3521 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3522 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3523 BPF_MOV64_IMM(BPF_REG_0, 0),
3524 BPF_EXIT_INSN(),
3525 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003526 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003527 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3528 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003529 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02003530 "direct packet access: test5 (pkt_end >= reg, good access)",
3531 .insns = {
3532 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3533 offsetof(struct __sk_buff, data)),
3534 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3535 offsetof(struct __sk_buff, data_end)),
3536 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3538 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3539 BPF_MOV64_IMM(BPF_REG_0, 1),
3540 BPF_EXIT_INSN(),
3541 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3542 BPF_MOV64_IMM(BPF_REG_0, 0),
3543 BPF_EXIT_INSN(),
3544 },
3545 .result = ACCEPT,
3546 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3547 },
3548 {
3549 "direct packet access: test6 (pkt_end >= reg, bad access)",
3550 .insns = {
3551 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3552 offsetof(struct __sk_buff, data)),
3553 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3554 offsetof(struct __sk_buff, data_end)),
3555 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3557 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3558 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3559 BPF_MOV64_IMM(BPF_REG_0, 1),
3560 BPF_EXIT_INSN(),
3561 BPF_MOV64_IMM(BPF_REG_0, 0),
3562 BPF_EXIT_INSN(),
3563 },
3564 .errstr = "invalid access to packet",
3565 .result = REJECT,
3566 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3567 },
3568 {
3569 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3570 .insns = {
3571 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3572 offsetof(struct __sk_buff, data)),
3573 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3574 offsetof(struct __sk_buff, data_end)),
3575 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3576 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3577 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3578 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3579 BPF_MOV64_IMM(BPF_REG_0, 1),
3580 BPF_EXIT_INSN(),
3581 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3582 BPF_MOV64_IMM(BPF_REG_0, 0),
3583 BPF_EXIT_INSN(),
3584 },
3585 .errstr = "invalid access to packet",
3586 .result = REJECT,
3587 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3588 },
3589 {
3590 "direct packet access: test8 (double test, variant 1)",
3591 .insns = {
3592 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3593 offsetof(struct __sk_buff, data)),
3594 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3595 offsetof(struct __sk_buff, data_end)),
3596 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3598 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3599 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3600 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3601 BPF_MOV64_IMM(BPF_REG_0, 1),
3602 BPF_EXIT_INSN(),
3603 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3604 BPF_MOV64_IMM(BPF_REG_0, 0),
3605 BPF_EXIT_INSN(),
3606 },
3607 .result = ACCEPT,
3608 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3609 },
3610 {
3611 "direct packet access: test9 (double test, variant 2)",
3612 .insns = {
3613 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3614 offsetof(struct __sk_buff, data)),
3615 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3616 offsetof(struct __sk_buff, data_end)),
3617 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3619 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3620 BPF_MOV64_IMM(BPF_REG_0, 1),
3621 BPF_EXIT_INSN(),
3622 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3623 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3624 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3625 BPF_MOV64_IMM(BPF_REG_0, 0),
3626 BPF_EXIT_INSN(),
3627 },
3628 .result = ACCEPT,
3629 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3630 },
3631 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003632 "direct packet access: test10 (write invalid)",
3633 .insns = {
3634 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3635 offsetof(struct __sk_buff, data)),
3636 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3637 offsetof(struct __sk_buff, data_end)),
3638 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3640 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3641 BPF_MOV64_IMM(BPF_REG_0, 0),
3642 BPF_EXIT_INSN(),
3643 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3644 BPF_MOV64_IMM(BPF_REG_0, 0),
3645 BPF_EXIT_INSN(),
3646 },
3647 .errstr = "invalid access to packet",
3648 .result = REJECT,
3649 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3650 },
3651 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003652 "direct packet access: test11 (shift, good access)",
3653 .insns = {
3654 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3655 offsetof(struct __sk_buff, data)),
3656 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3657 offsetof(struct __sk_buff, data_end)),
3658 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3660 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3661 BPF_MOV64_IMM(BPF_REG_3, 144),
3662 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3664 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3665 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3666 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3667 BPF_MOV64_IMM(BPF_REG_0, 1),
3668 BPF_EXIT_INSN(),
3669 BPF_MOV64_IMM(BPF_REG_0, 0),
3670 BPF_EXIT_INSN(),
3671 },
3672 .result = ACCEPT,
3673 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003674 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003675 },
3676 {
3677 "direct packet access: test12 (and, good access)",
3678 .insns = {
3679 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3680 offsetof(struct __sk_buff, data)),
3681 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3682 offsetof(struct __sk_buff, data_end)),
3683 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3685 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3686 BPF_MOV64_IMM(BPF_REG_3, 144),
3687 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3689 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3690 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3691 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3692 BPF_MOV64_IMM(BPF_REG_0, 1),
3693 BPF_EXIT_INSN(),
3694 BPF_MOV64_IMM(BPF_REG_0, 0),
3695 BPF_EXIT_INSN(),
3696 },
3697 .result = ACCEPT,
3698 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003699 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003700 },
3701 {
3702 "direct packet access: test13 (branches, good access)",
3703 .insns = {
3704 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3705 offsetof(struct __sk_buff, data)),
3706 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3707 offsetof(struct __sk_buff, data_end)),
3708 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3709 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3710 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3711 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3712 offsetof(struct __sk_buff, mark)),
3713 BPF_MOV64_IMM(BPF_REG_4, 1),
3714 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3715 BPF_MOV64_IMM(BPF_REG_3, 14),
3716 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3717 BPF_MOV64_IMM(BPF_REG_3, 24),
3718 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3719 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3720 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3721 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3722 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3723 BPF_MOV64_IMM(BPF_REG_0, 1),
3724 BPF_EXIT_INSN(),
3725 BPF_MOV64_IMM(BPF_REG_0, 0),
3726 BPF_EXIT_INSN(),
3727 },
3728 .result = ACCEPT,
3729 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003730 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01003731 },
3732 {
William Tu63dfef72017-02-04 08:37:29 -08003733 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3734 .insns = {
3735 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3736 offsetof(struct __sk_buff, data)),
3737 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3738 offsetof(struct __sk_buff, data_end)),
3739 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3741 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3742 BPF_MOV64_IMM(BPF_REG_5, 12),
3743 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3744 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3745 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3746 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3747 BPF_MOV64_IMM(BPF_REG_0, 1),
3748 BPF_EXIT_INSN(),
3749 BPF_MOV64_IMM(BPF_REG_0, 0),
3750 BPF_EXIT_INSN(),
3751 },
3752 .result = ACCEPT,
3753 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003754 .retval = 1,
William Tu63dfef72017-02-04 08:37:29 -08003755 },
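/* test15: an XADD on a spilled packet pointer is expected to turn the spill
 * slot into an unknown scalar ('inv'), so reloading and dereferencing it
 * afterwards must be rejected.
 */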
3756 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003757 "direct packet access: test15 (spill with xadd)",
3758 .insns = {
3759 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3760 offsetof(struct __sk_buff, data)),
3761 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3762 offsetof(struct __sk_buff, data_end)),
3763 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3764 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3765 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3766 BPF_MOV64_IMM(BPF_REG_5, 4096),
3767 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3768 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3769 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3770 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3771 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3772 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3773 BPF_MOV64_IMM(BPF_REG_0, 0),
3774 BPF_EXIT_INSN(),
3775 },
3776 .errstr = "R2 invalid mem access 'inv'",
3777 .result = REJECT,
3778 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3779 },
3780 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02003781 "direct packet access: test16 (arith on data_end)",
3782 .insns = {
3783 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3784 offsetof(struct __sk_buff, data)),
3785 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3786 offsetof(struct __sk_buff, data_end)),
3787 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3788 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3789 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3790 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3791 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3792 BPF_MOV64_IMM(BPF_REG_0, 0),
3793 BPF_EXIT_INSN(),
3794 },
Joe Stringeraad2eea2018-10-02 13:35:30 -07003795 .errstr = "R3 pointer arithmetic on pkt_end",
Daniel Borkmann728a8532017-04-27 01:39:32 +02003796 .result = REJECT,
3797 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3798 },
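/* test17: with F_LOAD_WITH_STRICT_ALIGNMENT the misaligned packet store must
 * still be caught, even across states revisited via the backward jump.
 */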
3799 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003800 "direct packet access: test17 (pruning, alignment)",
3801 .insns = {
3802 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3803 offsetof(struct __sk_buff, data)),
3804 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3805 offsetof(struct __sk_buff, data_end)),
3806 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3807 offsetof(struct __sk_buff, mark)),
3808 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3810 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3811 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3812 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3813 BPF_MOV64_IMM(BPF_REG_0, 0),
3814 BPF_EXIT_INSN(),
3815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3816 BPF_JMP_A(-6),
3817 },
Edward Creef65b1842017-08-07 15:27:12 +01003818 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003819 .result = REJECT,
3820 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3821 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3822 },
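/* tests 18-24 add scalars to packet pointers in both operand orders
 * (imm += pkt_ptr, x += pkt_ptr); the result must still be range checked
 * against data_end before any access.
 */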
3823 {
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003824 "direct packet access: test18 (imm += pkt_ptr, 1)",
3825 .insns = {
3826 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3827 offsetof(struct __sk_buff, data)),
3828 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3829 offsetof(struct __sk_buff, data_end)),
3830 BPF_MOV64_IMM(BPF_REG_0, 8),
3831 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3832 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3833 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3834 BPF_MOV64_IMM(BPF_REG_0, 0),
3835 BPF_EXIT_INSN(),
3836 },
3837 .result = ACCEPT,
3838 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3839 },
3840 {
3841 "direct packet access: test19 (imm += pkt_ptr, 2)",
3842 .insns = {
3843 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3844 offsetof(struct __sk_buff, data)),
3845 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3846 offsetof(struct __sk_buff, data_end)),
3847 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3849 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3850 BPF_MOV64_IMM(BPF_REG_4, 4),
3851 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3852 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3853 BPF_MOV64_IMM(BPF_REG_0, 0),
3854 BPF_EXIT_INSN(),
3855 },
3856 .result = ACCEPT,
3857 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3858 },
3859 {
3860 "direct packet access: test20 (x += pkt_ptr, 1)",
3861 .insns = {
3862 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3863 offsetof(struct __sk_buff, data)),
3864 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3865 offsetof(struct __sk_buff, data_end)),
3866 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3867 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3868 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003869 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003870 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3871 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3872 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003873 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003874 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3875 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3876 BPF_MOV64_IMM(BPF_REG_0, 0),
3877 BPF_EXIT_INSN(),
3878 },
3879 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3880 .result = ACCEPT,
3881 },
3882 {
3883 "direct packet access: test21 (x += pkt_ptr, 2)",
3884 .insns = {
3885 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3886 offsetof(struct __sk_buff, data)),
3887 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3888 offsetof(struct __sk_buff, data_end)),
3889 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3891 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3892 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3893 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3894 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003895 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003896 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3897 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003899 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3900 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3901 BPF_MOV64_IMM(BPF_REG_0, 0),
3902 BPF_EXIT_INSN(),
3903 },
3904 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3905 .result = ACCEPT,
3906 },
3907 {
3908 "direct packet access: test22 (x += pkt_ptr, 3)",
3909 .insns = {
3910 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3911 offsetof(struct __sk_buff, data)),
3912 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3913 offsetof(struct __sk_buff, data_end)),
3914 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3916 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3917 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3918 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3919 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3920 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3921 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3922 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3923 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003924 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003925 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3926 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3927 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3928 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3929 BPF_MOV64_IMM(BPF_REG_2, 1),
3930 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3931 BPF_MOV64_IMM(BPF_REG_0, 0),
3932 BPF_EXIT_INSN(),
3933 },
3934 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3935 .result = ACCEPT,
3936 },
3937 {
3938 "direct packet access: test23 (x += pkt_ptr, 4)",
3939 .insns = {
3940 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3941 offsetof(struct __sk_buff, data)),
3942 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3943 offsetof(struct __sk_buff, data_end)),
3944 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3945 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3946 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3947 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3948 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3949 BPF_MOV64_IMM(BPF_REG_0, 31),
3950 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3951 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3952 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3954 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3955 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3956 BPF_MOV64_IMM(BPF_REG_0, 0),
3957 BPF_EXIT_INSN(),
3958 },
3959 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3960 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003961 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003962 },
3963 {
3964 "direct packet access: test24 (x += pkt_ptr, 5)",
3965 .insns = {
3966 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3967 offsetof(struct __sk_buff, data)),
3968 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3969 offsetof(struct __sk_buff, data_end)),
3970 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3971 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3972 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3973 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3974 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3975 BPF_MOV64_IMM(BPF_REG_0, 64),
3976 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3977 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3978 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
Edward Cree1f9ab382017-08-07 15:29:11 +01003979 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003980 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3981 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3982 BPF_MOV64_IMM(BPF_REG_0, 0),
3983 BPF_EXIT_INSN(),
3984 },
3985 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3986 .result = ACCEPT,
3987 },
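/* tests 25-28 check that JLT/JLE comparisons against data_end mark the packet
 * range on the correct branch: accesses on the checked path pass, accesses on
 * the unchecked path are rejected.
 */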
3988 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003989 "direct packet access: test25 (marking on <, good access)",
3990 .insns = {
3991 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3992 offsetof(struct __sk_buff, data)),
3993 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3994 offsetof(struct __sk_buff, data_end)),
3995 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3996 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3997 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3998 BPF_MOV64_IMM(BPF_REG_0, 0),
3999 BPF_EXIT_INSN(),
4000 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4001 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4002 },
4003 .result = ACCEPT,
4004 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4005 },
4006 {
4007 "direct packet access: test26 (marking on <, bad access)",
4008 .insns = {
4009 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4010 offsetof(struct __sk_buff, data)),
4011 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4012 offsetof(struct __sk_buff, data_end)),
4013 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4015 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4016 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4017 BPF_MOV64_IMM(BPF_REG_0, 0),
4018 BPF_EXIT_INSN(),
4019 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4020 },
4021 .result = REJECT,
4022 .errstr = "invalid access to packet",
4023 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4024 },
4025 {
4026 "direct packet access: test27 (marking on <=, good access)",
4027 .insns = {
4028 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4029 offsetof(struct __sk_buff, data)),
4030 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4031 offsetof(struct __sk_buff, data_end)),
4032 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4034 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4035 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4036 BPF_MOV64_IMM(BPF_REG_0, 1),
4037 BPF_EXIT_INSN(),
4038 },
4039 .result = ACCEPT,
4040 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08004041 .retval = 1,
Daniel Borkmann31e482b2017-08-10 01:40:03 +02004042 },
4043 {
4044 "direct packet access: test28 (marking on <=, bad access)",
4045 .insns = {
4046 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4047 offsetof(struct __sk_buff, data)),
4048 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4049 offsetof(struct __sk_buff, data_end)),
4050 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4052 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4053 BPF_MOV64_IMM(BPF_REG_0, 1),
4054 BPF_EXIT_INSN(),
4055 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4056 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4057 },
4058 .result = REJECT,
4059 .errstr = "invalid access to packet",
4060 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4061 },
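/* "helper access to packet": packet pointers passed as helper arguments (here
 * bpf_map_lookup_elem()/bpf_map_update_elem() on XDP) are only valid when a
 * preceding bounds check established the accessed range.
 */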
4062 {
Aaron Yue1633ac02016-08-11 18:17:17 -07004063 "helper access to packet: test1, valid packet_ptr range",
4064 .insns = {
4065 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4066 offsetof(struct xdp_md, data)),
4067 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4068 offsetof(struct xdp_md, data_end)),
4069 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4070 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4071 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4072 BPF_LD_MAP_FD(BPF_REG_1, 0),
4073 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4074 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004075 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4076 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07004077 BPF_MOV64_IMM(BPF_REG_0, 0),
4078 BPF_EXIT_INSN(),
4079 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004080 .fixup_map_hash_8b = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07004081 .result_unpriv = ACCEPT,
4082 .result = ACCEPT,
4083 .prog_type = BPF_PROG_TYPE_XDP,
4084 },
4085 {
4086 "helper access to packet: test2, unchecked packet_ptr",
4087 .insns = {
4088 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4089 offsetof(struct xdp_md, data)),
4090 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004091 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4092 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07004093 BPF_MOV64_IMM(BPF_REG_0, 0),
4094 BPF_EXIT_INSN(),
4095 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004096 .fixup_map_hash_8b = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07004097 .result = REJECT,
4098 .errstr = "invalid access to packet",
4099 .prog_type = BPF_PROG_TYPE_XDP,
4100 },
4101 {
4102 "helper access to packet: test3, variable add",
4103 .insns = {
4104 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4105 offsetof(struct xdp_md, data)),
4106 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4107 offsetof(struct xdp_md, data_end)),
4108 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4110 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4111 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4112 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4113 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4114 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4116 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4117 BPF_LD_MAP_FD(BPF_REG_1, 0),
4118 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004119 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4120 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07004121 BPF_MOV64_IMM(BPF_REG_0, 0),
4122 BPF_EXIT_INSN(),
4123 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004124 .fixup_map_hash_8b = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07004125 .result = ACCEPT,
4126 .prog_type = BPF_PROG_TYPE_XDP,
4127 },
4128 {
4129 "helper access to packet: test4, packet_ptr with bad range",
4130 .insns = {
4131 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4132 offsetof(struct xdp_md, data)),
4133 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4134 offsetof(struct xdp_md, data_end)),
4135 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4136 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4137 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4138 BPF_MOV64_IMM(BPF_REG_0, 0),
4139 BPF_EXIT_INSN(),
4140 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004141 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4142 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07004143 BPF_MOV64_IMM(BPF_REG_0, 0),
4144 BPF_EXIT_INSN(),
4145 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004146 .fixup_map_hash_8b = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07004147 .result = REJECT,
4148 .errstr = "invalid access to packet",
4149 .prog_type = BPF_PROG_TYPE_XDP,
4150 },
4151 {
4152 "helper access to packet: test5, packet_ptr with too short range",
4153 .insns = {
4154 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4155 offsetof(struct xdp_md, data)),
4156 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4157 offsetof(struct xdp_md, data_end)),
4158 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4159 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4160 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4161 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4162 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4164 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07004165 BPF_MOV64_IMM(BPF_REG_0, 0),
4166 BPF_EXIT_INSN(),
4167 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004168 .fixup_map_hash_8b = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07004169 .result = REJECT,
4170 .errstr = "invalid access to packet",
4171 .prog_type = BPF_PROG_TYPE_XDP,
4172 },
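/* tests 6-10 repeat the XDP helper cases above for BPF_PROG_TYPE_SCHED_CLS,
 * using the __sk_buff data/data_end pointers.
 */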
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004173 {
4174 "helper access to packet: test6, cls valid packet_ptr range",
4175 .insns = {
4176 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4177 offsetof(struct __sk_buff, data)),
4178 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4179 offsetof(struct __sk_buff, data_end)),
4180 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4181 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4182 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4183 BPF_LD_MAP_FD(BPF_REG_1, 0),
4184 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4185 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4187 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004188 BPF_MOV64_IMM(BPF_REG_0, 0),
4189 BPF_EXIT_INSN(),
4190 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004191 .fixup_map_hash_8b = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004192 .result = ACCEPT,
4193 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4194 },
4195 {
4196 "helper access to packet: test7, cls unchecked packet_ptr",
4197 .insns = {
4198 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4199 offsetof(struct __sk_buff, data)),
4200 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004201 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4202 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004203 BPF_MOV64_IMM(BPF_REG_0, 0),
4204 BPF_EXIT_INSN(),
4205 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004206 .fixup_map_hash_8b = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004207 .result = REJECT,
4208 .errstr = "invalid access to packet",
4209 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4210 },
4211 {
4212 "helper access to packet: test8, cls variable add",
4213 .insns = {
4214 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4215 offsetof(struct __sk_buff, data)),
4216 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4217 offsetof(struct __sk_buff, data_end)),
4218 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4220 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4221 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4222 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4223 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4224 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4226 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4227 BPF_LD_MAP_FD(BPF_REG_1, 0),
4228 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004229 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4230 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004231 BPF_MOV64_IMM(BPF_REG_0, 0),
4232 BPF_EXIT_INSN(),
4233 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004234 .fixup_map_hash_8b = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004235 .result = ACCEPT,
4236 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4237 },
4238 {
4239 "helper access to packet: test9, cls packet_ptr with bad range",
4240 .insns = {
4241 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4242 offsetof(struct __sk_buff, data)),
4243 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4244 offsetof(struct __sk_buff, data_end)),
4245 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4247 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4248 BPF_MOV64_IMM(BPF_REG_0, 0),
4249 BPF_EXIT_INSN(),
4250 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004251 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4252 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004253 BPF_MOV64_IMM(BPF_REG_0, 0),
4254 BPF_EXIT_INSN(),
4255 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004256 .fixup_map_hash_8b = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004257 .result = REJECT,
4258 .errstr = "invalid access to packet",
4259 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4260 },
4261 {
4262 "helper access to packet: test10, cls packet_ptr with too short range",
4263 .insns = {
4264 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4265 offsetof(struct __sk_buff, data)),
4266 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4267 offsetof(struct __sk_buff, data_end)),
4268 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4269 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4270 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4271 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4272 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004273 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4274 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004275 BPF_MOV64_IMM(BPF_REG_0, 0),
4276 BPF_EXIT_INSN(),
4277 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004278 .fixup_map_hash_8b = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004279 .result = REJECT,
4280 .errstr = "invalid access to packet",
4281 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4282 },
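/* tests 11 and 12: helpers such as bpf_skb_store_bytes()/bpf_skb_load_bytes()
 * may not take packet pointers directly ("helper access to the packet").
 */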
4283 {
4284 "helper access to packet: test11, cls unsuitable helper 1",
4285 .insns = {
4286 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4287 offsetof(struct __sk_buff, data)),
4288 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4289 offsetof(struct __sk_buff, data_end)),
4290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4291 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4292 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4293 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4294 BPF_MOV64_IMM(BPF_REG_2, 0),
4295 BPF_MOV64_IMM(BPF_REG_4, 42),
4296 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004297 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4298 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004299 BPF_MOV64_IMM(BPF_REG_0, 0),
4300 BPF_EXIT_INSN(),
4301 },
4302 .result = REJECT,
4303 .errstr = "helper access to the packet",
4304 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4305 },
4306 {
4307 "helper access to packet: test12, cls unsuitable helper 2",
4308 .insns = {
4309 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4310 offsetof(struct __sk_buff, data)),
4311 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4312 offsetof(struct __sk_buff, data_end)),
4313 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4315 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4316 BPF_MOV64_IMM(BPF_REG_2, 0),
4317 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004318 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4319 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004320 BPF_MOV64_IMM(BPF_REG_0, 0),
4321 BPF_EXIT_INSN(),
4322 },
4323 .result = REJECT,
4324 .errstr = "helper access to the packet",
4325 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4326 },
4327 {
4328 "helper access to packet: test13, cls helper ok",
4329 .insns = {
4330 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4331 offsetof(struct __sk_buff, data)),
4332 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4333 offsetof(struct __sk_buff, data_end)),
4334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4335 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4337 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4338 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4339 BPF_MOV64_IMM(BPF_REG_2, 4),
4340 BPF_MOV64_IMM(BPF_REG_3, 0),
4341 BPF_MOV64_IMM(BPF_REG_4, 0),
4342 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004343 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4344 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004345 BPF_MOV64_IMM(BPF_REG_0, 0),
4346 BPF_EXIT_INSN(),
4347 },
4348 .result = ACCEPT,
4349 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4350 },
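/* tests 14 and 15: subtracting from a checked packet pointer is fine as long
 * as the helper access stays inside the verified range; going below it must
 * be rejected.
 */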
4351 {
Edward Creef65b1842017-08-07 15:27:12 +01004352 "helper access to packet: test14, cls helper ok sub",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004353 .insns = {
4354 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4355 offsetof(struct __sk_buff, data)),
4356 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4357 offsetof(struct __sk_buff, data_end)),
4358 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4359 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4361 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4362 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4363 BPF_MOV64_IMM(BPF_REG_2, 4),
4364 BPF_MOV64_IMM(BPF_REG_3, 0),
4365 BPF_MOV64_IMM(BPF_REG_4, 0),
4366 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004367 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4368 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004369 BPF_MOV64_IMM(BPF_REG_0, 0),
4370 BPF_EXIT_INSN(),
4371 },
Edward Creef65b1842017-08-07 15:27:12 +01004372 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004373 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4374 },
4375 {
Edward Creef65b1842017-08-07 15:27:12 +01004376 "helper access to packet: test15, cls helper fail sub",
4377 .insns = {
4378 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4379 offsetof(struct __sk_buff, data)),
4380 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4381 offsetof(struct __sk_buff, data_end)),
4382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4383 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4384 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4385 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4386 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4387 BPF_MOV64_IMM(BPF_REG_2, 4),
4388 BPF_MOV64_IMM(BPF_REG_3, 0),
4389 BPF_MOV64_IMM(BPF_REG_4, 0),
4390 BPF_MOV64_IMM(BPF_REG_5, 0),
4391 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4392 BPF_FUNC_csum_diff),
4393 BPF_MOV64_IMM(BPF_REG_0, 0),
4394 BPF_EXIT_INSN(),
4395 },
4396 .result = REJECT,
4397 .errstr = "invalid access to packet",
4398 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4399 },
4400 {
4401 "helper access to packet: test16, cls helper fail range 1",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004402 .insns = {
4403 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4404 offsetof(struct __sk_buff, data)),
4405 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4406 offsetof(struct __sk_buff, data_end)),
4407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4408 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4409 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4410 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4412 BPF_MOV64_IMM(BPF_REG_2, 8),
4413 BPF_MOV64_IMM(BPF_REG_3, 0),
4414 BPF_MOV64_IMM(BPF_REG_4, 0),
4415 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4417 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004418 BPF_MOV64_IMM(BPF_REG_0, 0),
4419 BPF_EXIT_INSN(),
4420 },
4421 .result = REJECT,
4422 .errstr = "invalid access to packet",
4423 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4424 },
4425 {
Edward Creef65b1842017-08-07 15:27:12 +01004426 "helper access to packet: test17, cls helper fail range 2",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004427 .insns = {
4428 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4429 offsetof(struct __sk_buff, data)),
4430 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4431 offsetof(struct __sk_buff, data_end)),
4432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4433 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4435 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4436 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4437 BPF_MOV64_IMM(BPF_REG_2, -9),
4438 BPF_MOV64_IMM(BPF_REG_3, 0),
4439 BPF_MOV64_IMM(BPF_REG_4, 0),
4440 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004441 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4442 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004443 BPF_MOV64_IMM(BPF_REG_0, 0),
4444 BPF_EXIT_INSN(),
4445 },
4446 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004447 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004448 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4449 },
4450 {
Edward Creef65b1842017-08-07 15:27:12 +01004451 "helper access to packet: test18, cls helper fail range 3",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004452 .insns = {
4453 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4454 offsetof(struct __sk_buff, data)),
4455 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4456 offsetof(struct __sk_buff, data_end)),
4457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4458 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4460 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4461 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4462 BPF_MOV64_IMM(BPF_REG_2, ~0),
4463 BPF_MOV64_IMM(BPF_REG_3, 0),
4464 BPF_MOV64_IMM(BPF_REG_4, 0),
4465 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004466 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4467 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004468 BPF_MOV64_IMM(BPF_REG_0, 0),
4469 BPF_EXIT_INSN(),
4470 },
4471 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004472 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004473 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4474 },
4475 {
Yonghong Songb6ff6392017-11-12 14:49:11 -08004476 "helper access to packet: test19, cls helper range zero",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004477 .insns = {
4478 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4479 offsetof(struct __sk_buff, data)),
4480 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4481 offsetof(struct __sk_buff, data_end)),
4482 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4483 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4484 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4485 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4486 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4487 BPF_MOV64_IMM(BPF_REG_2, 0),
4488 BPF_MOV64_IMM(BPF_REG_3, 0),
4489 BPF_MOV64_IMM(BPF_REG_4, 0),
4490 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004491 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4492 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004493 BPF_MOV64_IMM(BPF_REG_0, 0),
4494 BPF_EXIT_INSN(),
4495 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08004496 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004497 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4498 },
4499 {
Edward Creef65b1842017-08-07 15:27:12 +01004500 "helper access to packet: test20, pkt end as input",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004501 .insns = {
4502 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4503 offsetof(struct __sk_buff, data)),
4504 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4505 offsetof(struct __sk_buff, data_end)),
4506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4507 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4509 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4510 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4511 BPF_MOV64_IMM(BPF_REG_2, 4),
4512 BPF_MOV64_IMM(BPF_REG_3, 0),
4513 BPF_MOV64_IMM(BPF_REG_4, 0),
4514 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004515 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4516 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004517 BPF_MOV64_IMM(BPF_REG_0, 0),
4518 BPF_EXIT_INSN(),
4519 },
4520 .result = REJECT,
4521 .errstr = "R1 type=pkt_end expected=fp",
4522 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4523 },
4524 {
Edward Creef65b1842017-08-07 15:27:12 +01004525 "helper access to packet: test21, wrong reg",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004526 .insns = {
4527 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4528 offsetof(struct __sk_buff, data)),
4529 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4530 offsetof(struct __sk_buff, data_end)),
4531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4532 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4534 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4535 BPF_MOV64_IMM(BPF_REG_2, 4),
4536 BPF_MOV64_IMM(BPF_REG_3, 0),
4537 BPF_MOV64_IMM(BPF_REG_4, 0),
4538 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004539 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4540 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004541 BPF_MOV64_IMM(BPF_REG_0, 0),
4542 BPF_EXIT_INSN(),
4543 },
4544 .result = REJECT,
4545 .errstr = "invalid access to packet",
4546 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4547 },
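/* bpf_map_lookup_elem() from a program is not permitted on special map types:
 * the sockmap, sockhash, xskmap, stack trace and prog array lookups below
 * must all be rejected.
 */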
Josef Bacik48461132016-09-28 10:54:32 -04004548 {
Prashant Bhole7c85c442018-10-09 10:04:54 +09004549 "prevent map lookup in sockmap",
4550 .insns = {
4551 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4552 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4553 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4554 BPF_LD_MAP_FD(BPF_REG_1, 0),
4555 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4556 BPF_FUNC_map_lookup_elem),
4557 BPF_EXIT_INSN(),
4558 },
4559 .fixup_map_sockmap = { 3 },
4560 .result = REJECT,
4561 .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
4562 .prog_type = BPF_PROG_TYPE_SOCK_OPS,
4563 },
4564 {
4565 "prevent map lookup in sockhash",
4566 .insns = {
4567 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4570 BPF_LD_MAP_FD(BPF_REG_1, 0),
4571 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4572 BPF_FUNC_map_lookup_elem),
4573 BPF_EXIT_INSN(),
4574 },
4575 .fixup_map_sockhash = { 3 },
4576 .result = REJECT,
4577 .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
4578 .prog_type = BPF_PROG_TYPE_SOCK_OPS,
4579 },
4580 {
4581 "prevent map lookup in xskmap",
4582 .insns = {
4583 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4584 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4586 BPF_LD_MAP_FD(BPF_REG_1, 0),
4587 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4588 BPF_FUNC_map_lookup_elem),
4589 BPF_EXIT_INSN(),
4590 },
4591 .fixup_map_xskmap = { 3 },
4592 .result = REJECT,
4593 .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
4594 .prog_type = BPF_PROG_TYPE_XDP,
4595 },
4596 {
4597 "prevent map lookup in stack trace",
4598 .insns = {
4599 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4600 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4602 BPF_LD_MAP_FD(BPF_REG_1, 0),
4603 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4604 BPF_FUNC_map_lookup_elem),
4605 BPF_EXIT_INSN(),
4606 },
4607 .fixup_map_stacktrace = { 3 },
4608 .result = REJECT,
4609 .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
4610 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
4611 },
4612 {
4613 "prevent map lookup in prog array",
4614 .insns = {
4615 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4616 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4617 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4618 BPF_LD_MAP_FD(BPF_REG_1, 0),
4619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4620 BPF_FUNC_map_lookup_elem),
4621 BPF_EXIT_INSN(),
4622 },
4623 .fixup_prog2 = { 3 },
4624 .result = REJECT,
4625 .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
4626 },
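	/* Bounds checking of accesses into a 48-byte array map value
	 * (struct test_val): constant, register, variable and signed
	 * variable indices, with and without proper floor/max checks.
	 */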
4627 {
Josef Bacik48461132016-09-28 10:54:32 -04004628 "valid map access into an array with a constant",
4629 .insns = {
4630 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4631 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4633 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004634 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4635 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004636 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004637 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4638 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004639 BPF_EXIT_INSN(),
4640 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004641 .fixup_map_hash_48b = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004642 .errstr_unpriv = "R0 leaks addr",
4643 .result_unpriv = REJECT,
4644 .result = ACCEPT,
4645 },
4646 {
4647 "valid map access into an array with a register",
4648 .insns = {
4649 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4650 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4652 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004653 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4654 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004655 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4656 BPF_MOV64_IMM(BPF_REG_1, 4),
4657 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4658 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004659 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4660 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004661 BPF_EXIT_INSN(),
4662 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004663 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004664 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004665 .result_unpriv = REJECT,
4666 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004667 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004668 },
4669 {
4670 "valid map access into an array with a variable",
4671 .insns = {
4672 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4673 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4675 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004676 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4677 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004678 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4679 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4680 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4681 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4682 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004683 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4684 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004685 BPF_EXIT_INSN(),
4686 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004687 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004688 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004689 .result_unpriv = REJECT,
4690 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004691 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004692 },
4693 {
4694 "valid map access into an array with a signed variable",
4695 .insns = {
4696 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4697 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4698 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4699 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004700 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4701 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004702 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4703 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4704 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4705 BPF_MOV32_IMM(BPF_REG_1, 0),
4706 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4707 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4708 BPF_MOV32_IMM(BPF_REG_1, 0),
4709 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4710 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004711 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4712 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004713 BPF_EXIT_INSN(),
4714 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004715 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004716 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004717 .result_unpriv = REJECT,
4718 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004719 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004720 },
4721 {
4722 "invalid map access into an array with a constant",
4723 .insns = {
4724 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4725 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4727 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4729 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004730 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4731 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4732 offsetof(struct test_val, foo)),
4733 BPF_EXIT_INSN(),
4734 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004735 .fixup_map_hash_48b = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004736 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4737 .result = REJECT,
4738 },
4739 {
4740 "invalid map access into an array with a register",
4741 .insns = {
4742 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4743 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4744 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4745 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004746 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4747 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004748 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4749 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4750 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4751 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004752 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4753 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004754 BPF_EXIT_INSN(),
4755 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004756 .fixup_map_hash_48b = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004757 .errstr = "R0 min value is outside of the array range",
4758 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004759 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004760 },
4761 {
4762 "invalid map access into an array with a variable",
4763 .insns = {
4764 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4765 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4766 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4767 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004768 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4769 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004770 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4771 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4772 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4773 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004774 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4775 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004776 BPF_EXIT_INSN(),
4777 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004778 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004779 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
Josef Bacik48461132016-09-28 10:54:32 -04004780 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004781 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004782 },
4783 {
4784 "invalid map access into an array with no floor check",
4785 .insns = {
4786 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4787 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4788 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4789 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004790 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4791 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004792 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
Edward Creef65b1842017-08-07 15:27:12 +01004793 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik48461132016-09-28 10:54:32 -04004794 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4795 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4796 BPF_MOV32_IMM(BPF_REG_1, 0),
4797 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4798 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004799 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4800 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004801 BPF_EXIT_INSN(),
4802 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004803 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004804 .errstr_unpriv = "R0 leaks addr",
4805 .errstr = "R0 unbounded memory access",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004806 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004807 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004808 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004809 },
4810 {
4811	"invalid map access into an array with an invalid max check",
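	/* Note: no upper-bound (max) check follows, only a signed floor
	 * comparison, so the index remains unbounded.
	 */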
4812 .insns = {
4813 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4814 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4816 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004817 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4818 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004819 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4820 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4821 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4822 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4823 BPF_MOV32_IMM(BPF_REG_1, 0),
4824 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4825 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004826 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4827 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004828 BPF_EXIT_INSN(),
4829 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004830 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004831 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004832 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004833 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004834 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004835 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004836 },
4837 {
4838	"invalid map access into an array with an invalid max check",
4839 .insns = {
4840 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4841 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4843 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004844 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4845 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004846 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4847 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4848 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4849 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4850 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4851 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004852 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4853 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004854 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4855 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004856 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4857 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004858 BPF_EXIT_INSN(),
4859 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004860 .fixup_map_hash_48b = { 3, 11 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004861 .errstr = "R0 pointer += pointer",
Josef Bacik48461132016-09-28 10:54:32 -04004862 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004863 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004864 },
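	/* CGROUP_SKB programs: readable __sk_buff fields, plus context
	 * fields (tc_classid, data_meta, flow_keys, and writes to napi_id)
	 * that must be rejected as invalid bpf_context access.
	 */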
Thomas Graf57a09bf2016-10-18 19:51:19 +02004865 {
Song Liu2cb494a2018-10-19 09:57:58 -07004866 "direct packet read test#1 for CGROUP_SKB",
4867 .insns = {
4868 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4869 offsetof(struct __sk_buff, data)),
4870 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4871 offsetof(struct __sk_buff, data_end)),
4872 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4873 offsetof(struct __sk_buff, len)),
4874 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4875 offsetof(struct __sk_buff, pkt_type)),
4876 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4877 offsetof(struct __sk_buff, mark)),
4878 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4879 offsetof(struct __sk_buff, mark)),
4880 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4881 offsetof(struct __sk_buff, queue_mapping)),
4882 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4883 offsetof(struct __sk_buff, protocol)),
4884 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4885 offsetof(struct __sk_buff, vlan_present)),
4886 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4888 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4889 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4890 BPF_MOV64_IMM(BPF_REG_0, 0),
4891 BPF_EXIT_INSN(),
4892 },
4893 .result = ACCEPT,
4894 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4895 },
4896 {
4897 "direct packet read test#2 for CGROUP_SKB",
4898 .insns = {
4899 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4900 offsetof(struct __sk_buff, vlan_tci)),
4901 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4902 offsetof(struct __sk_buff, vlan_proto)),
4903 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4904 offsetof(struct __sk_buff, priority)),
4905 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4906 offsetof(struct __sk_buff, priority)),
4907 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4908 offsetof(struct __sk_buff,
4909 ingress_ifindex)),
4910 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4911 offsetof(struct __sk_buff, tc_index)),
4912 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4913 offsetof(struct __sk_buff, hash)),
4914 BPF_MOV64_IMM(BPF_REG_0, 0),
4915 BPF_EXIT_INSN(),
4916 },
4917 .result = ACCEPT,
4918 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4919 },
4920 {
4921 "direct packet read test#3 for CGROUP_SKB",
4922 .insns = {
4923 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4924 offsetof(struct __sk_buff, cb[0])),
4925 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4926 offsetof(struct __sk_buff, cb[1])),
4927 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4928 offsetof(struct __sk_buff, cb[2])),
4929 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4930 offsetof(struct __sk_buff, cb[3])),
4931 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4932 offsetof(struct __sk_buff, cb[4])),
4933 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4934 offsetof(struct __sk_buff, napi_id)),
4935 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
4936 offsetof(struct __sk_buff, cb[0])),
4937 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
4938 offsetof(struct __sk_buff, cb[1])),
4939 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4940 offsetof(struct __sk_buff, cb[2])),
4941 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
4942 offsetof(struct __sk_buff, cb[3])),
4943 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
4944 offsetof(struct __sk_buff, cb[4])),
4945 BPF_MOV64_IMM(BPF_REG_0, 0),
4946 BPF_EXIT_INSN(),
4947 },
4948 .result = ACCEPT,
4949 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4950 },
4951 {
4952 "direct packet read test#4 for CGROUP_SKB",
4953 .insns = {
4954 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4955 offsetof(struct __sk_buff, family)),
4956 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4957 offsetof(struct __sk_buff, remote_ip4)),
4958 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4959 offsetof(struct __sk_buff, local_ip4)),
4960 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4961 offsetof(struct __sk_buff, remote_ip6[0])),
4962 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4963 offsetof(struct __sk_buff, remote_ip6[1])),
4964 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4965 offsetof(struct __sk_buff, remote_ip6[2])),
4966 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4967 offsetof(struct __sk_buff, remote_ip6[3])),
4968 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4969 offsetof(struct __sk_buff, local_ip6[0])),
4970 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4971 offsetof(struct __sk_buff, local_ip6[1])),
4972 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4973 offsetof(struct __sk_buff, local_ip6[2])),
4974 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4975 offsetof(struct __sk_buff, local_ip6[3])),
4976 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4977 offsetof(struct __sk_buff, remote_port)),
4978 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4979 offsetof(struct __sk_buff, local_port)),
4980 BPF_MOV64_IMM(BPF_REG_0, 0),
4981 BPF_EXIT_INSN(),
4982 },
4983 .result = ACCEPT,
4984 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4985 },
4986 {
4987 "invalid access of tc_classid for CGROUP_SKB",
4988 .insns = {
4989 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4990 offsetof(struct __sk_buff, tc_classid)),
4991 BPF_MOV64_IMM(BPF_REG_0, 0),
4992 BPF_EXIT_INSN(),
4993 },
4994 .result = REJECT,
4995 .errstr = "invalid bpf_context access",
4996 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4997 },
4998 {
4999 "invalid access of data_meta for CGROUP_SKB",
5000 .insns = {
5001 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5002 offsetof(struct __sk_buff, data_meta)),
5003 BPF_MOV64_IMM(BPF_REG_0, 0),
5004 BPF_EXIT_INSN(),
5005 },
5006 .result = REJECT,
5007 .errstr = "invalid bpf_context access",
5008 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5009 },
5010 {
5011 "invalid access of flow_keys for CGROUP_SKB",
5012 .insns = {
5013 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5014 offsetof(struct __sk_buff, flow_keys)),
5015 BPF_MOV64_IMM(BPF_REG_0, 0),
5016 BPF_EXIT_INSN(),
5017 },
5018 .result = REJECT,
5019 .errstr = "invalid bpf_context access",
5020 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5021 },
5022 {
5023 "invalid write access to napi_id for CGROUP_SKB",
5024 .insns = {
5025 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5026 offsetof(struct __sk_buff, napi_id)),
5027 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
5028 offsetof(struct __sk_buff, napi_id)),
5029 BPF_MOV64_IMM(BPF_REG_0, 0),
5030 BPF_EXIT_INSN(),
5031 },
5032 .result = REJECT,
5033 .errstr = "invalid bpf_context access",
5034 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5035 },
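	/* bpf_get_local_storage() with a cgroup storage map: one valid use,
	 * plus rejections for a wrong map type, a bad map fd, out-of-bounds
	 * value accesses and non-zero flags.
	 */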
5036 {
Roman Gushchind4c9f572018-08-02 14:27:28 -07005037 "valid cgroup storage access",
5038 .insns = {
5039 BPF_MOV64_IMM(BPF_REG_2, 0),
5040 BPF_LD_MAP_FD(BPF_REG_1, 0),
5041 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5042 BPF_FUNC_get_local_storage),
5043 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5044 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5045 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5046 BPF_EXIT_INSN(),
5047 },
5048 .fixup_cgroup_storage = { 1 },
5049 .result = ACCEPT,
5050 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5051 },
5052 {
5053 "invalid cgroup storage access 1",
5054 .insns = {
5055 BPF_MOV64_IMM(BPF_REG_2, 0),
5056 BPF_LD_MAP_FD(BPF_REG_1, 0),
5057 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5058 BPF_FUNC_get_local_storage),
5059 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5060 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5061 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5062 BPF_EXIT_INSN(),
5063 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005064 .fixup_map_hash_8b = { 1 },
Roman Gushchind4c9f572018-08-02 14:27:28 -07005065 .result = REJECT,
5066 .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5067 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5068 },
5069 {
5070 "invalid cgroup storage access 2",
5071 .insns = {
5072 BPF_MOV64_IMM(BPF_REG_2, 0),
5073 BPF_LD_MAP_FD(BPF_REG_1, 1),
5074 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5075 BPF_FUNC_get_local_storage),
5076 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5077 BPF_EXIT_INSN(),
5078 },
5079 .result = REJECT,
5080 .errstr = "fd 1 is not pointing to valid bpf_map",
5081 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5082 },
5083 {
Roman Gushchina3c60542018-09-28 14:45:53 +00005084 "invalid cgroup storage access 3",
Roman Gushchind4c9f572018-08-02 14:27:28 -07005085 .insns = {
5086 BPF_MOV64_IMM(BPF_REG_2, 0),
5087 BPF_LD_MAP_FD(BPF_REG_1, 0),
5088 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5089 BPF_FUNC_get_local_storage),
5090 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5091 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5092 BPF_MOV64_IMM(BPF_REG_0, 0),
5093 BPF_EXIT_INSN(),
5094 },
5095 .fixup_cgroup_storage = { 1 },
5096 .result = REJECT,
5097 .errstr = "invalid access to map value, value_size=64 off=256 size=4",
5098 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5099 },
5100 {
5101 "invalid cgroup storage access 4",
5102 .insns = {
5103 BPF_MOV64_IMM(BPF_REG_2, 0),
5104 BPF_LD_MAP_FD(BPF_REG_1, 0),
5105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5106 BPF_FUNC_get_local_storage),
5107 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5108 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5110 BPF_EXIT_INSN(),
5111 },
5112 .fixup_cgroup_storage = { 1 },
5113 .result = REJECT,
5114 .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5115 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5116 },
5117 {
5118 "invalid cgroup storage access 5",
5119 .insns = {
5120 BPF_MOV64_IMM(BPF_REG_2, 7),
5121 BPF_LD_MAP_FD(BPF_REG_1, 0),
5122 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5123 BPF_FUNC_get_local_storage),
5124 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5125 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5126 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5127 BPF_EXIT_INSN(),
5128 },
5129 .fixup_cgroup_storage = { 1 },
5130 .result = REJECT,
5131 .errstr = "get_local_storage() doesn't support non-zero flags",
5132 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5133 },
5134 {
5135 "invalid cgroup storage access 6",
5136 .insns = {
5137 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5138 BPF_LD_MAP_FD(BPF_REG_1, 0),
5139 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5140 BPF_FUNC_get_local_storage),
5141 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5142 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5143 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5144 BPF_EXIT_INSN(),
5145 },
5146 .fixup_cgroup_storage = { 1 },
5147 .result = REJECT,
5148 .errstr = "get_local_storage() doesn't support non-zero flags",
5149 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5150 },
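	/* Same checks as above, but for the per-cpu cgroup storage map. */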
5151 {
Roman Gushchina3c60542018-09-28 14:45:53 +00005152 "valid per-cpu cgroup storage access",
5153 .insns = {
5154 BPF_MOV64_IMM(BPF_REG_2, 0),
5155 BPF_LD_MAP_FD(BPF_REG_1, 0),
5156 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5157 BPF_FUNC_get_local_storage),
5158 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5159 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5160 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5161 BPF_EXIT_INSN(),
5162 },
5163 .fixup_percpu_cgroup_storage = { 1 },
5164 .result = ACCEPT,
5165 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5166 },
5167 {
5168 "invalid per-cpu cgroup storage access 1",
5169 .insns = {
5170 BPF_MOV64_IMM(BPF_REG_2, 0),
5171 BPF_LD_MAP_FD(BPF_REG_1, 0),
5172 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5173 BPF_FUNC_get_local_storage),
5174 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5175 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5176 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5177 BPF_EXIT_INSN(),
5178 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005179 .fixup_map_hash_8b = { 1 },
Roman Gushchina3c60542018-09-28 14:45:53 +00005180 .result = REJECT,
5181 .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5182 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5183 },
5184 {
5185 "invalid per-cpu cgroup storage access 2",
5186 .insns = {
5187 BPF_MOV64_IMM(BPF_REG_2, 0),
5188 BPF_LD_MAP_FD(BPF_REG_1, 1),
5189 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5190 BPF_FUNC_get_local_storage),
5191 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5192 BPF_EXIT_INSN(),
5193 },
5194 .result = REJECT,
5195 .errstr = "fd 1 is not pointing to valid bpf_map",
5196 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5197 },
5198 {
5199 "invalid per-cpu cgroup storage access 3",
5200 .insns = {
5201 BPF_MOV64_IMM(BPF_REG_2, 0),
5202 BPF_LD_MAP_FD(BPF_REG_1, 0),
5203 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5204 BPF_FUNC_get_local_storage),
5205 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5206 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5207 BPF_MOV64_IMM(BPF_REG_0, 0),
5208 BPF_EXIT_INSN(),
5209 },
5210 .fixup_percpu_cgroup_storage = { 1 },
5211 .result = REJECT,
5212 .errstr = "invalid access to map value, value_size=64 off=256 size=4",
5213 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5214 },
5215 {
5216 "invalid per-cpu cgroup storage access 4",
5217 .insns = {
5218 BPF_MOV64_IMM(BPF_REG_2, 0),
5219 BPF_LD_MAP_FD(BPF_REG_1, 0),
5220 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5221 BPF_FUNC_get_local_storage),
5222 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5223 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5224 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5225 BPF_EXIT_INSN(),
5226 },
5227	.fixup_percpu_cgroup_storage = { 1 },
5228 .result = REJECT,
5229 .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5230 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5231 },
5232 {
5233 "invalid per-cpu cgroup storage access 5",
5234 .insns = {
5235 BPF_MOV64_IMM(BPF_REG_2, 7),
5236 BPF_LD_MAP_FD(BPF_REG_1, 0),
5237 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5238 BPF_FUNC_get_local_storage),
5239 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5240 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5241 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5242 BPF_EXIT_INSN(),
5243 },
5244 .fixup_percpu_cgroup_storage = { 1 },
5245 .result = REJECT,
5246 .errstr = "get_local_storage() doesn't support non-zero flags",
5247 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5248 },
5249 {
5250 "invalid per-cpu cgroup storage access 6",
5251 .insns = {
5252 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5253 BPF_LD_MAP_FD(BPF_REG_1, 0),
5254 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5255 BPF_FUNC_get_local_storage),
5256 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5257 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5258 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5259 BPF_EXIT_INSN(),
5260 },
5261 .fixup_percpu_cgroup_storage = { 1 },
5262 .result = REJECT,
5263 .errstr = "get_local_storage() doesn't support non-zero flags",
5264 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5265 },
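	/* A map_lookup_elem() result may be copied into other registers,
	 * but ALU ops on the ptr_to_map_value_or_null before the NULL check
	 * must be rejected, and caller-saved registers holding a lookup
	 * result are clobbered by the next helper call.
	 */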
5266 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02005267 "multiple registers share map_lookup_elem result",
5268 .insns = {
5269 BPF_MOV64_IMM(BPF_REG_1, 10),
5270 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5273 BPF_LD_MAP_FD(BPF_REG_1, 0),
5274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5275 BPF_FUNC_map_lookup_elem),
5276 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5277 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5278 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5279 BPF_EXIT_INSN(),
5280 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005281 .fixup_map_hash_8b = { 4 },
Thomas Graf57a09bf2016-10-18 19:51:19 +02005282 .result = ACCEPT,
5283 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5284 },
5285 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02005286 "alu ops on ptr_to_map_value_or_null, 1",
5287 .insns = {
5288 BPF_MOV64_IMM(BPF_REG_1, 10),
5289 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5290 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5292 BPF_LD_MAP_FD(BPF_REG_1, 0),
5293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5294 BPF_FUNC_map_lookup_elem),
5295 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5296 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5298 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5299 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5300 BPF_EXIT_INSN(),
5301 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005302 .fixup_map_hash_8b = { 4 },
Joe Stringeraad2eea2018-10-02 13:35:30 -07005303 .errstr = "R4 pointer arithmetic on map_value_or_null",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02005304 .result = REJECT,
5305 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5306 },
5307 {
5308 "alu ops on ptr_to_map_value_or_null, 2",
5309 .insns = {
5310 BPF_MOV64_IMM(BPF_REG_1, 10),
5311 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5312 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5314 BPF_LD_MAP_FD(BPF_REG_1, 0),
5315 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5316 BPF_FUNC_map_lookup_elem),
5317 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5318 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5319 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5320 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5321 BPF_EXIT_INSN(),
5322 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005323 .fixup_map_hash_8b = { 4 },
Joe Stringeraad2eea2018-10-02 13:35:30 -07005324 .errstr = "R4 pointer arithmetic on map_value_or_null",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02005325 .result = REJECT,
5326 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5327 },
5328 {
5329 "alu ops on ptr_to_map_value_or_null, 3",
5330 .insns = {
5331 BPF_MOV64_IMM(BPF_REG_1, 10),
5332 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5333 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5335 BPF_LD_MAP_FD(BPF_REG_1, 0),
5336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5337 BPF_FUNC_map_lookup_elem),
5338 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5339 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5340 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5341 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5342 BPF_EXIT_INSN(),
5343 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005344 .fixup_map_hash_8b = { 4 },
Joe Stringeraad2eea2018-10-02 13:35:30 -07005345 .errstr = "R4 pointer arithmetic on map_value_or_null",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02005346 .result = REJECT,
5347 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5348 },
5349 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02005350 "invalid memory access with multiple map_lookup_elem calls",
5351 .insns = {
5352 BPF_MOV64_IMM(BPF_REG_1, 10),
5353 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5354 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5356 BPF_LD_MAP_FD(BPF_REG_1, 0),
5357 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5358 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5359 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5360 BPF_FUNC_map_lookup_elem),
5361 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5362 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5363 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5364 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5365 BPF_FUNC_map_lookup_elem),
5366 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5367 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5368 BPF_EXIT_INSN(),
5369 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005370 .fixup_map_hash_8b = { 4 },
Thomas Graf57a09bf2016-10-18 19:51:19 +02005371 .result = REJECT,
5372 .errstr = "R4 !read_ok",
5373 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5374 },
5375 {
5376 "valid indirect map_lookup_elem access with 2nd lookup in branch",
5377 .insns = {
5378 BPF_MOV64_IMM(BPF_REG_1, 10),
5379 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5380 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5381 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5382 BPF_LD_MAP_FD(BPF_REG_1, 0),
5383 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5384 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5386 BPF_FUNC_map_lookup_elem),
5387 BPF_MOV64_IMM(BPF_REG_2, 10),
5388 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5389 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5390 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5391 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5392 BPF_FUNC_map_lookup_elem),
5393 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5394 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5395 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5396 BPF_EXIT_INSN(),
5397 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005398 .fixup_map_hash_8b = { 4 },
Thomas Graf57a09bf2016-10-18 19:51:19 +02005399 .result = ACCEPT,
5400 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5401 },
Josef Bacike9548902016-11-29 12:35:19 -05005402 {
5403 "invalid map access from else condition",
5404 .insns = {
5405 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5406 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5408 BPF_LD_MAP_FD(BPF_REG_1, 0),
5409 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5410 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5411 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5412 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5414 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5415 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5416 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5417 BPF_EXIT_INSN(),
5418 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005419 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005420 .errstr = "R0 unbounded memory access",
Josef Bacike9548902016-11-29 12:35:19 -05005421 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01005422 .errstr_unpriv = "R0 leaks addr",
Josef Bacike9548902016-11-29 12:35:19 -05005423 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005424 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05005425 },
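	/* OR-ing constants must keep the known-constant type so stack bounds
	 * for probe_read() remain tracked: size 34|13 = 47 fits in the
	 * 48-byte stack buffer, while 34|24 = 58 does not.
	 */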
Gianluca Borello3c8397442016-12-03 12:31:33 -08005426 {
5427 "constant register |= constant should keep constant type",
5428 .insns = {
5429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5431 BPF_MOV64_IMM(BPF_REG_2, 34),
5432 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5433 BPF_MOV64_IMM(BPF_REG_3, 0),
5434 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5435 BPF_EXIT_INSN(),
5436 },
5437 .result = ACCEPT,
5438 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5439 },
5440 {
5441 "constant register |= constant should not bypass stack boundary checks",
5442 .insns = {
5443 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5444 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5445 BPF_MOV64_IMM(BPF_REG_2, 34),
5446 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5447 BPF_MOV64_IMM(BPF_REG_3, 0),
5448 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5449 BPF_EXIT_INSN(),
5450 },
5451 .errstr = "invalid stack type R1 off=-48 access_size=58",
5452 .result = REJECT,
5453 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5454 },
5455 {
5456 "constant register |= constant register should keep constant type",
5457 .insns = {
5458 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5460 BPF_MOV64_IMM(BPF_REG_2, 34),
5461 BPF_MOV64_IMM(BPF_REG_4, 13),
5462 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5463 BPF_MOV64_IMM(BPF_REG_3, 0),
5464 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5465 BPF_EXIT_INSN(),
5466 },
5467 .result = ACCEPT,
5468 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5469 },
5470 {
5471 "constant register |= constant register should not bypass stack boundary checks",
5472 .insns = {
5473 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5475 BPF_MOV64_IMM(BPF_REG_2, 34),
5476 BPF_MOV64_IMM(BPF_REG_4, 24),
5477 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5478 BPF_MOV64_IMM(BPF_REG_3, 0),
5479 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5480 BPF_EXIT_INSN(),
5481 },
5482 .errstr = "invalid stack type R1 off=-48 access_size=58",
5483 .result = REJECT,
5484 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5485 },
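	/* LWT programs: direct packet reads are allowed for LWT_IN, LWT_OUT
	 * and LWT_XMIT, direct packet writes only for LWT_XMIT, and
	 * tc_classid is not accessible at all.
	 */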
Thomas Graf3f731d82016-12-05 10:30:52 +01005486 {
5487 "invalid direct packet write for LWT_IN",
5488 .insns = {
5489 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5490 offsetof(struct __sk_buff, data)),
5491 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5492 offsetof(struct __sk_buff, data_end)),
5493 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5494 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5495 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5496 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5497 BPF_MOV64_IMM(BPF_REG_0, 0),
5498 BPF_EXIT_INSN(),
5499 },
5500 .errstr = "cannot write into packet",
5501 .result = REJECT,
5502 .prog_type = BPF_PROG_TYPE_LWT_IN,
5503 },
5504 {
5505 "invalid direct packet write for LWT_OUT",
5506 .insns = {
5507 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5508 offsetof(struct __sk_buff, data)),
5509 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5510 offsetof(struct __sk_buff, data_end)),
5511 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5512 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5513 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5514 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5515 BPF_MOV64_IMM(BPF_REG_0, 0),
5516 BPF_EXIT_INSN(),
5517 },
5518 .errstr = "cannot write into packet",
5519 .result = REJECT,
5520 .prog_type = BPF_PROG_TYPE_LWT_OUT,
5521 },
5522 {
5523 "direct packet write for LWT_XMIT",
5524 .insns = {
5525 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5526 offsetof(struct __sk_buff, data)),
5527 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5528 offsetof(struct __sk_buff, data_end)),
5529 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5531 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5532 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5533 BPF_MOV64_IMM(BPF_REG_0, 0),
5534 BPF_EXIT_INSN(),
5535 },
5536 .result = ACCEPT,
5537 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5538 },
5539 {
5540 "direct packet read for LWT_IN",
5541 .insns = {
5542 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5543 offsetof(struct __sk_buff, data)),
5544 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5545 offsetof(struct __sk_buff, data_end)),
5546 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5548 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5549 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5550 BPF_MOV64_IMM(BPF_REG_0, 0),
5551 BPF_EXIT_INSN(),
5552 },
5553 .result = ACCEPT,
5554 .prog_type = BPF_PROG_TYPE_LWT_IN,
5555 },
5556 {
5557 "direct packet read for LWT_OUT",
5558 .insns = {
5559 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5560 offsetof(struct __sk_buff, data)),
5561 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5562 offsetof(struct __sk_buff, data_end)),
5563 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5564 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5565 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5566 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5567 BPF_MOV64_IMM(BPF_REG_0, 0),
5568 BPF_EXIT_INSN(),
5569 },
5570 .result = ACCEPT,
5571 .prog_type = BPF_PROG_TYPE_LWT_OUT,
5572 },
5573 {
5574 "direct packet read for LWT_XMIT",
5575 .insns = {
5576 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5577 offsetof(struct __sk_buff, data)),
5578 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5579 offsetof(struct __sk_buff, data_end)),
5580 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5581 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5582 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5583 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5584 BPF_MOV64_IMM(BPF_REG_0, 0),
5585 BPF_EXIT_INSN(),
5586 },
5587 .result = ACCEPT,
5588 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5589 },
5590 {
Alexei Starovoitovb1977682017-03-24 15:57:33 -07005591 "overlapping checks for direct packet access",
5592 .insns = {
5593 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5594 offsetof(struct __sk_buff, data)),
5595 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5596 offsetof(struct __sk_buff, data_end)),
5597 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5598 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5599 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5600 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5602 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5603 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5604 BPF_MOV64_IMM(BPF_REG_0, 0),
5605 BPF_EXIT_INSN(),
5606 },
5607 .result = ACCEPT,
5608 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5609 },
5610 {
Daniel Borkmann6e6fddc2018-07-11 15:30:14 +02005611 "make headroom for LWT_XMIT",
5612 .insns = {
5613 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5614 BPF_MOV64_IMM(BPF_REG_2, 34),
5615 BPF_MOV64_IMM(BPF_REG_3, 0),
5616 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5617 /* split for s390 to succeed */
5618 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5619 BPF_MOV64_IMM(BPF_REG_2, 42),
5620 BPF_MOV64_IMM(BPF_REG_3, 0),
5621 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5622 BPF_MOV64_IMM(BPF_REG_0, 0),
5623 BPF_EXIT_INSN(),
5624 },
5625 .result = ACCEPT,
5626 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5627 },
5628 {
Thomas Graf3f731d82016-12-05 10:30:52 +01005629 "invalid access of tc_classid for LWT_IN",
5630 .insns = {
5631 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5632 offsetof(struct __sk_buff, tc_classid)),
5633 BPF_EXIT_INSN(),
5634 },
5635 .result = REJECT,
5636 .errstr = "invalid bpf_context access",
5637 },
5638 {
5639 "invalid access of tc_classid for LWT_OUT",
5640 .insns = {
5641 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5642 offsetof(struct __sk_buff, tc_classid)),
5643 BPF_EXIT_INSN(),
5644 },
5645 .result = REJECT,
5646 .errstr = "invalid bpf_context access",
5647 },
5648 {
5649 "invalid access of tc_classid for LWT_XMIT",
5650 .insns = {
5651 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5652 offsetof(struct __sk_buff, tc_classid)),
5653 BPF_EXIT_INSN(),
5654 },
5655 .result = REJECT,
5656 .errstr = "invalid bpf_context access",
5657 },
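	/* Unprivileged programs must not leak kernel pointers into the
	 * context or into map values; XADD into ctx is rejected outright.
	 */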
Gianluca Borello57225692017-01-09 10:19:47 -08005658 {
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005659 "leak pointer into ctx 1",
5660 .insns = {
5661 BPF_MOV64_IMM(BPF_REG_0, 0),
5662 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5663 offsetof(struct __sk_buff, cb[0])),
5664 BPF_LD_MAP_FD(BPF_REG_2, 0),
5665 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5666 offsetof(struct __sk_buff, cb[0])),
5667 BPF_EXIT_INSN(),
5668 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005669 .fixup_map_hash_8b = { 2 },
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005670 .errstr_unpriv = "R2 leaks addr into mem",
5671 .result_unpriv = REJECT,
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01005672 .result = REJECT,
Joe Stringer9d2be442018-10-02 13:35:31 -07005673 .errstr = "BPF_XADD stores into R1 inv is not allowed",
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005674 },
5675 {
5676 "leak pointer into ctx 2",
5677 .insns = {
5678 BPF_MOV64_IMM(BPF_REG_0, 0),
5679 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5680 offsetof(struct __sk_buff, cb[0])),
5681 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5682 offsetof(struct __sk_buff, cb[0])),
5683 BPF_EXIT_INSN(),
5684 },
5685 .errstr_unpriv = "R10 leaks addr into mem",
5686 .result_unpriv = REJECT,
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01005687 .result = REJECT,
Joe Stringer9d2be442018-10-02 13:35:31 -07005688 .errstr = "BPF_XADD stores into R1 inv is not allowed",
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005689 },
5690 {
5691 "leak pointer into ctx 3",
5692 .insns = {
5693 BPF_MOV64_IMM(BPF_REG_0, 0),
5694 BPF_LD_MAP_FD(BPF_REG_2, 0),
5695 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5696 offsetof(struct __sk_buff, cb[0])),
5697 BPF_EXIT_INSN(),
5698 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005699 .fixup_map_hash_8b = { 1 },
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005700 .errstr_unpriv = "R2 leaks addr into ctx",
5701 .result_unpriv = REJECT,
5702 .result = ACCEPT,
5703 },
5704 {
5705 "leak pointer into map val",
5706 .insns = {
5707 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5708 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5709 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5711 BPF_LD_MAP_FD(BPF_REG_1, 0),
5712 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5713 BPF_FUNC_map_lookup_elem),
5714 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5715 BPF_MOV64_IMM(BPF_REG_3, 0),
5716 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5717 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5718 BPF_MOV64_IMM(BPF_REG_0, 0),
5719 BPF_EXIT_INSN(),
5720 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005721 .fixup_map_hash_8b = { 4 },
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005722 .errstr_unpriv = "R6 leaks addr into mem",
5723 .result_unpriv = REJECT,
5724 .result = ACCEPT,
5725 },
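	/* Passing a map value pointer plus a size to a helper (probe_read):
	 * full, partial, empty, out-of-bound and negative size ranges
	 * against the 48-byte value.
	 */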
5726 {
Gianluca Borello57225692017-01-09 10:19:47 -08005727 "helper access to map: full range",
5728 .insns = {
5729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5731 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5732 BPF_LD_MAP_FD(BPF_REG_1, 0),
5733 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5734 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5735 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5736 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5737 BPF_MOV64_IMM(BPF_REG_3, 0),
5738 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5739 BPF_EXIT_INSN(),
5740 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005741 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005742 .result = ACCEPT,
5743 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5744 },
5745 {
5746 "helper access to map: partial range",
5747 .insns = {
5748 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5749 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5750 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5751 BPF_LD_MAP_FD(BPF_REG_1, 0),
5752 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5753 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5754 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5755 BPF_MOV64_IMM(BPF_REG_2, 8),
5756 BPF_MOV64_IMM(BPF_REG_3, 0),
5757 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5758 BPF_EXIT_INSN(),
5759 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005760 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005761 .result = ACCEPT,
5762 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5763 },
5764 {
5765 "helper access to map: empty range",
5766 .insns = {
5767 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5768 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5769 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5770 BPF_LD_MAP_FD(BPF_REG_1, 0),
5771 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005772 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5773 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5774 BPF_MOV64_IMM(BPF_REG_2, 0),
5775 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005776 BPF_EXIT_INSN(),
5777 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005778 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005779 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
5780 .result = REJECT,
5781 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5782 },
5783 {
5784 "helper access to map: out-of-bound range",
5785 .insns = {
5786 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5788 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5789 BPF_LD_MAP_FD(BPF_REG_1, 0),
5790 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5791 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5793 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5794 BPF_MOV64_IMM(BPF_REG_3, 0),
5795 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5796 BPF_EXIT_INSN(),
5797 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005798 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005799 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
5800 .result = REJECT,
5801 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5802 },
5803 {
5804 "helper access to map: negative range",
5805 .insns = {
5806 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5807 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5808 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5809 BPF_LD_MAP_FD(BPF_REG_1, 0),
5810 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5811 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5812 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5813 BPF_MOV64_IMM(BPF_REG_2, -8),
5814 BPF_MOV64_IMM(BPF_REG_3, 0),
5815 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5816 BPF_EXIT_INSN(),
5817 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005818 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005819 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005820 .result = REJECT,
5821 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5822 },
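	/* As above, but with the map value pointer first advanced by a
	 * constant immediate offset (offsetof(struct test_val, foo)).
	 */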
5823 {
5824 "helper access to adjusted map (via const imm): full range",
5825 .insns = {
5826 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5828 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5829 BPF_LD_MAP_FD(BPF_REG_1, 0),
5830 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5831 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5833 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5834 offsetof(struct test_val, foo)),
5835 BPF_MOV64_IMM(BPF_REG_2,
5836 sizeof(struct test_val) -
5837 offsetof(struct test_val, foo)),
5838 BPF_MOV64_IMM(BPF_REG_3, 0),
5839 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5840 BPF_EXIT_INSN(),
5841 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005842 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005843 .result = ACCEPT,
5844 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5845 },
5846 {
5847 "helper access to adjusted map (via const imm): partial range",
5848 .insns = {
5849 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5850 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5851 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5852 BPF_LD_MAP_FD(BPF_REG_1, 0),
5853 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5854 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5855 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5856 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5857 offsetof(struct test_val, foo)),
5858 BPF_MOV64_IMM(BPF_REG_2, 8),
5859 BPF_MOV64_IMM(BPF_REG_3, 0),
5860 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5861 BPF_EXIT_INSN(),
5862 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005863 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005864 .result = ACCEPT,
5865 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5866 },
5867 {
5868 "helper access to adjusted map (via const imm): empty range",
5869 .insns = {
5870 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5872 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5873 BPF_LD_MAP_FD(BPF_REG_1, 0),
5874 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005875 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Gianluca Borello57225692017-01-09 10:19:47 -08005876 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5878 offsetof(struct test_val, foo)),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005879 BPF_MOV64_IMM(BPF_REG_2, 0),
5880 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005881 BPF_EXIT_INSN(),
5882 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005883 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005884 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
Gianluca Borello57225692017-01-09 10:19:47 -08005885 .result = REJECT,
5886 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5887 },
5888 {
5889 "helper access to adjusted map (via const imm): out-of-bound range",
5890 .insns = {
5891 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5892 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5893 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5894 BPF_LD_MAP_FD(BPF_REG_1, 0),
5895 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5896 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5897 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5899 offsetof(struct test_val, foo)),
5900 BPF_MOV64_IMM(BPF_REG_2,
5901 sizeof(struct test_val) -
5902 offsetof(struct test_val, foo) + 8),
5903 BPF_MOV64_IMM(BPF_REG_3, 0),
5904 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5905 BPF_EXIT_INSN(),
5906 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005907 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005908 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5909 .result = REJECT,
5910 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5911 },
5912 {
5913 "helper access to adjusted map (via const imm): negative range (> adjustment)",
5914 .insns = {
5915 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5917 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5918 BPF_LD_MAP_FD(BPF_REG_1, 0),
5919 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5920 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5921 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5923 offsetof(struct test_val, foo)),
5924 BPF_MOV64_IMM(BPF_REG_2, -8),
5925 BPF_MOV64_IMM(BPF_REG_3, 0),
5926 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5927 BPF_EXIT_INSN(),
5928 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005929 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005930 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005931 .result = REJECT,
5932 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5933 },
5934 {
5935 "helper access to adjusted map (via const imm): negative range (< adjustment)",
5936 .insns = {
5937 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5938 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5939 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5940 BPF_LD_MAP_FD(BPF_REG_1, 0),
5941 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5942 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5943 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5945 offsetof(struct test_val, foo)),
5946 BPF_MOV64_IMM(BPF_REG_2, -1),
5947 BPF_MOV64_IMM(BPF_REG_3, 0),
5948 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5949 BPF_EXIT_INSN(),
5950 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005951 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005952 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005953 .result = REJECT,
5954 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5955 },
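	/* As above, but the constant offset is added via a register. */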
5956 {
5957 "helper access to adjusted map (via const reg): full range",
5958 .insns = {
5959 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5961 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5962 BPF_LD_MAP_FD(BPF_REG_1, 0),
5963 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5964 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5965 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5966 BPF_MOV64_IMM(BPF_REG_3,
5967 offsetof(struct test_val, foo)),
5968 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5969 BPF_MOV64_IMM(BPF_REG_2,
5970 sizeof(struct test_val) -
5971 offsetof(struct test_val, foo)),
5972 BPF_MOV64_IMM(BPF_REG_3, 0),
5973 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5974 BPF_EXIT_INSN(),
5975 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005976 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005977 .result = ACCEPT,
5978 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5979 },
5980 {
5981 "helper access to adjusted map (via const reg): partial range",
5982 .insns = {
5983 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5985 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5986 BPF_LD_MAP_FD(BPF_REG_1, 0),
5987 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5988 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5989 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5990 BPF_MOV64_IMM(BPF_REG_3,
5991 offsetof(struct test_val, foo)),
5992 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5993 BPF_MOV64_IMM(BPF_REG_2, 8),
5994 BPF_MOV64_IMM(BPF_REG_3, 0),
5995 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5996 BPF_EXIT_INSN(),
5997 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005998 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005999 .result = ACCEPT,
6000 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6001 },
6002 {
6003 "helper access to adjusted map (via const reg): empty range",
6004 .insns = {
6005 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6006 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6007 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6008 BPF_LD_MAP_FD(BPF_REG_1, 0),
6009 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6010	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6011	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6012	BPF_MOV64_IMM(BPF_REG_3, 0),
6013	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6014	BPF_MOV64_IMM(BPF_REG_2, 0),
6015	BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6016	BPF_EXIT_INSN(),
6017	},
6018	.fixup_map_hash_48b = { 3 },
6019	.errstr = "R1 min value is outside of the array range",
6020	.result = REJECT,
6021 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6022 },
6023 {
6024 "helper access to adjusted map (via const reg): out-of-bound range",
6025 .insns = {
6026 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6027 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6028 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6029 BPF_LD_MAP_FD(BPF_REG_1, 0),
6030 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6031 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6032 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6033 BPF_MOV64_IMM(BPF_REG_3,
6034 offsetof(struct test_val, foo)),
6035 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6036 BPF_MOV64_IMM(BPF_REG_2,
6037 sizeof(struct test_val) -
6038 offsetof(struct test_val, foo) + 8),
6039 BPF_MOV64_IMM(BPF_REG_3, 0),
6040 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6041 BPF_EXIT_INSN(),
6042 },
6043	.fixup_map_hash_48b = { 3 },
6044	.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6045 .result = REJECT,
6046 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6047 },
6048 {
6049 "helper access to adjusted map (via const reg): negative range (> adjustment)",
6050 .insns = {
6051 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6053 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6054 BPF_LD_MAP_FD(BPF_REG_1, 0),
6055 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6057 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6058 BPF_MOV64_IMM(BPF_REG_3,
6059 offsetof(struct test_val, foo)),
6060 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6061 BPF_MOV64_IMM(BPF_REG_2, -8),
6062 BPF_MOV64_IMM(BPF_REG_3, 0),
6063 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6064 BPF_EXIT_INSN(),
6065 },
6066	.fixup_map_hash_48b = { 3 },
6067	.errstr = "R2 min value is negative",
6068	.result = REJECT,
6069 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6070 },
6071 {
6072 "helper access to adjusted map (via const reg): negative range (< adjustment)",
6073 .insns = {
6074 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6076 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6077 BPF_LD_MAP_FD(BPF_REG_1, 0),
6078 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6079 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6080 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6081 BPF_MOV64_IMM(BPF_REG_3,
6082 offsetof(struct test_val, foo)),
6083 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6084 BPF_MOV64_IMM(BPF_REG_2, -1),
6085 BPF_MOV64_IMM(BPF_REG_3, 0),
6086 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6087 BPF_EXIT_INSN(),
6088 },
6089	.fixup_map_hash_48b = { 3 },
6090	.errstr = "R2 min value is negative",
6091	.result = REJECT,
6092 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6093 },
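	/*
	 * Now the offset comes from the map element itself, so it is only
	 * bounded by the preceding JGT check against
	 * offsetof(struct test_val, foo); a missing or too-permissive upper
	 * bound must be rejected.
	 */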
6094 {
6095 "helper access to adjusted map (via variable): full range",
6096 .insns = {
6097 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6099 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6100 BPF_LD_MAP_FD(BPF_REG_1, 0),
6101 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6102 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6103 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6104 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6105 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6106 offsetof(struct test_val, foo), 4),
6107 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6108 BPF_MOV64_IMM(BPF_REG_2,
6109 sizeof(struct test_val) -
6110 offsetof(struct test_val, foo)),
6111 BPF_MOV64_IMM(BPF_REG_3, 0),
6112 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6113 BPF_EXIT_INSN(),
6114 },
6115	.fixup_map_hash_48b = { 3 },
6116	.result = ACCEPT,
6117 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6118 },
6119 {
6120 "helper access to adjusted map (via variable): partial range",
6121 .insns = {
6122 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6124 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6125 BPF_LD_MAP_FD(BPF_REG_1, 0),
6126 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6127 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6129 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6130 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6131 offsetof(struct test_val, foo), 4),
6132 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6133 BPF_MOV64_IMM(BPF_REG_2, 8),
6134 BPF_MOV64_IMM(BPF_REG_3, 0),
6135 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6136 BPF_EXIT_INSN(),
6137 },
6138	.fixup_map_hash_48b = { 3 },
6139	.result = ACCEPT,
6140 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6141 },
6142 {
6143 "helper access to adjusted map (via variable): empty range",
6144 .insns = {
6145 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6146 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6147 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6148 BPF_LD_MAP_FD(BPF_REG_1, 0),
6149 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6150	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6151	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6152	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6153	BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6154	offsetof(struct test_val, foo), 3),
6155	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6156	BPF_MOV64_IMM(BPF_REG_2, 0),
6157	BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6158	BPF_EXIT_INSN(),
6159	},
6160	.fixup_map_hash_48b = { 3 },
6161	.errstr = "R1 min value is outside of the array range",
6162	.result = REJECT,
6163 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6164 },
6165 {
6166 "helper access to adjusted map (via variable): no max check",
6167 .insns = {
6168 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6169 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6170 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6171 BPF_LD_MAP_FD(BPF_REG_1, 0),
6172 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6173 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6174 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6175 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6176 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6177	BPF_MOV64_IMM(BPF_REG_2, 1),
6178	BPF_MOV64_IMM(BPF_REG_3, 0),
6179 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6180 BPF_EXIT_INSN(),
6181 },
6182	.fixup_map_hash_48b = { 3 },
6183	.errstr = "R1 unbounded memory access",
6184	.result = REJECT,
6185 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6186 },
6187 {
6188 "helper access to adjusted map (via variable): wrong max check",
6189 .insns = {
6190 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6191 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6192 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6193 BPF_LD_MAP_FD(BPF_REG_1, 0),
6194 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6195 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6196 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6197 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6198 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6199 offsetof(struct test_val, foo), 4),
6200 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6201 BPF_MOV64_IMM(BPF_REG_2,
6202 sizeof(struct test_val) -
6203 offsetof(struct test_val, foo) + 1),
6204 BPF_MOV64_IMM(BPF_REG_3, 0),
6205 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6206 BPF_EXIT_INSN(),
6207 },
6208	.fixup_map_hash_48b = { 3 },
6209	.errstr = "invalid access to map value, value_size=48 off=4 size=45",
6210 .result = REJECT,
6211 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6212 },
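	/*
	 * Bounds checks expressed with JLT/JLE/JSLT/JSLE: the index loaded
	 * from the map is compared before being added to the value pointer.
	 * The signed variants also need a lower-bound check, otherwise the
	 * minimum value stays negative and the access is rejected.
	 */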
6213	{
6214	"helper access to map: bounds check using <, good access",
6215 .insns = {
6216 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6218 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6219 BPF_LD_MAP_FD(BPF_REG_1, 0),
6220 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6221 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6222 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6223 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6224 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
6225 BPF_MOV64_IMM(BPF_REG_0, 0),
6226 BPF_EXIT_INSN(),
6227 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6228 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6229 BPF_MOV64_IMM(BPF_REG_0, 0),
6230 BPF_EXIT_INSN(),
6231 },
6232	.fixup_map_hash_48b = { 3 },
6233	.result = ACCEPT,
6234 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6235 },
6236 {
6237 "helper access to map: bounds check using <, bad access",
6238 .insns = {
6239 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6241 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6242 BPF_LD_MAP_FD(BPF_REG_1, 0),
6243 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6244 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6245 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6246 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6247 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
6248 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6249 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6250 BPF_MOV64_IMM(BPF_REG_0, 0),
6251 BPF_EXIT_INSN(),
6252 BPF_MOV64_IMM(BPF_REG_0, 0),
6253 BPF_EXIT_INSN(),
6254 },
6255	.fixup_map_hash_48b = { 3 },
6256	.result = REJECT,
6257 .errstr = "R1 unbounded memory access",
6258 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6259 },
6260 {
6261 "helper access to map: bounds check using <=, good access",
6262 .insns = {
6263 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6265 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6266 BPF_LD_MAP_FD(BPF_REG_1, 0),
6267 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6268 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6269 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6270 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6271 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6272 BPF_MOV64_IMM(BPF_REG_0, 0),
6273 BPF_EXIT_INSN(),
6274 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6275 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6276 BPF_MOV64_IMM(BPF_REG_0, 0),
6277 BPF_EXIT_INSN(),
6278 },
6279	.fixup_map_hash_48b = { 3 },
6280	.result = ACCEPT,
6281 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6282 },
6283 {
6284 "helper access to map: bounds check using <=, bad access",
6285 .insns = {
6286 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6288 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6289 BPF_LD_MAP_FD(BPF_REG_1, 0),
6290 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6291 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6293 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6294 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6295 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6296 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6297 BPF_MOV64_IMM(BPF_REG_0, 0),
6298 BPF_EXIT_INSN(),
6299 BPF_MOV64_IMM(BPF_REG_0, 0),
6300 BPF_EXIT_INSN(),
6301 },
6302	.fixup_map_hash_48b = { 3 },
6303	.result = REJECT,
6304 .errstr = "R1 unbounded memory access",
6305 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6306 },
6307 {
6308 "helper access to map: bounds check using s<, good access",
6309 .insns = {
6310 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6312 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6313 BPF_LD_MAP_FD(BPF_REG_1, 0),
6314 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6315 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6316 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6317 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6318 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6319 BPF_MOV64_IMM(BPF_REG_0, 0),
6320 BPF_EXIT_INSN(),
6321 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6322 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6323 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6324 BPF_MOV64_IMM(BPF_REG_0, 0),
6325 BPF_EXIT_INSN(),
6326 },
6327	.fixup_map_hash_48b = { 3 },
6328	.result = ACCEPT,
6329 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6330 },
6331 {
6332 "helper access to map: bounds check using s<, good access 2",
6333 .insns = {
6334 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6335 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6336 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6337 BPF_LD_MAP_FD(BPF_REG_1, 0),
6338 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6339 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6340 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6341 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6342 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6343 BPF_MOV64_IMM(BPF_REG_0, 0),
6344 BPF_EXIT_INSN(),
6345 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6346 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6347 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6348 BPF_MOV64_IMM(BPF_REG_0, 0),
6349 BPF_EXIT_INSN(),
6350 },
6351	.fixup_map_hash_48b = { 3 },
6352	.result = ACCEPT,
6353 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6354 },
6355 {
6356 "helper access to map: bounds check using s<, bad access",
6357 .insns = {
6358 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6360 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6361 BPF_LD_MAP_FD(BPF_REG_1, 0),
6362 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6363 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6364 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6365 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6366 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6367 BPF_MOV64_IMM(BPF_REG_0, 0),
6368 BPF_EXIT_INSN(),
6369 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6370 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6371 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6372 BPF_MOV64_IMM(BPF_REG_0, 0),
6373 BPF_EXIT_INSN(),
6374 },
6375	.fixup_map_hash_48b = { 3 },
6376	.result = REJECT,
6377 .errstr = "R1 min value is negative",
6378 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6379 },
6380 {
6381 "helper access to map: bounds check using s<=, good access",
6382 .insns = {
6383 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6384 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6385 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6386 BPF_LD_MAP_FD(BPF_REG_1, 0),
6387 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6388 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6389 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6390 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6391 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6392 BPF_MOV64_IMM(BPF_REG_0, 0),
6393 BPF_EXIT_INSN(),
6394 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6395 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6396 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6397 BPF_MOV64_IMM(BPF_REG_0, 0),
6398 BPF_EXIT_INSN(),
6399 },
6400	.fixup_map_hash_48b = { 3 },
6401	.result = ACCEPT,
6402 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6403 },
6404 {
6405 "helper access to map: bounds check using s<=, good access 2",
6406 .insns = {
6407 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6409 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6410 BPF_LD_MAP_FD(BPF_REG_1, 0),
6411 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6412 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6413 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6414 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6415 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6416 BPF_MOV64_IMM(BPF_REG_0, 0),
6417 BPF_EXIT_INSN(),
6418 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6419 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6420 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6421 BPF_MOV64_IMM(BPF_REG_0, 0),
6422 BPF_EXIT_INSN(),
6423 },
6424	.fixup_map_hash_48b = { 3 },
6425	.result = ACCEPT,
6426 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6427 },
6428 {
6429 "helper access to map: bounds check using s<=, bad access",
6430 .insns = {
6431 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6433 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6434 BPF_LD_MAP_FD(BPF_REG_1, 0),
6435 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6436 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6437 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6438 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6439 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6440 BPF_MOV64_IMM(BPF_REG_0, 0),
6441 BPF_EXIT_INSN(),
6442 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6443 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6444 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6445 BPF_MOV64_IMM(BPF_REG_0, 0),
6446 BPF_EXIT_INSN(),
6447 },
6448	.fixup_map_hash_48b = { 3 },
6449	.result = REJECT,
6450 .errstr = "R1 min value is negative",
6451 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6452 },
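	/*
	 * In the following tests a (possibly adjusted) map value pointer is
	 * passed back to bpf_map_lookup_elem()/bpf_map_update_elem() as the
	 * key or value argument, so the accessed size comes from the second
	 * map's key/value size instead of a size register.
	 */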
6453 {
6454	"map lookup helper access to map",
6455 .insns = {
6456 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6458 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6459 BPF_LD_MAP_FD(BPF_REG_1, 0),
6460 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6461 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6462 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6463 BPF_LD_MAP_FD(BPF_REG_1, 0),
6464 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6465 BPF_EXIT_INSN(),
6466 },
6467	.fixup_map_hash_16b = { 3, 8 },
6468	.result = ACCEPT,
6469 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6470 },
6471 {
6472 "map update helper access to map",
6473 .insns = {
6474 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6476 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6477 BPF_LD_MAP_FD(BPF_REG_1, 0),
6478 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6479 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6480 BPF_MOV64_IMM(BPF_REG_4, 0),
6481 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6482 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6483 BPF_LD_MAP_FD(BPF_REG_1, 0),
6484 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6485 BPF_EXIT_INSN(),
6486 },
6487	.fixup_map_hash_16b = { 3, 10 },
6488	.result = ACCEPT,
6489 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6490 },
6491 {
6492 "map update helper access to map: wrong size",
6493 .insns = {
6494 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6496 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6497 BPF_LD_MAP_FD(BPF_REG_1, 0),
6498 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6499 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6500 BPF_MOV64_IMM(BPF_REG_4, 0),
6501 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6502 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6503 BPF_LD_MAP_FD(BPF_REG_1, 0),
6504 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6505 BPF_EXIT_INSN(),
6506 },
6507	.fixup_map_hash_8b = { 3 },
6508	.fixup_map_hash_16b = { 10 },
6509	.result = REJECT,
6510 .errstr = "invalid access to map value, value_size=8 off=0 size=16",
6511 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6512 },
6513 {
6514 "map helper access to adjusted map (via const imm)",
6515 .insns = {
6516 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6518 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6519 BPF_LD_MAP_FD(BPF_REG_1, 0),
6520 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6521 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6522 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6523 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6524 offsetof(struct other_val, bar)),
6525 BPF_LD_MAP_FD(BPF_REG_1, 0),
6526 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6527 BPF_EXIT_INSN(),
6528 },
6529	.fixup_map_hash_16b = { 3, 9 },
6530	.result = ACCEPT,
6531 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6532 },
6533 {
6534 "map helper access to adjusted map (via const imm): out-of-bound 1",
6535 .insns = {
6536 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6538 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6539 BPF_LD_MAP_FD(BPF_REG_1, 0),
6540 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6541 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6542 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6543 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6544 sizeof(struct other_val) - 4),
6545 BPF_LD_MAP_FD(BPF_REG_1, 0),
6546 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6547 BPF_EXIT_INSN(),
6548 },
6549	.fixup_map_hash_16b = { 3, 9 },
6550	.result = REJECT,
6551 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
6552 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6553 },
6554 {
6555 "map helper access to adjusted map (via const imm): out-of-bound 2",
6556 .insns = {
6557 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6558 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6559 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6560 BPF_LD_MAP_FD(BPF_REG_1, 0),
6561 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6562 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6563 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6564 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6565 BPF_LD_MAP_FD(BPF_REG_1, 0),
6566 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6567 BPF_EXIT_INSN(),
6568 },
6569	.fixup_map_hash_16b = { 3, 9 },
6570	.result = REJECT,
6571 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6572 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6573 },
6574 {
6575 "map helper access to adjusted map (via const reg)",
6576 .insns = {
6577 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6579 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6580 BPF_LD_MAP_FD(BPF_REG_1, 0),
6581 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6582 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6583 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6584 BPF_MOV64_IMM(BPF_REG_3,
6585 offsetof(struct other_val, bar)),
6586 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6587 BPF_LD_MAP_FD(BPF_REG_1, 0),
6588 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6589 BPF_EXIT_INSN(),
6590 },
6591	.fixup_map_hash_16b = { 3, 10 },
6592	.result = ACCEPT,
6593 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6594 },
6595 {
6596 "map helper access to adjusted map (via const reg): out-of-bound 1",
6597 .insns = {
6598 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6599 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6600 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6601 BPF_LD_MAP_FD(BPF_REG_1, 0),
6602 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6603 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6604 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6605 BPF_MOV64_IMM(BPF_REG_3,
6606 sizeof(struct other_val) - 4),
6607 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6608 BPF_LD_MAP_FD(BPF_REG_1, 0),
6609 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6610 BPF_EXIT_INSN(),
6611 },
6612	.fixup_map_hash_16b = { 3, 10 },
6613	.result = REJECT,
6614 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
6615 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6616 },
6617 {
6618 "map helper access to adjusted map (via const reg): out-of-bound 2",
6619 .insns = {
6620 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6621 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6622 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6623 BPF_LD_MAP_FD(BPF_REG_1, 0),
6624 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6625 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6626 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6627 BPF_MOV64_IMM(BPF_REG_3, -4),
6628 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6629 BPF_LD_MAP_FD(BPF_REG_1, 0),
6630 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6631 BPF_EXIT_INSN(),
6632 },
6633	.fixup_map_hash_16b = { 3, 10 },
6634	.result = REJECT,
6635 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6636 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6637 },
6638 {
6639 "map helper access to adjusted map (via variable)",
6640 .insns = {
6641 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6643 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6644 BPF_LD_MAP_FD(BPF_REG_1, 0),
6645 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6646 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6647 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6648 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6649 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6650 offsetof(struct other_val, bar), 4),
6651 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6652 BPF_LD_MAP_FD(BPF_REG_1, 0),
6653 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6654 BPF_EXIT_INSN(),
6655 },
6656	.fixup_map_hash_16b = { 3, 11 },
6657	.result = ACCEPT,
6658 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6659 },
6660 {
6661 "map helper access to adjusted map (via variable): no max check",
6662 .insns = {
6663 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6665 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6666 BPF_LD_MAP_FD(BPF_REG_1, 0),
6667 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6669 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6670 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6671 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6672 BPF_LD_MAP_FD(BPF_REG_1, 0),
6673 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6674 BPF_EXIT_INSN(),
6675 },
6676	.fixup_map_hash_16b = { 3, 10 },
6677	.result = REJECT,
6678 .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
6679 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6680 },
6681 {
6682 "map helper access to adjusted map (via variable): wrong max check",
6683 .insns = {
6684 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6685 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6686 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6687 BPF_LD_MAP_FD(BPF_REG_1, 0),
6688 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6689 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6690 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6691 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6692 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6693 offsetof(struct other_val, bar) + 1, 4),
6694 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6695 BPF_LD_MAP_FD(BPF_REG_1, 0),
6696 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6697 BPF_EXIT_INSN(),
6698 },
6699	.fixup_map_hash_16b = { 3, 11 },
6700	.result = REJECT,
6701 .errstr = "invalid access to map value, value_size=16 off=9 size=8",
6702 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6703 },
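	/*
	 * Map element value tests: the value pointer is spilled to the stack
	 * and refilled, stored to at unaligned offsets, or mangled with
	 * operations the verifier does not allow on pointers (&=, 32-bit
	 * add, /=, endianness swap, xadd on the spilled slot).
	 */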
6704 {
6705	"map element value is preserved across register spilling",
6706 .insns = {
6707 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6709 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6710 BPF_LD_MAP_FD(BPF_REG_1, 0),
6711 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6712 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6713 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6714 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6715 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6716 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6717 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6718 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6719 BPF_EXIT_INSN(),
6720 },
6721	.fixup_map_hash_48b = { 3 },
6722	.errstr_unpriv = "R0 leaks addr",
6723 .result = ACCEPT,
6724 .result_unpriv = REJECT,
6725 },
6726 {
6727	"map element value or null is marked on register spilling",
6728 .insns = {
6729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6731 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6732 BPF_LD_MAP_FD(BPF_REG_1, 0),
6733 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6734 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
6736 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6737 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6738 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6739 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6740 BPF_EXIT_INSN(),
6741 },
6742	.fixup_map_hash_48b = { 3 },
6743	.errstr_unpriv = "R0 leaks addr",
6744 .result = ACCEPT,
6745 .result_unpriv = REJECT,
6746 },
6747 {
6748 "map element value store of cleared call register",
6749 .insns = {
6750 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6751 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6752 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6753 BPF_LD_MAP_FD(BPF_REG_1, 0),
6754 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6755 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
6756 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
6757 BPF_EXIT_INSN(),
6758 },
6759	.fixup_map_hash_48b = { 3 },
6760	.errstr_unpriv = "R1 !read_ok",
6761 .errstr = "R1 !read_ok",
6762 .result = REJECT,
6763 .result_unpriv = REJECT,
6764 },
6765 {
6766 "map element value with unaligned store",
6767 .insns = {
6768 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6770 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6771 BPF_LD_MAP_FD(BPF_REG_1, 0),
6772 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
6774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6775 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6776 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
6777 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
6778 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6779 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
6780 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
6781 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
6782 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
6783 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
6784 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
6785 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
6786 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
6787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
6788 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
6789 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
6790 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
6791 BPF_EXIT_INSN(),
6792 },
6793	.fixup_map_hash_48b = { 3 },
6794	.errstr_unpriv = "R0 leaks addr",
6795	.result = ACCEPT,
6796 .result_unpriv = REJECT,
6797 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6798 },
6799 {
6800 "map element value with unaligned load",
6801 .insns = {
6802 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6803 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6804 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6805 BPF_LD_MAP_FD(BPF_REG_1, 0),
6806 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6807 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6808 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6809 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
6810 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6811 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6812 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
6813 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6814 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
6815 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
6816 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
6817 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6818 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
6819 BPF_EXIT_INSN(),
6820 },
6821	.fixup_map_hash_48b = { 3 },
6822	.errstr_unpriv = "R0 leaks addr",
6823	.result = ACCEPT,
6824 .result_unpriv = REJECT,
6825 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6826 },
6827 {
6828 "map element value illegal alu op, 1",
6829 .insns = {
6830 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6832 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6833 BPF_LD_MAP_FD(BPF_REG_1, 0),
6834 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6835 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6836 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
6837 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6838 BPF_EXIT_INSN(),
6839 },
6840	.fixup_map_hash_48b = { 3 },
6841	.errstr = "R0 bitwise operator &= on pointer",
6842	.result = REJECT,
6843	},
6844 {
6845 "map element value illegal alu op, 2",
6846 .insns = {
6847 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6849 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6850 BPF_LD_MAP_FD(BPF_REG_1, 0),
6851 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6852 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6853 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
6854 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6855 BPF_EXIT_INSN(),
6856 },
6857	.fixup_map_hash_48b = { 3 },
6858	.errstr = "R0 32-bit pointer arithmetic prohibited",
6859	.result = REJECT,
6860	},
6861 {
6862 "map element value illegal alu op, 3",
6863 .insns = {
6864 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6866 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6867 BPF_LD_MAP_FD(BPF_REG_1, 0),
6868 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6869 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6870 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
6871 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6872 BPF_EXIT_INSN(),
6873 },
6874	.fixup_map_hash_48b = { 3 },
6875	.errstr = "R0 pointer arithmetic with /= operator",
6876	.result = REJECT,
6877	},
6878 {
6879 "map element value illegal alu op, 4",
6880 .insns = {
6881 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6882 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6883 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6884 BPF_LD_MAP_FD(BPF_REG_1, 0),
6885 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6886 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6887 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
6888 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6889 BPF_EXIT_INSN(),
6890 },
6891	.fixup_map_hash_48b = { 3 },
6892	.errstr_unpriv = "R0 pointer arithmetic prohibited",
6893 .errstr = "invalid mem access 'inv'",
6894 .result = REJECT,
6895 .result_unpriv = REJECT,
6896 },
6897 {
6898 "map element value illegal alu op, 5",
6899 .insns = {
6900 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6902 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6903 BPF_LD_MAP_FD(BPF_REG_1, 0),
6904 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6905 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6906 BPF_MOV64_IMM(BPF_REG_3, 4096),
6907 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6909 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6910 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
6911 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
6912 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6913 BPF_EXIT_INSN(),
6914 },
6915	.fixup_map_hash_48b = { 3 },
6916	.errstr = "R0 invalid mem access 'inv'",
6917	.result = REJECT,
6918	},
6919 {
6920 "map element value is preserved across register spilling",
6921	.insns = {
6922 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6924 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6925 BPF_LD_MAP_FD(BPF_REG_1, 0),
6926 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6927 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
6929 offsetof(struct test_val, foo)),
6930 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6931 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6933 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6934 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6935 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6936 BPF_EXIT_INSN(),
6937 },
6938	.fixup_map_hash_48b = { 3 },
6939	.errstr_unpriv = "R0 leaks addr",
6940	.result = ACCEPT,
6941	.result_unpriv = REJECT,
6942	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6943	},
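	/*
	 * "Variable memory" tests: the length passed to bpf_probe_read() or
	 * bpf_csum_diff() is not constant but read back from the stack or a
	 * map, so it has to be bounded by a bitwise AND or by explicit
	 * min/max jumps before the helper call.
	 */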
6944	{
6945 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
6946 .insns = {
6947 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6949 BPF_MOV64_IMM(BPF_REG_0, 0),
6950 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6951 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6952 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6953 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6954 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6955 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6956 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6957 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6958 BPF_MOV64_IMM(BPF_REG_2, 16),
6959 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6960 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6961 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6962 BPF_MOV64_IMM(BPF_REG_4, 0),
6963 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6964 BPF_MOV64_IMM(BPF_REG_3, 0),
6965 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6966 BPF_MOV64_IMM(BPF_REG_0, 0),
6967 BPF_EXIT_INSN(),
6968 },
6969 .result = ACCEPT,
6970 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6971 },
6972 {
6973 "helper access to variable memory: stack, bitwise AND, zero included",
6974 .insns = {
6975 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6977 BPF_MOV64_IMM(BPF_REG_2, 16),
6978 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6979 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6980 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6981 BPF_MOV64_IMM(BPF_REG_3, 0),
6982 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6983 BPF_EXIT_INSN(),
6984 },
6985	.errstr = "invalid indirect read from stack off -64+0 size 64",
6986	.result = REJECT,
6987 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6988 },
6989 {
6990 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
6991 .insns = {
6992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6994 BPF_MOV64_IMM(BPF_REG_2, 16),
6995 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6996 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6997 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
6998 BPF_MOV64_IMM(BPF_REG_4, 0),
6999 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7000 BPF_MOV64_IMM(BPF_REG_3, 0),
7001 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7002 BPF_MOV64_IMM(BPF_REG_0, 0),
7003 BPF_EXIT_INSN(),
7004 },
7005 .errstr = "invalid stack type R1 off=-64 access_size=65",
7006 .result = REJECT,
7007 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7008 },
7009 {
7010 "helper access to variable memory: stack, JMP, correct bounds",
7011 .insns = {
7012 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7014 BPF_MOV64_IMM(BPF_REG_0, 0),
7015 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7016 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7017 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7018 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7019 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7020 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7021 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7022 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7023 BPF_MOV64_IMM(BPF_REG_2, 16),
7024 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7025 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7026 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
7027 BPF_MOV64_IMM(BPF_REG_4, 0),
7028 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7029 BPF_MOV64_IMM(BPF_REG_3, 0),
7030 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7031 BPF_MOV64_IMM(BPF_REG_0, 0),
7032 BPF_EXIT_INSN(),
7033 },
7034 .result = ACCEPT,
7035 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7036 },
7037 {
7038 "helper access to variable memory: stack, JMP (signed), correct bounds",
7039 .insns = {
7040 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7041 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7042 BPF_MOV64_IMM(BPF_REG_0, 0),
7043 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7044 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7045 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7046 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7047 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7048 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7049 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7050 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7051 BPF_MOV64_IMM(BPF_REG_2, 16),
7052 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7053 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7054 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
7055 BPF_MOV64_IMM(BPF_REG_4, 0),
7056 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7057 BPF_MOV64_IMM(BPF_REG_3, 0),
7058 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7059 BPF_MOV64_IMM(BPF_REG_0, 0),
7060 BPF_EXIT_INSN(),
7061 },
7062 .result = ACCEPT,
7063 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7064 },
7065 {
7066 "helper access to variable memory: stack, JMP, bounds + offset",
7067 .insns = {
7068 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7069 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7070 BPF_MOV64_IMM(BPF_REG_2, 16),
7071 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7072 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7073 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
7074 BPF_MOV64_IMM(BPF_REG_4, 0),
7075 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
7076 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7077 BPF_MOV64_IMM(BPF_REG_3, 0),
7078 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7079 BPF_MOV64_IMM(BPF_REG_0, 0),
7080 BPF_EXIT_INSN(),
7081 },
7082 .errstr = "invalid stack type R1 off=-64 access_size=65",
7083 .result = REJECT,
7084 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7085 },
7086 {
7087 "helper access to variable memory: stack, JMP, wrong max",
7088 .insns = {
7089 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7091 BPF_MOV64_IMM(BPF_REG_2, 16),
7092 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7093 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7094 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
7095 BPF_MOV64_IMM(BPF_REG_4, 0),
7096 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7097 BPF_MOV64_IMM(BPF_REG_3, 0),
7098 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7099 BPF_MOV64_IMM(BPF_REG_0, 0),
7100 BPF_EXIT_INSN(),
7101 },
7102 .errstr = "invalid stack type R1 off=-64 access_size=65",
7103 .result = REJECT,
7104 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7105 },
7106 {
7107 "helper access to variable memory: stack, JMP, no max check",
7108 .insns = {
7109 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7110 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7111 BPF_MOV64_IMM(BPF_REG_2, 16),
7112 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7113 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7114 BPF_MOV64_IMM(BPF_REG_4, 0),
7115 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7116 BPF_MOV64_IMM(BPF_REG_3, 0),
7117 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7118 BPF_MOV64_IMM(BPF_REG_0, 0),
7119 BPF_EXIT_INSN(),
7120 },
7121	/* because max wasn't checked, signed min is negative */
7122	.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
7123	.result = REJECT,
7124 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7125 },
7126 {
7127 "helper access to variable memory: stack, JMP, no min check",
7128 .insns = {
7129 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7131 BPF_MOV64_IMM(BPF_REG_2, 16),
7132 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7133 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7134 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
7135 BPF_MOV64_IMM(BPF_REG_3, 0),
7136 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7137 BPF_MOV64_IMM(BPF_REG_0, 0),
7138 BPF_EXIT_INSN(),
7139 },
7140	.errstr = "invalid indirect read from stack off -64+0 size 64",
7141	.result = REJECT,
7142 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7143 },
7144 {
7145 "helper access to variable memory: stack, JMP (signed), no min check",
7146 .insns = {
7147 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7149 BPF_MOV64_IMM(BPF_REG_2, 16),
7150 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7151 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7152 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
7153 BPF_MOV64_IMM(BPF_REG_3, 0),
7154 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7155 BPF_MOV64_IMM(BPF_REG_0, 0),
7156 BPF_EXIT_INSN(),
7157 },
7158 .errstr = "R2 min value is negative",
7159 .result = REJECT,
7160 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7161 },
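	/*
	 * Same pattern, but the variable length is applied to a map value
	 * rather than the stack, including a case where the value pointer
	 * was already advanced by 20 bytes.
	 */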
7162 {
7163 "helper access to variable memory: map, JMP, correct bounds",
7164 .insns = {
7165 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7166 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7167 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7168 BPF_LD_MAP_FD(BPF_REG_1, 0),
7169 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7170 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7171 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7172 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7173 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7174 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7175 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7176 sizeof(struct test_val), 4),
7177 BPF_MOV64_IMM(BPF_REG_4, 0),
7178	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7179	BPF_MOV64_IMM(BPF_REG_3, 0),
7180 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7181 BPF_MOV64_IMM(BPF_REG_0, 0),
7182 BPF_EXIT_INSN(),
7183 },
7184	.fixup_map_hash_48b = { 3 },
7185	.result = ACCEPT,
7186 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7187 },
7188 {
7189 "helper access to variable memory: map, JMP, wrong max",
7190 .insns = {
7191 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7193 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7194 BPF_LD_MAP_FD(BPF_REG_1, 0),
7195 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7196 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7197 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7198 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7199 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7200 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7201 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7202 sizeof(struct test_val) + 1, 4),
7203 BPF_MOV64_IMM(BPF_REG_4, 0),
7204	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7205	BPF_MOV64_IMM(BPF_REG_3, 0),
7206 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7207 BPF_MOV64_IMM(BPF_REG_0, 0),
7208 BPF_EXIT_INSN(),
7209 },
7210	.fixup_map_hash_48b = { 3 },
7211	.errstr = "invalid access to map value, value_size=48 off=0 size=49",
7212 .result = REJECT,
7213 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7214 },
7215 {
7216 "helper access to variable memory: map adjusted, JMP, correct bounds",
7217 .insns = {
7218 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7220 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7221 BPF_LD_MAP_FD(BPF_REG_1, 0),
7222 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7223 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7224 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7226 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7227 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7228 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7229 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7230 sizeof(struct test_val) - 20, 4),
7231 BPF_MOV64_IMM(BPF_REG_4, 0),
7232	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7233	BPF_MOV64_IMM(BPF_REG_3, 0),
7234 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7235 BPF_MOV64_IMM(BPF_REG_0, 0),
7236 BPF_EXIT_INSN(),
7237 },
7238	.fixup_map_hash_48b = { 3 },
7239	.result = ACCEPT,
7240 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7241 },
7242 {
7243 "helper access to variable memory: map adjusted, JMP, wrong max",
7244 .insns = {
7245 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7247 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7248 BPF_LD_MAP_FD(BPF_REG_1, 0),
7249 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7250 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7251 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7253 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7254 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7255 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7256 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7257 sizeof(struct test_val) - 19, 4),
7258 BPF_MOV64_IMM(BPF_REG_4, 0),
7259	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7260	BPF_MOV64_IMM(BPF_REG_3, 0),
7261 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7262 BPF_MOV64_IMM(BPF_REG_0, 0),
7263 BPF_EXIT_INSN(),
7264 },
7265	.fixup_map_hash_48b = { 3 },
7266	.errstr = "R1 min value is outside of the array range",
7267 .result = REJECT,
7268 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7269 },
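	/*
	 * bpf_csum_diff() takes ARG_PTR_TO_MEM_OR_NULL: a NULL buffer is only
	 * acceptable together with size 0, while a non-NULL stack, map or
	 * packet pointer allows a possibly-zero size.
	 */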
7270 {
7271	"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7272	.insns = {
7273 BPF_MOV64_IMM(BPF_REG_1, 0),
7274 BPF_MOV64_IMM(BPF_REG_2, 0),
7275 BPF_MOV64_IMM(BPF_REG_3, 0),
7276 BPF_MOV64_IMM(BPF_REG_4, 0),
7277 BPF_MOV64_IMM(BPF_REG_5, 0),
7278 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7279 BPF_EXIT_INSN(),
7280 },
7281 .result = ACCEPT,
7282 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7283 },
7284 {
7285	"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7286	.insns = {
7287 BPF_MOV64_IMM(BPF_REG_1, 0),
7288	BPF_MOV64_IMM(BPF_REG_2, 1),
7289	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7290	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7291	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7292 BPF_MOV64_IMM(BPF_REG_3, 0),
7293 BPF_MOV64_IMM(BPF_REG_4, 0),
7294 BPF_MOV64_IMM(BPF_REG_5, 0),
7295 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7296 BPF_EXIT_INSN(),
7297 },
7298	.errstr = "R1 type=inv expected=fp",
7299	.result = REJECT,
7300 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7301 },
7302 {
7303	"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7304	.insns = {
7305 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7306 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7307 BPF_MOV64_IMM(BPF_REG_2, 0),
7308 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7309 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
7310 BPF_MOV64_IMM(BPF_REG_3, 0),
7311 BPF_MOV64_IMM(BPF_REG_4, 0),
7312 BPF_MOV64_IMM(BPF_REG_5, 0),
7313 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7314 BPF_EXIT_INSN(),
7315 },
7316	.result = ACCEPT,
7317 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7318 },
7319 {
7320	"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7321	.insns = {
7322 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7323 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7325 BPF_LD_MAP_FD(BPF_REG_1, 0),
7326 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7327 BPF_FUNC_map_lookup_elem),
7328 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7329 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7330 BPF_MOV64_IMM(BPF_REG_2, 0),
7331 BPF_MOV64_IMM(BPF_REG_3, 0),
7332 BPF_MOV64_IMM(BPF_REG_4, 0),
7333 BPF_MOV64_IMM(BPF_REG_5, 0),
7334 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7335 BPF_EXIT_INSN(),
7336 },
7337	.fixup_map_hash_8b = { 3 },
7338	.result = ACCEPT,
7339 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7340 },
7341 {
7342	"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7343	.insns = {
7344 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7345 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7346 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7347 BPF_LD_MAP_FD(BPF_REG_1, 0),
7348 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7349 BPF_FUNC_map_lookup_elem),
7350 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7351 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7352 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
7353 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7354 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7355 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7356 BPF_MOV64_IMM(BPF_REG_3, 0),
7357 BPF_MOV64_IMM(BPF_REG_4, 0),
7358 BPF_MOV64_IMM(BPF_REG_5, 0),
7359 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7360 BPF_EXIT_INSN(),
7361 },
7362	.fixup_map_hash_8b = { 3 },
7363	.result = ACCEPT,
7364 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7365 },
7366 {
7367	"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7368	.insns = {
7369 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7370 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7371 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7372 BPF_LD_MAP_FD(BPF_REG_1, 0),
7373 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7374 BPF_FUNC_map_lookup_elem),
7375 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7376 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7377 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7378 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7379 BPF_MOV64_IMM(BPF_REG_3, 0),
7380 BPF_MOV64_IMM(BPF_REG_4, 0),
7381 BPF_MOV64_IMM(BPF_REG_5, 0),
7382 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7383 BPF_EXIT_INSN(),
7384 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007385 .fixup_map_hash_8b = { 3 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08007386 .result = ACCEPT,
7387 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7388 },
7389 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007390 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08007391 .insns = {
7392 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7393 offsetof(struct __sk_buff, data)),
7394 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7395 offsetof(struct __sk_buff, data_end)),
7396 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
7397 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7398 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
7399 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
7400 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
7401 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7402 BPF_MOV64_IMM(BPF_REG_3, 0),
7403 BPF_MOV64_IMM(BPF_REG_4, 0),
7404 BPF_MOV64_IMM(BPF_REG_5, 0),
7405 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7406 BPF_EXIT_INSN(),
7407 },
7408 .result = ACCEPT,
Gianluca Borello06c1c042017-01-09 10:19:49 -08007409 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08007410 .retval = 0 /* csum_diff of 64-byte packet */,
Gianluca Borello06c1c042017-01-09 10:19:49 -08007411 },
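	/*
	 * bpf_probe_read() declares its destination buffer as plain
	 * ARG_PTR_TO_MEM, so a NULL pointer must be rejected regardless of
	 * size, while a non-NULL stack or map pointer with a (possibly) zero
	 * size is accepted, as the next group of tests checks.
	 */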
7412 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007413 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7414 .insns = {
7415 BPF_MOV64_IMM(BPF_REG_1, 0),
7416 BPF_MOV64_IMM(BPF_REG_2, 0),
7417 BPF_MOV64_IMM(BPF_REG_3, 0),
7418 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7419 BPF_EXIT_INSN(),
7420 },
7421 .errstr = "R1 type=inv expected=fp",
7422 .result = REJECT,
7423 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7424 },
7425 {
7426 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7427 .insns = {
7428 BPF_MOV64_IMM(BPF_REG_1, 0),
7429 BPF_MOV64_IMM(BPF_REG_2, 1),
7430 BPF_MOV64_IMM(BPF_REG_3, 0),
7431 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7432 BPF_EXIT_INSN(),
7433 },
7434 .errstr = "R1 type=inv expected=fp",
7435 .result = REJECT,
7436 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7437 },
7438 {
7439 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7440 .insns = {
7441 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7442 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7443 BPF_MOV64_IMM(BPF_REG_2, 0),
7444 BPF_MOV64_IMM(BPF_REG_3, 0),
7445 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7446 BPF_EXIT_INSN(),
7447 },
7448 .result = ACCEPT,
7449 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7450 },
7451 {
7452 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7453 .insns = {
7454 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7457 BPF_LD_MAP_FD(BPF_REG_1, 0),
7458 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7459 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7460 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7461 BPF_MOV64_IMM(BPF_REG_2, 0),
7462 BPF_MOV64_IMM(BPF_REG_3, 0),
7463 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7464 BPF_EXIT_INSN(),
7465 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007466 .fixup_map_hash_8b = { 3 },
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007467 .result = ACCEPT,
7468 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7469 },
7470 {
7471 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7472 .insns = {
7473 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7474 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7476 BPF_LD_MAP_FD(BPF_REG_1, 0),
7477 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7478 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7479 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7480 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7481 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7482 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7483 BPF_MOV64_IMM(BPF_REG_3, 0),
7484 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7485 BPF_EXIT_INSN(),
7486 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007487 .fixup_map_hash_8b = { 3 },
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007488 .result = ACCEPT,
7489 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7490 },
7491 {
7492 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7493 .insns = {
7494 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7495 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7496 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7497 BPF_LD_MAP_FD(BPF_REG_1, 0),
7498 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7499 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7500 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7501 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7502 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7503 BPF_MOV64_IMM(BPF_REG_3, 0),
7504 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7505 BPF_EXIT_INSN(),
7506 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007507 .fixup_map_hash_8b = { 3 },
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007508 .result = ACCEPT,
7509 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7510 },
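	/*
	 * The two tests below pass a variable size to bpf_probe_read() with a
	 * stack buffer at fp-64.  The first leaves the 8 bytes at fp-32
	 * uninitialized, so the worst-case 64-byte read must be rejected
	 * ("invalid indirect read from stack off -64+32 size 64"); the second
	 * initializes the whole buffer and is accepted.
	 */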
7511 {
Gianluca Borello06c1c042017-01-09 10:19:49 -08007512 "helper access to variable memory: 8 bytes leak",
7513 .insns = {
7514 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7516 BPF_MOV64_IMM(BPF_REG_0, 0),
7517 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7518 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7519 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7520 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7521 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7522 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7523 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08007524 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01007525 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7526 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08007527 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7528 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7529 BPF_MOV64_IMM(BPF_REG_3, 0),
7530 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7531 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7532 BPF_EXIT_INSN(),
7533 },
7534 .errstr = "invalid indirect read from stack off -64+32 size 64",
7535 .result = REJECT,
7536 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7537 },
7538 {
7539 "helper access to variable memory: 8 bytes no leak (init memory)",
7540 .insns = {
7541 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7542 BPF_MOV64_IMM(BPF_REG_0, 0),
7543 BPF_MOV64_IMM(BPF_REG_0, 0),
7544 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7545 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7546 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7547 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7548 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7549 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7550 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7551 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7553 BPF_MOV64_IMM(BPF_REG_2, 0),
7554 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7555 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7556 BPF_MOV64_IMM(BPF_REG_3, 0),
7557 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7558 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7559 BPF_EXIT_INSN(),
7560 },
7561 .result = ACCEPT,
7562 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7563 },
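	/*
	 * The two tests below derive an array offset from a map value using an
	 * AND with a negative constant resp. a MOD/AND/RSH sequence.  Neither
	 * gives the verifier a usable upper bound, so the access into the
	 * 48-byte value must be rejected ("R0 max value is outside of the
	 * array range").
	 */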
Josef Bacik29200c12017-02-03 16:25:23 -05007564 {
7565 "invalid and of negative number",
7566 .insns = {
7567 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7570 BPF_LD_MAP_FD(BPF_REG_1, 0),
7571 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7572 BPF_FUNC_map_lookup_elem),
7573 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Edward Creef65b1842017-08-07 15:27:12 +01007574 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik29200c12017-02-03 16:25:23 -05007575 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7576 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7577 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7578 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7579 offsetof(struct test_val, foo)),
7580 BPF_EXIT_INSN(),
7581 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007582 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007583 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05007584 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02007585 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik29200c12017-02-03 16:25:23 -05007586 },
7587 {
7588 "invalid range check",
7589 .insns = {
7590 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7591 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7593 BPF_LD_MAP_FD(BPF_REG_1, 0),
7594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7595 BPF_FUNC_map_lookup_elem),
7596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
7597 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7598 BPF_MOV64_IMM(BPF_REG_9, 1),
7599 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
7600 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
7601 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
7602 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
7603 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
7604 BPF_MOV32_IMM(BPF_REG_3, 1),
7605 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
7606 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
7607 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7608 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
7609 			BPF_MOV64_IMM(BPF_REG_0, 0),
7610 BPF_EXIT_INSN(),
7611 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007612 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007613 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05007614 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02007615 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07007616 },
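	/*
	 * Map-in-map: a lookup in the outer map yields an inner map pointer
	 * (or NULL).  It may only be used as-is after a NULL check; doing
	 * pointer arithmetic on it or skipping the NULL check is rejected,
	 * as the two tests following the accepted one show.
	 */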
7617 {
7618 "map in map access",
7619 .insns = {
7620 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7621 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7623 BPF_LD_MAP_FD(BPF_REG_1, 0),
7624 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7625 BPF_FUNC_map_lookup_elem),
7626 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7627 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7628 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7629 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7630 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7631 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7632 BPF_FUNC_map_lookup_elem),
Roman Gushchin0069fb82018-08-02 15:47:10 -07007633 BPF_MOV64_IMM(BPF_REG_0, 0),
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07007634 BPF_EXIT_INSN(),
7635 },
7636 .fixup_map_in_map = { 3 },
7637 .result = ACCEPT,
7638 },
7639 {
7640 "invalid inner map pointer",
7641 .insns = {
7642 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7643 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7645 BPF_LD_MAP_FD(BPF_REG_1, 0),
7646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7647 BPF_FUNC_map_lookup_elem),
7648 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7649 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7650 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7652 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7654 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7655 BPF_FUNC_map_lookup_elem),
Roman Gushchin0069fb82018-08-02 15:47:10 -07007656 BPF_MOV64_IMM(BPF_REG_0, 0),
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07007657 BPF_EXIT_INSN(),
7658 },
7659 .fixup_map_in_map = { 3 },
Joe Stringeraad2eea2018-10-02 13:35:30 -07007660 .errstr = "R1 pointer arithmetic on map_ptr prohibited",
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07007661 .result = REJECT,
7662 },
7663 {
7664 "forgot null checking on the inner map pointer",
7665 .insns = {
7666 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7667 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7668 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7669 BPF_LD_MAP_FD(BPF_REG_1, 0),
7670 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7671 BPF_FUNC_map_lookup_elem),
7672 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7673 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7675 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7676 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7677 BPF_FUNC_map_lookup_elem),
Roman Gushchin0069fb82018-08-02 15:47:10 -07007678 BPF_MOV64_IMM(BPF_REG_0, 0),
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07007679 BPF_EXIT_INSN(),
7680 },
7681 .fixup_map_in_map = { 3 },
7682 .errstr = "R1 type=map_value_or_null expected=map_ptr",
7683 .result = REJECT,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02007684 },
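	/*
	 * BPF_LD_ABS/BPF_LD_IND implicitly take the skb pointer in R6, return
	 * the loaded data in R0 and treat R1-R5 as scratch registers, so any
	 * read of R1-R5 after such a load must fail with "!read_ok" while
	 * registers like R7 are preserved.
	 */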
7685 {
7686 "ld_abs: check calling conv, r1",
7687 .insns = {
7688 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7689 BPF_MOV64_IMM(BPF_REG_1, 0),
7690 BPF_LD_ABS(BPF_W, -0x200000),
7691 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7692 BPF_EXIT_INSN(),
7693 },
7694 .errstr = "R1 !read_ok",
7695 .result = REJECT,
7696 },
7697 {
7698 "ld_abs: check calling conv, r2",
7699 .insns = {
7700 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7701 BPF_MOV64_IMM(BPF_REG_2, 0),
7702 BPF_LD_ABS(BPF_W, -0x200000),
7703 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7704 BPF_EXIT_INSN(),
7705 },
7706 .errstr = "R2 !read_ok",
7707 .result = REJECT,
7708 },
7709 {
7710 "ld_abs: check calling conv, r3",
7711 .insns = {
7712 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7713 BPF_MOV64_IMM(BPF_REG_3, 0),
7714 BPF_LD_ABS(BPF_W, -0x200000),
7715 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7716 BPF_EXIT_INSN(),
7717 },
7718 .errstr = "R3 !read_ok",
7719 .result = REJECT,
7720 },
7721 {
7722 "ld_abs: check calling conv, r4",
7723 .insns = {
7724 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7725 BPF_MOV64_IMM(BPF_REG_4, 0),
7726 BPF_LD_ABS(BPF_W, -0x200000),
7727 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7728 BPF_EXIT_INSN(),
7729 },
7730 .errstr = "R4 !read_ok",
7731 .result = REJECT,
7732 },
7733 {
7734 "ld_abs: check calling conv, r5",
7735 .insns = {
7736 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7737 BPF_MOV64_IMM(BPF_REG_5, 0),
7738 BPF_LD_ABS(BPF_W, -0x200000),
7739 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7740 BPF_EXIT_INSN(),
7741 },
7742 .errstr = "R5 !read_ok",
7743 .result = REJECT,
7744 },
7745 {
7746 "ld_abs: check calling conv, r7",
7747 .insns = {
7748 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7749 BPF_MOV64_IMM(BPF_REG_7, 0),
7750 BPF_LD_ABS(BPF_W, -0x200000),
7751 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7752 BPF_EXIT_INSN(),
7753 },
7754 .result = ACCEPT,
7755 },
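	/*
	 * After a helper that may change the packet (skb_vlan_push), the
	 * program below restores the context pointer into R6 from a saved
	 * copy in R7 before issuing further LD_ABS loads; this is expected
	 * to be accepted and to return 42.
	 */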
7756 {
Daniel Borkmann87ab8192017-12-14 21:07:27 +01007757 "ld_abs: tests on r6 and skb data reload helper",
7758 .insns = {
7759 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7760 BPF_LD_ABS(BPF_B, 0),
7761 BPF_LD_ABS(BPF_H, 0),
7762 BPF_LD_ABS(BPF_W, 0),
7763 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
7764 BPF_MOV64_IMM(BPF_REG_6, 0),
7765 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
7766 BPF_MOV64_IMM(BPF_REG_2, 1),
7767 BPF_MOV64_IMM(BPF_REG_3, 2),
7768 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7769 BPF_FUNC_skb_vlan_push),
7770 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
7771 BPF_LD_ABS(BPF_B, 0),
7772 BPF_LD_ABS(BPF_H, 0),
7773 BPF_LD_ABS(BPF_W, 0),
7774 BPF_MOV64_IMM(BPF_REG_0, 42),
7775 BPF_EXIT_INSN(),
7776 },
7777 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7778 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08007779 .retval = 42 /* ultimate return value */,
Daniel Borkmann87ab8192017-12-14 21:07:27 +01007780 },
7781 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02007782 "ld_ind: check calling conv, r1",
7783 .insns = {
7784 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7785 BPF_MOV64_IMM(BPF_REG_1, 1),
7786 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
7787 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7788 BPF_EXIT_INSN(),
7789 },
7790 .errstr = "R1 !read_ok",
7791 .result = REJECT,
7792 },
7793 {
7794 "ld_ind: check calling conv, r2",
7795 .insns = {
7796 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7797 BPF_MOV64_IMM(BPF_REG_2, 1),
7798 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
7799 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7800 BPF_EXIT_INSN(),
7801 },
7802 .errstr = "R2 !read_ok",
7803 .result = REJECT,
7804 },
7805 {
7806 "ld_ind: check calling conv, r3",
7807 .insns = {
7808 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7809 BPF_MOV64_IMM(BPF_REG_3, 1),
7810 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
7811 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7812 BPF_EXIT_INSN(),
7813 },
7814 .errstr = "R3 !read_ok",
7815 .result = REJECT,
7816 },
7817 {
7818 "ld_ind: check calling conv, r4",
7819 .insns = {
7820 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7821 BPF_MOV64_IMM(BPF_REG_4, 1),
7822 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
7823 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7824 BPF_EXIT_INSN(),
7825 },
7826 .errstr = "R4 !read_ok",
7827 .result = REJECT,
7828 },
7829 {
7830 "ld_ind: check calling conv, r5",
7831 .insns = {
7832 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7833 BPF_MOV64_IMM(BPF_REG_5, 1),
7834 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
7835 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7836 BPF_EXIT_INSN(),
7837 },
7838 .errstr = "R5 !read_ok",
7839 .result = REJECT,
7840 },
7841 {
7842 "ld_ind: check calling conv, r7",
7843 .insns = {
7844 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7845 BPF_MOV64_IMM(BPF_REG_7, 1),
7846 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
7847 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7848 BPF_EXIT_INSN(),
7849 },
7850 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08007851 .retval = 1,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02007852 },
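	/*
	 * The perf_event program type allows narrow (byte/half/word) loads of
	 * the 64-bit sample_period field; on big-endian the offset has to be
	 * adjusted to hit the low-order bytes, hence the __BYTE_ORDER checks.
	 * By contrast, the half-word loads of skb->data and of skb->tc_classid
	 * (for an LWT program) further below are rejected as invalid context
	 * accesses.
	 */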
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007853 {
7854 "check bpf_perf_event_data->sample_period byte load permitted",
7855 .insns = {
7856 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007857#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007858 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7859 offsetof(struct bpf_perf_event_data, sample_period)),
7860#else
7861 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7862 offsetof(struct bpf_perf_event_data, sample_period) + 7),
7863#endif
7864 BPF_EXIT_INSN(),
7865 },
7866 .result = ACCEPT,
7867 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7868 },
7869 {
7870 "check bpf_perf_event_data->sample_period half load permitted",
7871 .insns = {
7872 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007873#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007874 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7875 offsetof(struct bpf_perf_event_data, sample_period)),
7876#else
7877 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7878 offsetof(struct bpf_perf_event_data, sample_period) + 6),
7879#endif
7880 BPF_EXIT_INSN(),
7881 },
7882 .result = ACCEPT,
7883 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7884 },
7885 {
7886 "check bpf_perf_event_data->sample_period word load permitted",
7887 .insns = {
7888 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007889#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007890 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7891 offsetof(struct bpf_perf_event_data, sample_period)),
7892#else
7893 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7894 offsetof(struct bpf_perf_event_data, sample_period) + 4),
7895#endif
7896 BPF_EXIT_INSN(),
7897 },
7898 .result = ACCEPT,
7899 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7900 },
7901 {
7902 "check bpf_perf_event_data->sample_period dword load permitted",
7903 .insns = {
7904 BPF_MOV64_IMM(BPF_REG_0, 0),
7905 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
7906 offsetof(struct bpf_perf_event_data, sample_period)),
7907 BPF_EXIT_INSN(),
7908 },
7909 .result = ACCEPT,
7910 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7911 },
7912 {
7913 "check skb->data half load not permitted",
7914 .insns = {
7915 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007916#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007917 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7918 offsetof(struct __sk_buff, data)),
7919#else
7920 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7921 offsetof(struct __sk_buff, data) + 2),
7922#endif
7923 BPF_EXIT_INSN(),
7924 },
7925 .result = REJECT,
7926 .errstr = "invalid bpf_context access",
7927 },
7928 {
7929 "check skb->tc_classid half load not permitted for lwt prog",
7930 .insns = {
7931 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007932#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007933 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7934 offsetof(struct __sk_buff, tc_classid)),
7935#else
7936 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7937 offsetof(struct __sk_buff, tc_classid) + 2),
7938#endif
7939 BPF_EXIT_INSN(),
7940 },
7941 .result = REJECT,
7942 .errstr = "invalid bpf_context access",
7943 .prog_type = BPF_PROG_TYPE_LWT_IN,
7944 },
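	/*
	 * The "mixing signed and unsigned" series loads a scalar the verifier
	 * cannot track precisely (an immediate spilled with BPF_ST_MEM and
	 * loaded back) and guards it with combinations of unsigned (JGT/JGE)
	 * and signed (JSGT) comparisons.  Unless the checks really pin down
	 * both ends of the range, adding the scalar to the map value pointer
	 * must be rejected with "unbounded min value"; variants 4, 7 and 9
	 * use combinations that do bound the value and are accepted.
	 */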
Edward Creeb7122962017-07-21 00:00:24 +02007945 {
7946 "bounds checks mixing signed and unsigned, positive bounds",
7947 .insns = {
7948 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7949 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7951 BPF_LD_MAP_FD(BPF_REG_1, 0),
7952 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7953 BPF_FUNC_map_lookup_elem),
7954 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7955 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7956 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7957 BPF_MOV64_IMM(BPF_REG_2, 2),
7958 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
7959 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
7960 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7961 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7962 BPF_MOV64_IMM(BPF_REG_0, 0),
7963 BPF_EXIT_INSN(),
7964 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007965 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007966 .errstr = "unbounded min value",
Edward Creeb7122962017-07-21 00:00:24 +02007967 .result = REJECT,
Edward Creeb7122962017-07-21 00:00:24 +02007968 },
7969 {
7970 "bounds checks mixing signed and unsigned",
7971 .insns = {
7972 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7973 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7975 BPF_LD_MAP_FD(BPF_REG_1, 0),
7976 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7977 BPF_FUNC_map_lookup_elem),
7978 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7979 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7980 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7981 BPF_MOV64_IMM(BPF_REG_2, -1),
7982 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7983 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7984 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7985 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7986 BPF_MOV64_IMM(BPF_REG_0, 0),
7987 BPF_EXIT_INSN(),
7988 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007989 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007990 .errstr = "unbounded min value",
Edward Creeb7122962017-07-21 00:00:24 +02007991 .result = REJECT,
Edward Creeb7122962017-07-21 00:00:24 +02007992 },
Daniel Borkmann86412502017-07-21 00:00:25 +02007993 {
7994 "bounds checks mixing signed and unsigned, variant 2",
7995 .insns = {
7996 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7997 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7999 BPF_LD_MAP_FD(BPF_REG_1, 0),
8000 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8001 BPF_FUNC_map_lookup_elem),
8002 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8003 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8004 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8005 BPF_MOV64_IMM(BPF_REG_2, -1),
8006 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8007 BPF_MOV64_IMM(BPF_REG_8, 0),
8008 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
8009 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8010 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8011 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8012 BPF_MOV64_IMM(BPF_REG_0, 0),
8013 BPF_EXIT_INSN(),
8014 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008015 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008016 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008017 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008018 },
8019 {
8020 "bounds checks mixing signed and unsigned, variant 3",
8021 .insns = {
8022 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8023 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8025 BPF_LD_MAP_FD(BPF_REG_1, 0),
8026 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8027 BPF_FUNC_map_lookup_elem),
8028 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8029 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8030 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8031 BPF_MOV64_IMM(BPF_REG_2, -1),
8032 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
8033 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
8034 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8035 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8036 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8037 BPF_MOV64_IMM(BPF_REG_0, 0),
8038 BPF_EXIT_INSN(),
8039 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008040 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008041 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008042 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008043 },
8044 {
8045 "bounds checks mixing signed and unsigned, variant 4",
8046 .insns = {
8047 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8048 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8050 BPF_LD_MAP_FD(BPF_REG_1, 0),
8051 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8052 BPF_FUNC_map_lookup_elem),
8053 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8054 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8055 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8056 BPF_MOV64_IMM(BPF_REG_2, 1),
8057 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
8058 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8059 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8060 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8061 BPF_MOV64_IMM(BPF_REG_0, 0),
8062 BPF_EXIT_INSN(),
8063 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008064 .fixup_map_hash_8b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01008065 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008066 },
8067 {
8068 "bounds checks mixing signed and unsigned, variant 5",
8069 .insns = {
8070 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8071 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8072 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8073 BPF_LD_MAP_FD(BPF_REG_1, 0),
8074 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8075 BPF_FUNC_map_lookup_elem),
8076 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8077 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8078 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8079 BPF_MOV64_IMM(BPF_REG_2, -1),
8080 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8081 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
8082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
8083 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8084 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8085 BPF_MOV64_IMM(BPF_REG_0, 0),
8086 BPF_EXIT_INSN(),
8087 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008088 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008089 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008090 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008091 },
8092 {
8093 "bounds checks mixing signed and unsigned, variant 6",
8094 .insns = {
8095 BPF_MOV64_IMM(BPF_REG_2, 0),
8096 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
8097 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
8098 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8099 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
8100 BPF_MOV64_IMM(BPF_REG_6, -1),
8101 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
8102 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
8103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8104 BPF_MOV64_IMM(BPF_REG_5, 0),
8105 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
8106 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8107 BPF_FUNC_skb_load_bytes),
8108 BPF_MOV64_IMM(BPF_REG_0, 0),
8109 BPF_EXIT_INSN(),
8110 },
Daniel Borkmann86412502017-07-21 00:00:25 +02008111 .errstr = "R4 min value is negative, either use unsigned",
8112 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008113 },
8114 {
8115 "bounds checks mixing signed and unsigned, variant 7",
8116 .insns = {
8117 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8118 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8120 BPF_LD_MAP_FD(BPF_REG_1, 0),
8121 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8122 BPF_FUNC_map_lookup_elem),
8123 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8124 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8125 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8126 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
8127 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8128 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8129 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8130 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8131 BPF_MOV64_IMM(BPF_REG_0, 0),
8132 BPF_EXIT_INSN(),
8133 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008134 .fixup_map_hash_8b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01008135 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008136 },
8137 {
8138 "bounds checks mixing signed and unsigned, variant 8",
8139 .insns = {
8140 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8141 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8142 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8143 BPF_LD_MAP_FD(BPF_REG_1, 0),
8144 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8145 BPF_FUNC_map_lookup_elem),
Daniel Borkmann86412502017-07-21 00:00:25 +02008146 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8147 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8148 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8149 BPF_MOV64_IMM(BPF_REG_2, -1),
8150 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8151 BPF_MOV64_IMM(BPF_REG_0, 0),
8152 BPF_EXIT_INSN(),
8153 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8154 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8155 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8156 BPF_MOV64_IMM(BPF_REG_0, 0),
8157 BPF_EXIT_INSN(),
8158 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008159 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008160 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008161 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008162 },
8163 {
Edward Creef65b1842017-08-07 15:27:12 +01008164 "bounds checks mixing signed and unsigned, variant 9",
Daniel Borkmann86412502017-07-21 00:00:25 +02008165 .insns = {
8166 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8167 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8169 BPF_LD_MAP_FD(BPF_REG_1, 0),
8170 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8171 BPF_FUNC_map_lookup_elem),
8172 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8173 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8174 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8175 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
8176 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8177 BPF_MOV64_IMM(BPF_REG_0, 0),
8178 BPF_EXIT_INSN(),
8179 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8180 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8181 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8182 BPF_MOV64_IMM(BPF_REG_0, 0),
8183 BPF_EXIT_INSN(),
8184 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008185 .fixup_map_hash_8b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01008186 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008187 },
8188 {
Edward Creef65b1842017-08-07 15:27:12 +01008189 "bounds checks mixing signed and unsigned, variant 10",
Daniel Borkmann86412502017-07-21 00:00:25 +02008190 .insns = {
8191 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8192 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8193 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8194 BPF_LD_MAP_FD(BPF_REG_1, 0),
8195 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8196 BPF_FUNC_map_lookup_elem),
8197 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8198 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8199 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8200 BPF_MOV64_IMM(BPF_REG_2, 0),
8201 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8202 BPF_MOV64_IMM(BPF_REG_0, 0),
8203 BPF_EXIT_INSN(),
8204 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8205 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8206 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8207 BPF_MOV64_IMM(BPF_REG_0, 0),
8208 BPF_EXIT_INSN(),
8209 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008210 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008211 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008212 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008213 },
8214 {
Edward Creef65b1842017-08-07 15:27:12 +01008215 "bounds checks mixing signed and unsigned, variant 11",
Daniel Borkmann86412502017-07-21 00:00:25 +02008216 .insns = {
8217 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8218 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8220 BPF_LD_MAP_FD(BPF_REG_1, 0),
8221 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8222 BPF_FUNC_map_lookup_elem),
8223 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8224 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8225 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8226 BPF_MOV64_IMM(BPF_REG_2, -1),
8227 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8228 /* Dead branch. */
8229 BPF_MOV64_IMM(BPF_REG_0, 0),
8230 BPF_EXIT_INSN(),
8231 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8232 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8233 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8234 BPF_MOV64_IMM(BPF_REG_0, 0),
8235 BPF_EXIT_INSN(),
8236 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008237 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008238 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008239 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008240 },
8241 {
Edward Creef65b1842017-08-07 15:27:12 +01008242 "bounds checks mixing signed and unsigned, variant 12",
Daniel Borkmann86412502017-07-21 00:00:25 +02008243 .insns = {
8244 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8245 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8247 BPF_LD_MAP_FD(BPF_REG_1, 0),
8248 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8249 BPF_FUNC_map_lookup_elem),
8250 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8251 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8252 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8253 BPF_MOV64_IMM(BPF_REG_2, -6),
8254 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8255 BPF_MOV64_IMM(BPF_REG_0, 0),
8256 BPF_EXIT_INSN(),
8257 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8258 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8259 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8260 BPF_MOV64_IMM(BPF_REG_0, 0),
8261 BPF_EXIT_INSN(),
8262 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008263 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008264 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008265 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008266 },
8267 {
Edward Creef65b1842017-08-07 15:27:12 +01008268 "bounds checks mixing signed and unsigned, variant 13",
Daniel Borkmann86412502017-07-21 00:00:25 +02008269 .insns = {
8270 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8273 BPF_LD_MAP_FD(BPF_REG_1, 0),
8274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8275 BPF_FUNC_map_lookup_elem),
8276 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8277 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8278 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8279 BPF_MOV64_IMM(BPF_REG_2, 2),
8280 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8281 BPF_MOV64_IMM(BPF_REG_7, 1),
8282 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
8283 BPF_MOV64_IMM(BPF_REG_0, 0),
8284 BPF_EXIT_INSN(),
8285 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
8286 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
8287 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
8288 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8289 BPF_MOV64_IMM(BPF_REG_0, 0),
8290 BPF_EXIT_INSN(),
8291 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008292 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008293 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008294 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008295 },
8296 {
Edward Creef65b1842017-08-07 15:27:12 +01008297 "bounds checks mixing signed and unsigned, variant 14",
Daniel Borkmann86412502017-07-21 00:00:25 +02008298 .insns = {
8299 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
8300 offsetof(struct __sk_buff, mark)),
8301 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8302 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8304 BPF_LD_MAP_FD(BPF_REG_1, 0),
8305 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8306 BPF_FUNC_map_lookup_elem),
8307 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8308 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8309 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8310 BPF_MOV64_IMM(BPF_REG_2, -1),
8311 BPF_MOV64_IMM(BPF_REG_8, 2),
8312 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
8313 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
8314 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8315 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8316 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8317 BPF_MOV64_IMM(BPF_REG_0, 0),
8318 BPF_EXIT_INSN(),
8319 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
8320 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
8321 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008322 .fixup_map_hash_8b = { 4 },
Daniel Borkmann6f161012018-01-18 01:15:21 +01008323 .errstr = "R0 invalid mem access 'inv'",
Daniel Borkmann86412502017-07-21 00:00:25 +02008324 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008325 },
8326 {
Edward Creef65b1842017-08-07 15:27:12 +01008327 "bounds checks mixing signed and unsigned, variant 15",
Daniel Borkmann86412502017-07-21 00:00:25 +02008328 .insns = {
8329 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8330 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8332 BPF_LD_MAP_FD(BPF_REG_1, 0),
8333 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8334 BPF_FUNC_map_lookup_elem),
8335 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8336 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8337 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8338 BPF_MOV64_IMM(BPF_REG_2, -6),
8339 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8340 BPF_MOV64_IMM(BPF_REG_0, 0),
8341 BPF_EXIT_INSN(),
8342 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8343 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
8344 BPF_MOV64_IMM(BPF_REG_0, 0),
8345 BPF_EXIT_INSN(),
8346 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8347 BPF_MOV64_IMM(BPF_REG_0, 0),
8348 BPF_EXIT_INSN(),
8349 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008350 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008351 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008352 .result = REJECT,
8353 .result_unpriv = REJECT,
8354 },
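	/*
	 * Subtraction bounds: both loaded bytes lie in [0, 0xff], so their
	 * difference spans [-0xff, 0xff].  Variant 1 is rejected because even
	 * after the 56-bit right shift the offset can still reach 0xff, well
	 * past the 8-byte value; variant 2 is rejected because the unshifted
	 * difference can be negative.
	 */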
Edward Cree545722c2017-07-21 14:36:57 +01008355 {
Edward Creef65b1842017-08-07 15:27:12 +01008356 "subtraction bounds (map value) variant 1",
Edward Cree545722c2017-07-21 14:36:57 +01008357 .insns = {
8358 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8359 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8361 BPF_LD_MAP_FD(BPF_REG_1, 0),
8362 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8363 BPF_FUNC_map_lookup_elem),
8364 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8365 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8366 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
8367 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8368 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
8369 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8370 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
8371 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8372 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8373 BPF_EXIT_INSN(),
8374 BPF_MOV64_IMM(BPF_REG_0, 0),
8375 BPF_EXIT_INSN(),
8376 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008377 .fixup_map_hash_8b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01008378 .errstr = "R0 max value is outside of the array range",
8379 .result = REJECT,
8380 },
8381 {
8382 "subtraction bounds (map value) variant 2",
8383 .insns = {
8384 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8385 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8387 BPF_LD_MAP_FD(BPF_REG_1, 0),
8388 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8389 BPF_FUNC_map_lookup_elem),
8390 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8391 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8392 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
8393 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8394 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
8395 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8396 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8397 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8398 BPF_EXIT_INSN(),
8399 BPF_MOV64_IMM(BPF_REG_0, 0),
8400 BPF_EXIT_INSN(),
8401 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008402 .fixup_map_hash_8b = { 3 },
Edward Cree545722c2017-07-21 14:36:57 +01008403 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
8404 .result = REJECT,
Edward Cree545722c2017-07-21 14:36:57 +01008405 },
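	/*
	 * A 32-bit MOV immediate zero-extends into the upper half, while a
	 * 64-bit MOV sign-extends its 32-bit immediate, so 0xffffffff becomes
	 * 0xffffffffffffffff.  The tests below check that the verifier tracks
	 * this distinction when the result is later used as a pointer offset.
	 */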
Edward Cree69c4e8a2017-08-07 15:29:51 +01008406 {
Jann Horn2255f8d2017-12-18 20:12:01 -08008407 "bounds check based on zero-extended MOV",
8408 .insns = {
8409 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8410 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8411 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8412 BPF_LD_MAP_FD(BPF_REG_1, 0),
8413 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8414 BPF_FUNC_map_lookup_elem),
8415 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8416 /* r2 = 0x0000'0000'ffff'ffff */
8417 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8418 /* r2 = 0 */
8419 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8420 /* no-op */
8421 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8422 /* access at offset 0 */
8423 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8424 /* exit */
8425 BPF_MOV64_IMM(BPF_REG_0, 0),
8426 BPF_EXIT_INSN(),
8427 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008428 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008429 .result = ACCEPT
8430 },
8431 {
8432 "bounds check based on sign-extended MOV. test1",
8433 .insns = {
8434 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8435 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8437 BPF_LD_MAP_FD(BPF_REG_1, 0),
8438 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8439 BPF_FUNC_map_lookup_elem),
8440 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8441 /* r2 = 0xffff'ffff'ffff'ffff */
8442 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8443 /* r2 = 0xffff'ffff */
8444 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8445 /* r0 = <oob pointer> */
8446 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8447 /* access to OOB pointer */
8448 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8449 /* exit */
8450 BPF_MOV64_IMM(BPF_REG_0, 0),
8451 BPF_EXIT_INSN(),
8452 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008453 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008454 .errstr = "map_value pointer and 4294967295",
8455 .result = REJECT
8456 },
8457 {
8458 "bounds check based on sign-extended MOV. test2",
8459 .insns = {
8460 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8461 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8463 BPF_LD_MAP_FD(BPF_REG_1, 0),
8464 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8465 BPF_FUNC_map_lookup_elem),
8466 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8467 /* r2 = 0xffff'ffff'ffff'ffff */
8468 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8469 /* r2 = 0xfff'ffff */
8470 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8471 /* r0 = <oob pointer> */
8472 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8473 /* access to OOB pointer */
8474 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8475 /* exit */
8476 BPF_MOV64_IMM(BPF_REG_0, 0),
8477 BPF_EXIT_INSN(),
8478 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008479 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008480 .errstr = "R0 min value is outside of the array range",
8481 .result = REJECT
8482 },
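	/*
	 * The effective offset of an access is the pointer's fixed offset plus
	 * its variable offset plus the instruction's own offset.  The two
	 * tests below push that sum far past the 8-byte value (close to 2^30),
	 * which must be rejected.
	 */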
8483 {
8484 "bounds check based on reg_off + var_off + insn_off. test1",
8485 .insns = {
8486 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8487 offsetof(struct __sk_buff, mark)),
8488 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8489 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8491 BPF_LD_MAP_FD(BPF_REG_1, 0),
8492 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8493 BPF_FUNC_map_lookup_elem),
8494 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8495 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8496 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8497 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8498 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8499 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8500 BPF_MOV64_IMM(BPF_REG_0, 0),
8501 BPF_EXIT_INSN(),
8502 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008503 .fixup_map_hash_8b = { 4 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008504 .errstr = "value_size=8 off=1073741825",
8505 .result = REJECT,
8506 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8507 },
8508 {
8509 "bounds check based on reg_off + var_off + insn_off. test2",
8510 .insns = {
8511 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8512 offsetof(struct __sk_buff, mark)),
8513 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8514 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8516 BPF_LD_MAP_FD(BPF_REG_1, 0),
8517 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8518 BPF_FUNC_map_lookup_elem),
8519 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8520 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8522 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8523 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8524 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8525 BPF_MOV64_IMM(BPF_REG_0, 0),
8526 BPF_EXIT_INSN(),
8527 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008528 .fixup_map_hash_8b = { 4 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008529 .errstr = "value 1073741823",
8530 .result = REJECT,
8531 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8532 },
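	/*
	 * 32-bit ALU operations truncate their result.  When the tracked
	 * 64-bit range does not cross a 32-bit boundary (or wraps back to a
	 * single known value, as in the 32-bit addition test), the bounds
	 * survive the truncation and the access is accepted; when the range
	 * does cross a boundary, the value must be treated as (almost)
	 * unbounded.
	 */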
8533 {
8534 "bounds check after truncation of non-boundary-crossing range",
8535 .insns = {
8536 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8537 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8538 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8539 BPF_LD_MAP_FD(BPF_REG_1, 0),
8540 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8541 BPF_FUNC_map_lookup_elem),
8542 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8543 /* r1 = [0x00, 0xff] */
8544 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8545 BPF_MOV64_IMM(BPF_REG_2, 1),
8546 /* r2 = 0x10'0000'0000 */
8547 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8548 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8549 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8550 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8552 /* r1 = [0x00, 0xff] */
8553 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8554 /* r1 = 0 */
8555 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8556 /* no-op */
8557 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8558 /* access at offset 0 */
8559 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8560 /* exit */
8561 BPF_MOV64_IMM(BPF_REG_0, 0),
8562 BPF_EXIT_INSN(),
8563 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008564 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008565 .result = ACCEPT
8566 },
8567 {
8568 "bounds check after truncation of boundary-crossing range (1)",
8569 .insns = {
8570 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8571 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8572 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8573 BPF_LD_MAP_FD(BPF_REG_1, 0),
8574 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8575 BPF_FUNC_map_lookup_elem),
8576 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8577 /* r1 = [0x00, 0xff] */
8578 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8580 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
8581 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8582 /* r1 = [0xffff'ff80, 0xffff'ffff] or
8583 * [0x0000'0000, 0x0000'007f]
8584 */
8585 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8586 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8587 /* r1 = [0x00, 0xff] or
8588 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8589 */
8590 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8591 /* r1 = 0 or
8592 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8593 */
8594 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8595 /* no-op or OOB pointer computation */
8596 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8597 /* potentially OOB access */
8598 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8599 /* exit */
8600 BPF_MOV64_IMM(BPF_REG_0, 0),
8601 BPF_EXIT_INSN(),
8602 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008603 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008604 /* not actually fully unbounded, but the bound is very high */
8605 .errstr = "R0 unbounded memory access",
8606 .result = REJECT
8607 },
8608 {
8609 "bounds check after truncation of boundary-crossing range (2)",
8610 .insns = {
8611 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8612 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8614 BPF_LD_MAP_FD(BPF_REG_1, 0),
8615 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8616 BPF_FUNC_map_lookup_elem),
8617 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8618 /* r1 = [0x00, 0xff] */
8619 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8620 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8621 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
8622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8623 /* r1 = [0xffff'ff80, 0xffff'ffff] or
8624 * [0x0000'0000, 0x0000'007f]
8625 * difference to previous test: truncation via MOV32
8626 * instead of ALU32.
8627 */
8628 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
8629 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8630 /* r1 = [0x00, 0xff] or
8631 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8632 */
8633 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8634 /* r1 = 0 or
8635 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8636 */
8637 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8638 /* no-op or OOB pointer computation */
8639 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8640 /* potentially OOB access */
8641 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8642 /* exit */
8643 BPF_MOV64_IMM(BPF_REG_0, 0),
8644 BPF_EXIT_INSN(),
8645 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008646 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008647 /* not actually fully unbounded, but the bound is very high */
8648 .errstr = "R0 unbounded memory access",
8649 .result = REJECT
8650 },
8651 {
8652 "bounds check after wrapping 32-bit addition",
8653 .insns = {
8654 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8655 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8656 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8657 BPF_LD_MAP_FD(BPF_REG_1, 0),
8658 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8659 BPF_FUNC_map_lookup_elem),
8660 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8661 /* r1 = 0x7fff'ffff */
8662 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
8663 /* r1 = 0xffff'fffe */
8664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8665 /* r1 = 0 */
8666 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
8667 /* no-op */
8668 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8669 /* access at offset 0 */
8670 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8671 /* exit */
8672 BPF_MOV64_IMM(BPF_REG_0, 0),
8673 BPF_EXIT_INSN(),
8674 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008675 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008676 .result = ACCEPT
8677 },
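	/*
	 * Shifting a 32-bit register by 32 is undefined, and right-shifting a
	 * possibly negative 64-bit value still leaves a huge upper bound, so
	 * in both tests below the resulting pointer offset must be rejected.
	 */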
8678 {
8679 "bounds check after shift with oversized count operand",
8680 .insns = {
8681 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8682 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8684 BPF_LD_MAP_FD(BPF_REG_1, 0),
8685 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8686 BPF_FUNC_map_lookup_elem),
8687 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8688 BPF_MOV64_IMM(BPF_REG_2, 32),
8689 BPF_MOV64_IMM(BPF_REG_1, 1),
8690 /* r1 = (u32)1 << (u32)32 = ? */
8691 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
8692 /* r1 = [0x0000, 0xffff] */
8693 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
8694 /* computes unknown pointer, potentially OOB */
8695 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8696 /* potentially OOB access */
8697 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8698 /* exit */
8699 BPF_MOV64_IMM(BPF_REG_0, 0),
8700 BPF_EXIT_INSN(),
8701 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008702 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008703 .errstr = "R0 max value is outside of the array range",
8704 .result = REJECT
8705 },
8706 {
8707 "bounds check after right shift of maybe-negative number",
8708 .insns = {
8709 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8710 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8712 BPF_LD_MAP_FD(BPF_REG_1, 0),
8713 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8714 BPF_FUNC_map_lookup_elem),
8715 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8716 /* r1 = [0x00, 0xff] */
8717 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8718 /* r1 = [-0x01, 0xfe] */
8719 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
8720 /* r1 = 0 or 0xff'ffff'ffff'ffff */
8721 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8722 /* r1 = 0 or 0xffff'ffff'ffff */
8723 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8724 /* computes unknown pointer, potentially OOB */
8725 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8726 /* potentially OOB access */
8727 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8728 /* exit */
8729 BPF_MOV64_IMM(BPF_REG_0, 0),
8730 BPF_EXIT_INSN(),
8731 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008732 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008733 .errstr = "R0 unbounded memory access",
8734 .result = REJECT
8735 },
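	/*
	 * The off+size overflow tests add huge constant or multiplied offsets
	 * to a map value pointer; per the test names, such pointers have to be
	 * refused before the signed 32-bit off+size arithmetic of the access
	 * check could overflow.
	 */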
8736 {
8737 "bounds check map access with off+size signed 32bit overflow. test1",
8738 .insns = {
8739 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8740 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8742 BPF_LD_MAP_FD(BPF_REG_1, 0),
8743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8744 BPF_FUNC_map_lookup_elem),
8745 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8746 BPF_EXIT_INSN(),
8747 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
8748 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8749 BPF_JMP_A(0),
8750 BPF_EXIT_INSN(),
8751 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008752 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008753 .errstr = "map_value pointer and 2147483646",
8754 .result = REJECT
8755 },
8756 {
8757 "bounds check map access with off+size signed 32bit overflow. test2",
8758 .insns = {
8759 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8760 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8762 BPF_LD_MAP_FD(BPF_REG_1, 0),
8763 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8764 BPF_FUNC_map_lookup_elem),
8765 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8766 BPF_EXIT_INSN(),
8767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8768 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8770 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8771 BPF_JMP_A(0),
8772 BPF_EXIT_INSN(),
8773 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008774 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008775 .errstr = "pointer offset 1073741822",
8776 .result = REJECT
8777 },
8778 {
8779 "bounds check map access with off+size signed 32bit overflow. test3",
8780 .insns = {
8781 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8782 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8784 BPF_LD_MAP_FD(BPF_REG_1, 0),
8785 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8786 BPF_FUNC_map_lookup_elem),
8787 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8788 BPF_EXIT_INSN(),
8789 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8790 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8791 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8792 BPF_JMP_A(0),
8793 BPF_EXIT_INSN(),
8794 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008795 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008796 .errstr = "pointer offset -1073741822",
8797 .result = REJECT
8798 },
8799 {
8800 "bounds check map access with off+size signed 32bit overflow. test4",
8801 .insns = {
8802 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8803 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8805 BPF_LD_MAP_FD(BPF_REG_1, 0),
8806 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8807 BPF_FUNC_map_lookup_elem),
8808 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8809 BPF_EXIT_INSN(),
8810 BPF_MOV64_IMM(BPF_REG_1, 1000000),
8811 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
8812 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8813 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8814 BPF_JMP_A(0),
8815 BPF_EXIT_INSN(),
8816 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008817 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008818 .errstr = "map_value pointer and 1000000000000",
8819 .result = REJECT
8820 },
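	/* The two tests below reach bpf_exit with R0 holding either a scalar
	 * loaded from the map value or the frame pointer, depending on
	 * whether the lookup succeeded.  The state-equality check used for
	 * pruning must not treat the pointer and scalar states as equivalent,
	 * and unprivileged loads must still be rejected for leaking the
	 * address in the return value.
	 */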
8821 {
8822 "pointer/scalar confusion in state equality check (way 1)",
8823 .insns = {
8824 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8825 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8827 BPF_LD_MAP_FD(BPF_REG_1, 0),
8828 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8829 BPF_FUNC_map_lookup_elem),
8830 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8831 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8832 BPF_JMP_A(1),
8833 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8834 BPF_JMP_A(0),
8835 BPF_EXIT_INSN(),
8836 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008837 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008838 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08008839 .retval = POINTER_VALUE,
Jann Horn2255f8d2017-12-18 20:12:01 -08008840 .result_unpriv = REJECT,
8841 .errstr_unpriv = "R0 leaks addr as return value"
8842 },
8843 {
8844 "pointer/scalar confusion in state equality check (way 2)",
8845 .insns = {
8846 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8847 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8849 BPF_LD_MAP_FD(BPF_REG_1, 0),
8850 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8851 BPF_FUNC_map_lookup_elem),
8852 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
8853 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8854 BPF_JMP_A(1),
8855 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8856 BPF_EXIT_INSN(),
8857 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008858 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008859 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08008860 .retval = POINTER_VALUE,
Jann Horn2255f8d2017-12-18 20:12:01 -08008861 .result_unpriv = REJECT,
8862 .errstr_unpriv = "R0 leaks addr as return value"
8863 },
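	/* Variable-offset accesses: a ctx or stack pointer whose offset is
	 * only known as a range (0 or 4 here, from the AND with 4) cannot be
	 * dereferenced, either directly or indirectly via a helper argument.
	 */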
8864 {
Edward Cree69c4e8a2017-08-07 15:29:51 +01008865 "variable-offset ctx access",
8866 .insns = {
8867 /* Get an unknown value */
8868 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8869 /* Make it small and 4-byte aligned */
8870 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8871 /* add it to skb. We now have either &skb->len or
8872 * &skb->pkt_type, but we don't know which
8873 */
8874 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8875 /* dereference it */
8876 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8877 BPF_EXIT_INSN(),
8878 },
8879 .errstr = "variable ctx access var_off=(0x0; 0x4)",
8880 .result = REJECT,
8881 .prog_type = BPF_PROG_TYPE_LWT_IN,
8882 },
8883 {
8884 "variable-offset stack access",
8885 .insns = {
8886 /* Fill the top 8 bytes of the stack */
8887 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8888 /* Get an unknown value */
8889 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8890 /* Make it small and 4-byte aligned */
8891 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8892 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8893 /* add it to fp. We now have either fp-4 or fp-8, but
8894 * we don't know which
8895 */
8896 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8897 /* dereference it */
8898 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
8899 BPF_EXIT_INSN(),
8900 },
8901 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
8902 .result = REJECT,
8903 .prog_type = BPF_PROG_TYPE_LWT_IN,
8904 },
Edward Creed893dc22017-08-23 15:09:46 +01008905 {
Jann Horn2255f8d2017-12-18 20:12:01 -08008906 "indirect variable-offset stack access",
8907 .insns = {
8908 /* Fill the top 8 bytes of the stack */
8909 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8910 /* Get an unknown value */
8911 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8912 /* Make it small and 4-byte aligned */
8913 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8914 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8915 /* add it to fp. We now have either fp-4 or fp-8, but
8916 * we don't know which
8917 */
8918 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8919 /* dereference it indirectly */
8920 BPF_LD_MAP_FD(BPF_REG_1, 0),
8921 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8922 BPF_FUNC_map_lookup_elem),
8923 BPF_MOV64_IMM(BPF_REG_0, 0),
8924 BPF_EXIT_INSN(),
8925 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008926 .fixup_map_hash_8b = { 5 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008927 .errstr = "variable stack read R2",
8928 .result = REJECT,
8929 .prog_type = BPF_PROG_TYPE_LWT_IN,
8930 },
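	/* Direct fp-relative accesses whose accumulated offset wraps around
	 * or exceeds the 32-bit range must be rejected before the store is
	 * attempted.
	 */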
8931 {
8932 "direct stack access with 32-bit wraparound. test1",
8933 .insns = {
8934 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8935 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8936 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8937 BPF_MOV32_IMM(BPF_REG_0, 0),
8938 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8939 BPF_EXIT_INSN()
8940 },
8941 .errstr = "fp pointer and 2147483647",
8942 .result = REJECT
8943 },
8944 {
8945 "direct stack access with 32-bit wraparound. test2",
8946 .insns = {
8947 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8949 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8950 BPF_MOV32_IMM(BPF_REG_0, 0),
8951 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8952 BPF_EXIT_INSN()
8953 },
8954 .errstr = "fp pointer and 1073741823",
8955 .result = REJECT
8956 },
8957 {
8958 "direct stack access with 32-bit wraparound. test3",
8959 .insns = {
8960 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8961 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8962 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8963 BPF_MOV32_IMM(BPF_REG_0, 0),
8964 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8965 BPF_EXIT_INSN()
8966 },
8967 .errstr = "fp pointer offset 1073741822",
8968 .result = REJECT
8969 },
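	/* Liveness pruning and write screening: a write to R0 in one branch
	 * must not make a pruned sibling path look safe; the path that skips
	 * both writes exits without ever writing R0 and must be rejected.
	 */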
8970 {
Edward Creed893dc22017-08-23 15:09:46 +01008971 "liveness pruning and write screening",
8972 .insns = {
8973 /* Get an unknown value */
8974 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8975 /* branch conditions teach us nothing about R2 */
8976 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8977 BPF_MOV64_IMM(BPF_REG_0, 0),
8978 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8979 BPF_MOV64_IMM(BPF_REG_0, 0),
8980 BPF_EXIT_INSN(),
8981 },
8982 .errstr = "R0 !read_ok",
8983 .result = REJECT,
8984 .prog_type = BPF_PROG_TYPE_LWT_IN,
8985 },
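	/* Pruning with variable map_value bounds: an index bounded only from
	 * above by a signed compare is still unbounded below, so the
	 * resulting map value store must be rejected.
	 */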
Alexei Starovoitovdf20cb72017-08-23 15:10:26 +01008986 {
8987 "varlen_map_value_access pruning",
8988 .insns = {
8989 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8990 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8991 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8992 BPF_LD_MAP_FD(BPF_REG_1, 0),
8993 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8994 BPF_FUNC_map_lookup_elem),
8995 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8996 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
8997 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
8998 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
8999 BPF_MOV32_IMM(BPF_REG_1, 0),
9000 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
9001 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9002 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
9003 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
9004 offsetof(struct test_val, foo)),
9005 BPF_EXIT_INSN(),
9006 },
Prashant Bhole908142e2018-10-09 10:04:53 +09009007 .fixup_map_hash_48b = { 3 },
Alexei Starovoitovdf20cb72017-08-23 15:10:26 +01009008 .errstr_unpriv = "R0 leaks addr",
9009 .errstr = "R0 unbounded memory access",
9010 .result_unpriv = REJECT,
9011 .result = REJECT,
9012 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9013 },
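	/* BPF_END is only defined for 32-bit ALU; the 64-bit encoding below
	 * is an unknown opcode (0xd7) and must be rejected.
	 */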
Edward Creee67b8a62017-09-15 14:37:38 +01009014 {
9015 "invalid 64-bit BPF_END",
9016 .insns = {
9017 BPF_MOV32_IMM(BPF_REG_0, 0),
9018 {
9019 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
9020 .dst_reg = BPF_REG_0,
9021 .src_reg = 0,
9022 .off = 0,
9023 .imm = 32,
9024 },
9025 BPF_EXIT_INSN(),
9026 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +01009027 .errstr = "unknown opcode d7",
Edward Creee67b8a62017-09-15 14:37:38 +01009028 .result = REJECT,
9029 },
Daniel Borkmann22c88522017-09-25 02:25:53 +02009030 {
Daniel Borkmann65073a62018-01-31 12:58:56 +01009031 "XDP, using ifindex from netdev",
9032 .insns = {
9033 BPF_MOV64_IMM(BPF_REG_0, 0),
9034 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9035 offsetof(struct xdp_md, ingress_ifindex)),
9036 BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
9037 BPF_MOV64_IMM(BPF_REG_0, 1),
9038 BPF_EXIT_INSN(),
9039 },
9040 .result = ACCEPT,
9041 .prog_type = BPF_PROG_TYPE_XDP,
9042 .retval = 1,
9043 },
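	/* "meta access" tests: a pointer derived from data_meta may only be
	 * dereferenced after it has been bounds checked against data; the
	 * variants below mix valid checks with missing, reversed or
	 * insufficient ones.
	 */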
9044 {
Daniel Borkmann22c88522017-09-25 02:25:53 +02009045 "meta access, test1",
9046 .insns = {
9047 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9048 offsetof(struct xdp_md, data_meta)),
9049 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9050 offsetof(struct xdp_md, data)),
9051 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9053 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9054 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9055 BPF_MOV64_IMM(BPF_REG_0, 0),
9056 BPF_EXIT_INSN(),
9057 },
9058 .result = ACCEPT,
9059 .prog_type = BPF_PROG_TYPE_XDP,
9060 },
9061 {
9062 "meta access, test2",
9063 .insns = {
9064 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9065 offsetof(struct xdp_md, data_meta)),
9066 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9067 offsetof(struct xdp_md, data)),
9068 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9069 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
9070 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9071 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9072 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9073 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9074 BPF_MOV64_IMM(BPF_REG_0, 0),
9075 BPF_EXIT_INSN(),
9076 },
9077 .result = REJECT,
9078 .errstr = "invalid access to packet, off=-8",
9079 .prog_type = BPF_PROG_TYPE_XDP,
9080 },
9081 {
9082 "meta access, test3",
9083 .insns = {
9084 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9085 offsetof(struct xdp_md, data_meta)),
9086 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9087 offsetof(struct xdp_md, data_end)),
9088 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9089 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9090 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9091 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9092 BPF_MOV64_IMM(BPF_REG_0, 0),
9093 BPF_EXIT_INSN(),
9094 },
9095 .result = REJECT,
9096 .errstr = "invalid access to packet",
9097 .prog_type = BPF_PROG_TYPE_XDP,
9098 },
9099 {
9100 "meta access, test4",
9101 .insns = {
9102 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9103 offsetof(struct xdp_md, data_meta)),
9104 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9105 offsetof(struct xdp_md, data_end)),
9106 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9107 offsetof(struct xdp_md, data)),
9108 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
9109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9110 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9111 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9112 BPF_MOV64_IMM(BPF_REG_0, 0),
9113 BPF_EXIT_INSN(),
9114 },
9115 .result = REJECT,
9116 .errstr = "invalid access to packet",
9117 .prog_type = BPF_PROG_TYPE_XDP,
9118 },
9119 {
9120 "meta access, test5",
9121 .insns = {
9122 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9123 offsetof(struct xdp_md, data_meta)),
9124 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9125 offsetof(struct xdp_md, data)),
9126 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9128 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
9129 BPF_MOV64_IMM(BPF_REG_2, -8),
9130 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9131 BPF_FUNC_xdp_adjust_meta),
9132 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9133 BPF_MOV64_IMM(BPF_REG_0, 0),
9134 BPF_EXIT_INSN(),
9135 },
9136 .result = REJECT,
9137 .errstr = "R3 !read_ok",
9138 .prog_type = BPF_PROG_TYPE_XDP,
9139 },
9140 {
9141 "meta access, test6",
9142 .insns = {
9143 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9144 offsetof(struct xdp_md, data_meta)),
9145 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9146 offsetof(struct xdp_md, data)),
9147 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9149 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9150 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9151 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
9152 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9153 BPF_MOV64_IMM(BPF_REG_0, 0),
9154 BPF_EXIT_INSN(),
9155 },
9156 .result = REJECT,
9157 .errstr = "invalid access to packet",
9158 .prog_type = BPF_PROG_TYPE_XDP,
9159 },
9160 {
9161 "meta access, test7",
9162 .insns = {
9163 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9164 offsetof(struct xdp_md, data_meta)),
9165 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9166 offsetof(struct xdp_md, data)),
9167 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9169 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9171 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9172 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9173 BPF_MOV64_IMM(BPF_REG_0, 0),
9174 BPF_EXIT_INSN(),
9175 },
9176 .result = ACCEPT,
9177 .prog_type = BPF_PROG_TYPE_XDP,
9178 },
9179 {
9180 "meta access, test8",
9181 .insns = {
9182 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9183 offsetof(struct xdp_md, data_meta)),
9184 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9185 offsetof(struct xdp_md, data)),
9186 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9187 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9188 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9189 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9190 BPF_MOV64_IMM(BPF_REG_0, 0),
9191 BPF_EXIT_INSN(),
9192 },
9193 .result = ACCEPT,
9194 .prog_type = BPF_PROG_TYPE_XDP,
9195 },
9196 {
9197 "meta access, test9",
9198 .insns = {
9199 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9200 offsetof(struct xdp_md, data_meta)),
9201 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9202 offsetof(struct xdp_md, data)),
9203 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9205 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
9206 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9207 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9208 BPF_MOV64_IMM(BPF_REG_0, 0),
9209 BPF_EXIT_INSN(),
9210 },
9211 .result = REJECT,
9212 .errstr = "invalid access to packet",
9213 .prog_type = BPF_PROG_TYPE_XDP,
9214 },
9215 {
9216 "meta access, test10",
9217 .insns = {
9218 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9219 offsetof(struct xdp_md, data_meta)),
9220 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9221 offsetof(struct xdp_md, data)),
9222 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9223 offsetof(struct xdp_md, data_end)),
9224 BPF_MOV64_IMM(BPF_REG_5, 42),
9225 BPF_MOV64_IMM(BPF_REG_6, 24),
9226 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9227 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9228 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9229 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9230 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
9231 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9232 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9234 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
9235 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
9236 BPF_MOV64_IMM(BPF_REG_0, 0),
9237 BPF_EXIT_INSN(),
9238 },
9239 .result = REJECT,
9240 .errstr = "invalid access to packet",
9241 .prog_type = BPF_PROG_TYPE_XDP,
9242 },
9243 {
9244 "meta access, test11",
9245 .insns = {
9246 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9247 offsetof(struct xdp_md, data_meta)),
9248 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9249 offsetof(struct xdp_md, data)),
9250 BPF_MOV64_IMM(BPF_REG_5, 42),
9251 BPF_MOV64_IMM(BPF_REG_6, 24),
9252 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9253 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9254 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9255 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9256 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
9257 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9258 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9260 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
9261 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
9262 BPF_MOV64_IMM(BPF_REG_0, 0),
9263 BPF_EXIT_INSN(),
9264 },
9265 .result = ACCEPT,
9266 .prog_type = BPF_PROG_TYPE_XDP,
9267 },
9268 {
9269 "meta access, test12",
9270 .insns = {
9271 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9272 offsetof(struct xdp_md, data_meta)),
9273 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9274 offsetof(struct xdp_md, data)),
9275 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9276 offsetof(struct xdp_md, data_end)),
9277 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9279 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
9280 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9281 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9283 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
9284 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9285 BPF_MOV64_IMM(BPF_REG_0, 0),
9286 BPF_EXIT_INSN(),
9287 },
9288 .result = ACCEPT,
9289 .prog_type = BPF_PROG_TYPE_XDP,
9290 },
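	/* Adding an offset to the ctx pointer turns it into a modified ctx
	 * pointer; the subsequent ctx load must be rejected.
	 */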
Alexei Starovoitov390ee7e2017-10-02 22:50:23 -07009291 {
Jakub Kicinski28e33f92017-10-16 11:16:55 -07009292 "arithmetic ops make PTR_TO_CTX unusable",
9293 .insns = {
9294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
9295 offsetof(struct __sk_buff, data) -
9296 offsetof(struct __sk_buff, mark)),
9297 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9298 offsetof(struct __sk_buff, mark)),
9299 BPF_EXIT_INSN(),
9300 },
Daniel Borkmann58990d12018-06-07 17:40:03 +02009301 .errstr = "dereference of modified ctx ptr",
Jakub Kicinski28e33f92017-10-16 11:16:55 -07009302 .result = REJECT,
9303 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9304 },
Daniel Borkmannb37242c2017-10-21 02:34:23 +02009305 {
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08009306 "pkt_end - pkt_start is allowed",
9307 .insns = {
9308 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9309 offsetof(struct __sk_buff, data_end)),
9310 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9311 offsetof(struct __sk_buff, data)),
9312 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
9313 BPF_EXIT_INSN(),
9314 },
9315 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009316 .retval = TEST_DATA_LEN,
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08009317 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9318 },
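	/* The XDP pkt read series below first checks that pkt_end itself may
	 * not be modified, then walks every comparison direction and operand
	 * order (JGT/JLT/JGE/JLE, pkt_data' vs pkt_end) and verifies that
	 * only accesses proven in-bounds on the taken branch are accepted.
	 */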
9319 {
Daniel Borkmannb37242c2017-10-21 02:34:23 +02009320 "XDP pkt read, pkt_end mangling, bad access 1",
9321 .insns = {
9322 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9323 offsetof(struct xdp_md, data)),
9324 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9325 offsetof(struct xdp_md, data_end)),
9326 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9329 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9330 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9331 BPF_MOV64_IMM(BPF_REG_0, 0),
9332 BPF_EXIT_INSN(),
9333 },
Joe Stringeraad2eea2018-10-02 13:35:30 -07009334 .errstr = "R3 pointer arithmetic on pkt_end",
Daniel Borkmannb37242c2017-10-21 02:34:23 +02009335 .result = REJECT,
9336 .prog_type = BPF_PROG_TYPE_XDP,
9337 },
9338 {
9339 "XDP pkt read, pkt_end mangling, bad access 2",
9340 .insns = {
9341 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9342 offsetof(struct xdp_md, data)),
9343 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9344 offsetof(struct xdp_md, data_end)),
9345 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9346 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9347 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9348 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9349 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9350 BPF_MOV64_IMM(BPF_REG_0, 0),
9351 BPF_EXIT_INSN(),
9352 },
Joe Stringeraad2eea2018-10-02 13:35:30 -07009353 .errstr = "R3 pointer arithmetic on pkt_end",
Daniel Borkmannb37242c2017-10-21 02:34:23 +02009354 .result = REJECT,
9355 .prog_type = BPF_PROG_TYPE_XDP,
9356 },
9357 {
9358 "XDP pkt read, pkt_data' > pkt_end, good access",
9359 .insns = {
9360 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9361 offsetof(struct xdp_md, data)),
9362 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9363 offsetof(struct xdp_md, data_end)),
9364 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9365 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9366 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9367 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9368 BPF_MOV64_IMM(BPF_REG_0, 0),
9369 BPF_EXIT_INSN(),
9370 },
9371 .result = ACCEPT,
9372 .prog_type = BPF_PROG_TYPE_XDP,
9373 },
9374 {
9375 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
9376 .insns = {
9377 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9378 offsetof(struct xdp_md, data)),
9379 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9380 offsetof(struct xdp_md, data_end)),
9381 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9383 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9384 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9385 BPF_MOV64_IMM(BPF_REG_0, 0),
9386 BPF_EXIT_INSN(),
9387 },
9388 .errstr = "R1 offset is outside of the packet",
9389 .result = REJECT,
9390 .prog_type = BPF_PROG_TYPE_XDP,
9391 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9392 },
9393 {
9394 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
9395 .insns = {
9396 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9397 offsetof(struct xdp_md, data)),
9398 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9399 offsetof(struct xdp_md, data_end)),
9400 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9401 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9402 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9403 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9404 BPF_MOV64_IMM(BPF_REG_0, 0),
9405 BPF_EXIT_INSN(),
9406 },
9407 .errstr = "R1 offset is outside of the packet",
9408 .result = REJECT,
9409 .prog_type = BPF_PROG_TYPE_XDP,
9410 },
9411 {
9412 "XDP pkt read, pkt_end > pkt_data', good access",
9413 .insns = {
9414 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9415 offsetof(struct xdp_md, data)),
9416 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9417 offsetof(struct xdp_md, data_end)),
9418 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9419 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9420 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9421 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9422 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9423 BPF_MOV64_IMM(BPF_REG_0, 0),
9424 BPF_EXIT_INSN(),
9425 },
9426 .result = ACCEPT,
9427 .prog_type = BPF_PROG_TYPE_XDP,
9428 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9429 },
9430 {
9431 "XDP pkt read, pkt_end > pkt_data', bad access 1",
9432 .insns = {
9433 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9434 offsetof(struct xdp_md, data)),
9435 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9436 offsetof(struct xdp_md, data_end)),
9437 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9438 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9439 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9440 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9441 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9442 BPF_MOV64_IMM(BPF_REG_0, 0),
9443 BPF_EXIT_INSN(),
9444 },
9445 .errstr = "R1 offset is outside of the packet",
9446 .result = REJECT,
9447 .prog_type = BPF_PROG_TYPE_XDP,
9448 },
9449 {
9450 "XDP pkt read, pkt_end > pkt_data', bad access 2",
9451 .insns = {
9452 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9453 offsetof(struct xdp_md, data)),
9454 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9455 offsetof(struct xdp_md, data_end)),
9456 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9458 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9459 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9460 BPF_MOV64_IMM(BPF_REG_0, 0),
9461 BPF_EXIT_INSN(),
9462 },
9463 .errstr = "R1 offset is outside of the packet",
9464 .result = REJECT,
9465 .prog_type = BPF_PROG_TYPE_XDP,
9466 },
9467 {
9468 "XDP pkt read, pkt_data' < pkt_end, good access",
9469 .insns = {
9470 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9471 offsetof(struct xdp_md, data)),
9472 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9473 offsetof(struct xdp_md, data_end)),
9474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9476 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9477 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9478 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9479 BPF_MOV64_IMM(BPF_REG_0, 0),
9480 BPF_EXIT_INSN(),
9481 },
9482 .result = ACCEPT,
9483 .prog_type = BPF_PROG_TYPE_XDP,
9484 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9485 },
9486 {
9487 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
9488 .insns = {
9489 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9490 offsetof(struct xdp_md, data)),
9491 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9492 offsetof(struct xdp_md, data_end)),
9493 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9494 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9495 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9496 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9497 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9498 BPF_MOV64_IMM(BPF_REG_0, 0),
9499 BPF_EXIT_INSN(),
9500 },
9501 .errstr = "R1 offset is outside of the packet",
9502 .result = REJECT,
9503 .prog_type = BPF_PROG_TYPE_XDP,
9504 },
9505 {
9506 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
9507 .insns = {
9508 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9509 offsetof(struct xdp_md, data)),
9510 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9511 offsetof(struct xdp_md, data_end)),
9512 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9514 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9515 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9516 BPF_MOV64_IMM(BPF_REG_0, 0),
9517 BPF_EXIT_INSN(),
9518 },
9519 .errstr = "R1 offset is outside of the packet",
9520 .result = REJECT,
9521 .prog_type = BPF_PROG_TYPE_XDP,
9522 },
9523 {
9524 "XDP pkt read, pkt_end < pkt_data', good access",
9525 .insns = {
9526 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9527 offsetof(struct xdp_md, data)),
9528 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9529 offsetof(struct xdp_md, data_end)),
9530 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9532 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9533 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9534 BPF_MOV64_IMM(BPF_REG_0, 0),
9535 BPF_EXIT_INSN(),
9536 },
9537 .result = ACCEPT,
9538 .prog_type = BPF_PROG_TYPE_XDP,
9539 },
9540 {
9541 "XDP pkt read, pkt_end < pkt_data', bad access 1",
9542 .insns = {
9543 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9544 offsetof(struct xdp_md, data)),
9545 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9546 offsetof(struct xdp_md, data_end)),
9547 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9549 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9550 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9551 BPF_MOV64_IMM(BPF_REG_0, 0),
9552 BPF_EXIT_INSN(),
9553 },
9554 .errstr = "R1 offset is outside of the packet",
9555 .result = REJECT,
9556 .prog_type = BPF_PROG_TYPE_XDP,
9557 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9558 },
9559 {
9560 "XDP pkt read, pkt_end < pkt_data', bad access 2",
9561 .insns = {
9562 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9563 offsetof(struct xdp_md, data)),
9564 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9565 offsetof(struct xdp_md, data_end)),
9566 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9568 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9569 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9570 BPF_MOV64_IMM(BPF_REG_0, 0),
9571 BPF_EXIT_INSN(),
9572 },
9573 .errstr = "R1 offset is outside of the packet",
9574 .result = REJECT,
9575 .prog_type = BPF_PROG_TYPE_XDP,
9576 },
9577 {
9578 "XDP pkt read, pkt_data' >= pkt_end, good access",
9579 .insns = {
9580 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9581 offsetof(struct xdp_md, data)),
9582 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9583 offsetof(struct xdp_md, data_end)),
9584 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9586 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9587 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9588 BPF_MOV64_IMM(BPF_REG_0, 0),
9589 BPF_EXIT_INSN(),
9590 },
9591 .result = ACCEPT,
9592 .prog_type = BPF_PROG_TYPE_XDP,
9593 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9594 },
9595 {
9596 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
9597 .insns = {
9598 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9599 offsetof(struct xdp_md, data)),
9600 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9601 offsetof(struct xdp_md, data_end)),
9602 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9603 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9604 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9605 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9606 BPF_MOV64_IMM(BPF_REG_0, 0),
9607 BPF_EXIT_INSN(),
9608 },
9609 .errstr = "R1 offset is outside of the packet",
9610 .result = REJECT,
9611 .prog_type = BPF_PROG_TYPE_XDP,
9612 },
9613 {
9614 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
9615 .insns = {
9616 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9617 offsetof(struct xdp_md, data)),
9618 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9619 offsetof(struct xdp_md, data_end)),
9620 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9621 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9622 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9623 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9624 BPF_MOV64_IMM(BPF_REG_0, 0),
9625 BPF_EXIT_INSN(),
9626 },
9627 .errstr = "R1 offset is outside of the packet",
9628 .result = REJECT,
9629 .prog_type = BPF_PROG_TYPE_XDP,
9630 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9631 },
9632 {
9633 "XDP pkt read, pkt_end >= pkt_data', good access",
9634 .insns = {
9635 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9636 offsetof(struct xdp_md, data)),
9637 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9638 offsetof(struct xdp_md, data_end)),
9639 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9641 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9642 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9643 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9644 BPF_MOV64_IMM(BPF_REG_0, 0),
9645 BPF_EXIT_INSN(),
9646 },
9647 .result = ACCEPT,
9648 .prog_type = BPF_PROG_TYPE_XDP,
9649 },
9650 {
9651 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
9652 .insns = {
9653 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9654 offsetof(struct xdp_md, data)),
9655 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9656 offsetof(struct xdp_md, data_end)),
9657 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9658 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9659 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9660 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9661 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9662 BPF_MOV64_IMM(BPF_REG_0, 0),
9663 BPF_EXIT_INSN(),
9664 },
9665 .errstr = "R1 offset is outside of the packet",
9666 .result = REJECT,
9667 .prog_type = BPF_PROG_TYPE_XDP,
9668 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9669 },
9670 {
9671 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
9672 .insns = {
9673 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9674 offsetof(struct xdp_md, data)),
9675 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9676 offsetof(struct xdp_md, data_end)),
9677 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9678 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9679 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9680 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9681 BPF_MOV64_IMM(BPF_REG_0, 0),
9682 BPF_EXIT_INSN(),
9683 },
9684 .errstr = "R1 offset is outside of the packet",
9685 .result = REJECT,
9686 .prog_type = BPF_PROG_TYPE_XDP,
9687 },
9688 {
9689 "XDP pkt read, pkt_data' <= pkt_end, good access",
9690 .insns = {
9691 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9692 offsetof(struct xdp_md, data)),
9693 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9694 offsetof(struct xdp_md, data_end)),
9695 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9697 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9698 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9699 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9700 BPF_MOV64_IMM(BPF_REG_0, 0),
9701 BPF_EXIT_INSN(),
9702 },
9703 .result = ACCEPT,
9704 .prog_type = BPF_PROG_TYPE_XDP,
9705 },
9706 {
9707 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
9708 .insns = {
9709 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9710 offsetof(struct xdp_md, data)),
9711 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9712 offsetof(struct xdp_md, data_end)),
9713 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9715 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9716 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9717 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9718 BPF_MOV64_IMM(BPF_REG_0, 0),
9719 BPF_EXIT_INSN(),
9720 },
9721 .errstr = "R1 offset is outside of the packet",
9722 .result = REJECT,
9723 .prog_type = BPF_PROG_TYPE_XDP,
9724 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9725 },
9726 {
9727 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
9728 .insns = {
9729 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9730 offsetof(struct xdp_md, data)),
9731 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9732 offsetof(struct xdp_md, data_end)),
9733 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9735 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9736 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9737 BPF_MOV64_IMM(BPF_REG_0, 0),
9738 BPF_EXIT_INSN(),
9739 },
9740 .errstr = "R1 offset is outside of the packet",
9741 .result = REJECT,
9742 .prog_type = BPF_PROG_TYPE_XDP,
9743 },
9744 {
9745 "XDP pkt read, pkt_end <= pkt_data', good access",
9746 .insns = {
9747 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9748 offsetof(struct xdp_md, data)),
9749 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9750 offsetof(struct xdp_md, data_end)),
9751 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9753 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9754 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9755 BPF_MOV64_IMM(BPF_REG_0, 0),
9756 BPF_EXIT_INSN(),
9757 },
9758 .result = ACCEPT,
9759 .prog_type = BPF_PROG_TYPE_XDP,
9760 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9761 },
9762 {
9763 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
9764 .insns = {
9765 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9766 offsetof(struct xdp_md, data)),
9767 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9768 offsetof(struct xdp_md, data_end)),
9769 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9770 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9771 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9772 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9773 BPF_MOV64_IMM(BPF_REG_0, 0),
9774 BPF_EXIT_INSN(),
9775 },
9776 .errstr = "R1 offset is outside of the packet",
9777 .result = REJECT,
9778 .prog_type = BPF_PROG_TYPE_XDP,
9779 },
9780 {
9781 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
9782 .insns = {
9783 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9784 offsetof(struct xdp_md, data)),
9785 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9786 offsetof(struct xdp_md, data_end)),
9787 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9788 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9789 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9790 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9791 BPF_MOV64_IMM(BPF_REG_0, 0),
9792 BPF_EXIT_INSN(),
9793 },
9794 .errstr = "R1 offset is outside of the packet",
9795 .result = REJECT,
9796 .prog_type = BPF_PROG_TYPE_XDP,
9797 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9798 },
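	/* Same comparison matrix as above, but for pointers derived from
	 * data_meta checked against data.
	 */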
Daniel Borkmannb06723d2017-11-01 23:58:09 +01009799 {
Daniel Borkmann634eab12017-11-01 23:58:11 +01009800 "XDP pkt read, pkt_meta' > pkt_data, good access",
9801 .insns = {
9802 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9803 offsetof(struct xdp_md, data_meta)),
9804 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9805 offsetof(struct xdp_md, data)),
9806 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9807 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9808 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9809 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9810 BPF_MOV64_IMM(BPF_REG_0, 0),
9811 BPF_EXIT_INSN(),
9812 },
9813 .result = ACCEPT,
9814 .prog_type = BPF_PROG_TYPE_XDP,
9815 },
9816 {
9817 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
9818 .insns = {
9819 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9820 offsetof(struct xdp_md, data_meta)),
9821 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9822 offsetof(struct xdp_md, data)),
9823 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9824 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9825 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9826 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9827 BPF_MOV64_IMM(BPF_REG_0, 0),
9828 BPF_EXIT_INSN(),
9829 },
9830 .errstr = "R1 offset is outside of the packet",
9831 .result = REJECT,
9832 .prog_type = BPF_PROG_TYPE_XDP,
9833 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9834 },
9835 {
9836 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
9837 .insns = {
9838 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9839 offsetof(struct xdp_md, data_meta)),
9840 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9841 offsetof(struct xdp_md, data)),
9842 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9844 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9845 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9846 BPF_MOV64_IMM(BPF_REG_0, 0),
9847 BPF_EXIT_INSN(),
9848 },
9849 .errstr = "R1 offset is outside of the packet",
9850 .result = REJECT,
9851 .prog_type = BPF_PROG_TYPE_XDP,
9852 },
9853 {
9854 "XDP pkt read, pkt_data > pkt_meta', good access",
9855 .insns = {
9856 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9857 offsetof(struct xdp_md, data_meta)),
9858 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9859 offsetof(struct xdp_md, data)),
9860 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9862 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9863 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9864 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9865 BPF_MOV64_IMM(BPF_REG_0, 0),
9866 BPF_EXIT_INSN(),
9867 },
9868 .result = ACCEPT,
9869 .prog_type = BPF_PROG_TYPE_XDP,
9870 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9871 },
9872 {
9873 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
9874 .insns = {
9875 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9876 offsetof(struct xdp_md, data_meta)),
9877 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9878 offsetof(struct xdp_md, data)),
9879 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9880 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9881 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9882 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9883 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9884 BPF_MOV64_IMM(BPF_REG_0, 0),
9885 BPF_EXIT_INSN(),
9886 },
9887 .errstr = "R1 offset is outside of the packet",
9888 .result = REJECT,
9889 .prog_type = BPF_PROG_TYPE_XDP,
9890 },
9891 {
9892 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
9893 .insns = {
9894 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9895 offsetof(struct xdp_md, data_meta)),
9896 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9897 offsetof(struct xdp_md, data)),
9898 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9900 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9901 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9902 BPF_MOV64_IMM(BPF_REG_0, 0),
9903 BPF_EXIT_INSN(),
9904 },
9905 .errstr = "R1 offset is outside of the packet",
9906 .result = REJECT,
9907 .prog_type = BPF_PROG_TYPE_XDP,
9908 },
9909 {
9910 "XDP pkt read, pkt_meta' < pkt_data, good access",
9911 .insns = {
9912 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9913 offsetof(struct xdp_md, data_meta)),
9914 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9915 offsetof(struct xdp_md, data)),
9916 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9918 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9919 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9920 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9921 BPF_MOV64_IMM(BPF_REG_0, 0),
9922 BPF_EXIT_INSN(),
9923 },
9924 .result = ACCEPT,
9925 .prog_type = BPF_PROG_TYPE_XDP,
9926 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9927 },
9928 {
9929 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
9930 .insns = {
9931 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9932 offsetof(struct xdp_md, data_meta)),
9933 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9934 offsetof(struct xdp_md, data)),
9935 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9936 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9937 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9938 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9939 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9940 BPF_MOV64_IMM(BPF_REG_0, 0),
9941 BPF_EXIT_INSN(),
9942 },
9943 .errstr = "R1 offset is outside of the packet",
9944 .result = REJECT,
9945 .prog_type = BPF_PROG_TYPE_XDP,
9946 },
9947 {
9948 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
9949 .insns = {
9950 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9951 offsetof(struct xdp_md, data_meta)),
9952 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9953 offsetof(struct xdp_md, data)),
9954 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9955 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9956 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9957 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9958 BPF_MOV64_IMM(BPF_REG_0, 0),
9959 BPF_EXIT_INSN(),
9960 },
9961 .errstr = "R1 offset is outside of the packet",
9962 .result = REJECT,
9963 .prog_type = BPF_PROG_TYPE_XDP,
9964 },
9965 {
9966 "XDP pkt read, pkt_data < pkt_meta', good access",
9967 .insns = {
9968 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9969 offsetof(struct xdp_md, data_meta)),
9970 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9971 offsetof(struct xdp_md, data)),
9972 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9973 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9974 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9975 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9976 BPF_MOV64_IMM(BPF_REG_0, 0),
9977 BPF_EXIT_INSN(),
9978 },
9979 .result = ACCEPT,
9980 .prog_type = BPF_PROG_TYPE_XDP,
9981 },
9982 {
9983 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
9984 .insns = {
9985 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9986 offsetof(struct xdp_md, data_meta)),
9987 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9988 offsetof(struct xdp_md, data)),
9989 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9991 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9992 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9993 BPF_MOV64_IMM(BPF_REG_0, 0),
9994 BPF_EXIT_INSN(),
9995 },
9996 .errstr = "R1 offset is outside of the packet",
9997 .result = REJECT,
9998 .prog_type = BPF_PROG_TYPE_XDP,
9999 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10000 },
10001 {
10002 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
10003 .insns = {
10004 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10005 offsetof(struct xdp_md, data_meta)),
10006 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10007 offsetof(struct xdp_md, data)),
10008 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10010 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
10011 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10012 BPF_MOV64_IMM(BPF_REG_0, 0),
10013 BPF_EXIT_INSN(),
10014 },
10015 .errstr = "R1 offset is outside of the packet",
10016 .result = REJECT,
10017 .prog_type = BPF_PROG_TYPE_XDP,
10018 },
10019 {
10020 "XDP pkt read, pkt_meta' >= pkt_data, good access",
10021 .insns = {
10022 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10023 offsetof(struct xdp_md, data_meta)),
10024 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10025 offsetof(struct xdp_md, data)),
10026 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10027 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10028 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10029 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10030 BPF_MOV64_IMM(BPF_REG_0, 0),
10031 BPF_EXIT_INSN(),
10032 },
10033 .result = ACCEPT,
10034 .prog_type = BPF_PROG_TYPE_XDP,
10035 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10036 },
10037 {
10038 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
10039 .insns = {
10040 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10041 offsetof(struct xdp_md, data_meta)),
10042 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10043 offsetof(struct xdp_md, data)),
10044 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10045 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10046 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10047 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10048 BPF_MOV64_IMM(BPF_REG_0, 0),
10049 BPF_EXIT_INSN(),
10050 },
10051 .errstr = "R1 offset is outside of the packet",
10052 .result = REJECT,
10053 .prog_type = BPF_PROG_TYPE_XDP,
10054 },
10055 {
10056 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
10057 .insns = {
10058 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10059 offsetof(struct xdp_md, data_meta)),
10060 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10061 offsetof(struct xdp_md, data)),
10062 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10064 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10065 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10066 BPF_MOV64_IMM(BPF_REG_0, 0),
10067 BPF_EXIT_INSN(),
10068 },
10069 .errstr = "R1 offset is outside of the packet",
10070 .result = REJECT,
10071 .prog_type = BPF_PROG_TYPE_XDP,
10072 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10073 },
10074 {
10075 "XDP pkt read, pkt_data >= pkt_meta', good access",
10076 .insns = {
10077 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10078 offsetof(struct xdp_md, data_meta)),
10079 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10080 offsetof(struct xdp_md, data)),
10081 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10083 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10084 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10085 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10086 BPF_MOV64_IMM(BPF_REG_0, 0),
10087 BPF_EXIT_INSN(),
10088 },
10089 .result = ACCEPT,
10090 .prog_type = BPF_PROG_TYPE_XDP,
10091 },
10092 {
10093 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
10094 .insns = {
10095 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10096 offsetof(struct xdp_md, data_meta)),
10097 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10098 offsetof(struct xdp_md, data)),
10099 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10101 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10102 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10103 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10104 BPF_MOV64_IMM(BPF_REG_0, 0),
10105 BPF_EXIT_INSN(),
10106 },
10107 .errstr = "R1 offset is outside of the packet",
10108 .result = REJECT,
10109 .prog_type = BPF_PROG_TYPE_XDP,
10110 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10111 },
10112 {
10113 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
10114 .insns = {
10115 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10116 offsetof(struct xdp_md, data_meta)),
10117 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10118 offsetof(struct xdp_md, data)),
10119 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10121 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10122 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10123 BPF_MOV64_IMM(BPF_REG_0, 0),
10124 BPF_EXIT_INSN(),
10125 },
10126 .errstr = "R1 offset is outside of the packet",
10127 .result = REJECT,
10128 .prog_type = BPF_PROG_TYPE_XDP,
10129 },
10130 {
10131 "XDP pkt read, pkt_meta' <= pkt_data, good access",
10132 .insns = {
10133 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10134 offsetof(struct xdp_md, data_meta)),
10135 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10136 offsetof(struct xdp_md, data)),
10137 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10139 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10140 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10141 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10142 BPF_MOV64_IMM(BPF_REG_0, 0),
10143 BPF_EXIT_INSN(),
10144 },
10145 .result = ACCEPT,
10146 .prog_type = BPF_PROG_TYPE_XDP,
10147 },
10148 {
10149 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
10150 .insns = {
10151 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10152 offsetof(struct xdp_md, data_meta)),
10153 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10154 offsetof(struct xdp_md, data)),
10155 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10156 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10157 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10158 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10159 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10160 BPF_MOV64_IMM(BPF_REG_0, 0),
10161 BPF_EXIT_INSN(),
10162 },
10163 .errstr = "R1 offset is outside of the packet",
10164 .result = REJECT,
10165 .prog_type = BPF_PROG_TYPE_XDP,
10166 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10167 },
10168 {
10169 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
10170 .insns = {
10171 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10172 offsetof(struct xdp_md, data_meta)),
10173 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10174 offsetof(struct xdp_md, data)),
10175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10177 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10178 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10179 BPF_MOV64_IMM(BPF_REG_0, 0),
10180 BPF_EXIT_INSN(),
10181 },
10182 .errstr = "R1 offset is outside of the packet",
10183 .result = REJECT,
10184 .prog_type = BPF_PROG_TYPE_XDP,
10185 },
10186 {
10187 "XDP pkt read, pkt_data <= pkt_meta', good access",
10188 .insns = {
10189 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10190 offsetof(struct xdp_md, data_meta)),
10191 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10192 offsetof(struct xdp_md, data)),
10193 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10195 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10196 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10197 BPF_MOV64_IMM(BPF_REG_0, 0),
10198 BPF_EXIT_INSN(),
10199 },
10200 .result = ACCEPT,
10201 .prog_type = BPF_PROG_TYPE_XDP,
10202 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10203 },
10204 {
10205 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
10206 .insns = {
10207 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10208 offsetof(struct xdp_md, data_meta)),
10209 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10210 offsetof(struct xdp_md, data)),
10211 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10213 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10214 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10215 BPF_MOV64_IMM(BPF_REG_0, 0),
10216 BPF_EXIT_INSN(),
10217 },
10218 .errstr = "R1 offset is outside of the packet",
10219 .result = REJECT,
10220 .prog_type = BPF_PROG_TYPE_XDP,
10221 },
10222 {
10223 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
10224 .insns = {
10225 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10226 offsetof(struct xdp_md, data_meta)),
10227 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10228 offsetof(struct xdp_md, data)),
10229 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10230 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10231 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10232 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10233 BPF_MOV64_IMM(BPF_REG_0, 0),
10234 BPF_EXIT_INSN(),
10235 },
10236 .errstr = "R1 offset is outside of the packet",
10237 .result = REJECT,
10238 .prog_type = BPF_PROG_TYPE_XDP,
10239 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10240 },
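	/* "check deducing bounds from const": comparisons against an
	 * immediate must narrow the register's bounds; subtracting a pointer
	 * from a scalar, or doing pointer arithmetic with an unbounded
	 * register, must be rejected.
	 */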
10241 {
Daniel Borkmann6f161012018-01-18 01:15:21 +010010242 "check deducing bounds from const, 1",
10243 .insns = {
10244 BPF_MOV64_IMM(BPF_REG_0, 1),
10245 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
10246 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10247 BPF_EXIT_INSN(),
10248 },
10249 .result = REJECT,
10250 .errstr = "R0 tried to subtract pointer from scalar",
10251 },
10252 {
10253 "check deducing bounds from const, 2",
10254 .insns = {
10255 BPF_MOV64_IMM(BPF_REG_0, 1),
10256 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10257 BPF_EXIT_INSN(),
10258 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
10259 BPF_EXIT_INSN(),
10260 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10261 BPF_EXIT_INSN(),
10262 },
10263 .result = ACCEPT,
Yonghong Song35136922018-01-22 22:10:59 -080010264 .retval = 1,
Daniel Borkmann6f161012018-01-18 01:15:21 +010010265 },
10266 {
10267 "check deducing bounds from const, 3",
10268 .insns = {
10269 BPF_MOV64_IMM(BPF_REG_0, 0),
10270 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10271 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10272 BPF_EXIT_INSN(),
10273 },
10274 .result = REJECT,
10275 .errstr = "R0 tried to subtract pointer from scalar",
10276 },
10277 {
10278 "check deducing bounds from const, 4",
10279 .insns = {
10280 BPF_MOV64_IMM(BPF_REG_0, 0),
10281 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
10282 BPF_EXIT_INSN(),
10283 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10284 BPF_EXIT_INSN(),
10285 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10286 BPF_EXIT_INSN(),
10287 },
10288 .result = ACCEPT,
10289 },
10290 {
10291 "check deducing bounds from const, 5",
10292 .insns = {
10293 BPF_MOV64_IMM(BPF_REG_0, 0),
10294 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10295 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10296 BPF_EXIT_INSN(),
10297 },
10298 .result = REJECT,
10299 .errstr = "R0 tried to subtract pointer from scalar",
10300 },
10301 {
10302 "check deducing bounds from const, 6",
10303 .insns = {
10304 BPF_MOV64_IMM(BPF_REG_0, 0),
10305 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10306 BPF_EXIT_INSN(),
10307 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10308 BPF_EXIT_INSN(),
10309 },
10310 .result = REJECT,
10311 .errstr = "R0 tried to subtract pointer from scalar",
10312 },
10313 {
10314 "check deducing bounds from const, 7",
10315 .insns = {
10316 BPF_MOV64_IMM(BPF_REG_0, ~0),
10317 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10318 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10319 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10320 offsetof(struct __sk_buff, mark)),
10321 BPF_EXIT_INSN(),
10322 },
10323 .result = REJECT,
10324 .errstr = "dereference of modified ctx ptr",
10325 },
10326 {
10327 "check deducing bounds from const, 8",
10328 .insns = {
10329 BPF_MOV64_IMM(BPF_REG_0, ~0),
10330 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10331 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10332 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10333 offsetof(struct __sk_buff, mark)),
10334 BPF_EXIT_INSN(),
10335 },
10336 .result = REJECT,
10337 .errstr = "dereference of modified ctx ptr",
10338 },
10339 {
10340 "check deducing bounds from const, 9",
10341 .insns = {
10342 BPF_MOV64_IMM(BPF_REG_0, 0),
10343 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10344 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10345 BPF_EXIT_INSN(),
10346 },
10347 .result = REJECT,
10348 .errstr = "R0 tried to subtract pointer from scalar",
10349 },
10350 {
10351 "check deducing bounds from const, 10",
10352 .insns = {
10353 BPF_MOV64_IMM(BPF_REG_0, 0),
10354 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10355 /* Marks reg as unknown. */
10356 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10357 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10358 BPF_EXIT_INSN(),
10359 },
10360 .result = REJECT,
10361 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10362 },
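	/* CGROUP_SOCK programs may only return 0 or 1; the exit value's
	 * possible range must be provably within that set.
	 */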
10363 {
Daniel Borkmannb06723d2017-11-01 23:58:09 +010010364 "bpf_exit with invalid return code. test1",
10365 .insns = {
10366 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10367 BPF_EXIT_INSN(),
10368 },
10369 .errstr = "R0 has value (0x0; 0xffffffff)",
10370 .result = REJECT,
10371 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10372 },
10373 {
10374 "bpf_exit with invalid return code. test2",
10375 .insns = {
10376 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10377 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10378 BPF_EXIT_INSN(),
10379 },
10380 .result = ACCEPT,
10381 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10382 },
10383 {
10384 "bpf_exit with invalid return code. test3",
10385 .insns = {
10386 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10387 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10388 BPF_EXIT_INSN(),
10389 },
10390 .errstr = "R0 has value (0x0; 0x3)",
10391 .result = REJECT,
10392 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10393 },
10394 {
10395 "bpf_exit with invalid return code. test4",
10396 .insns = {
10397 BPF_MOV64_IMM(BPF_REG_0, 1),
10398 BPF_EXIT_INSN(),
10399 },
10400 .result = ACCEPT,
10401 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10402 },
10403 {
10404 "bpf_exit with invalid return code. test5",
10405 .insns = {
10406 BPF_MOV64_IMM(BPF_REG_0, 2),
10407 BPF_EXIT_INSN(),
10408 },
10409 .errstr = "R0 has value (0x2; 0x0)",
10410 .result = REJECT,
10411 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10412 },
10413 {
10414 "bpf_exit with invalid return code. test6",
10415 .insns = {
10416 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10417 BPF_EXIT_INSN(),
10418 },
10419 .errstr = "R0 is not a known value (ctx)",
10420 .result = REJECT,
10421 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10422 },
10423 {
10424 "bpf_exit with invalid return code. test7",
10425 .insns = {
10426 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10427 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10428 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10429 BPF_EXIT_INSN(),
10430 },
10431 .errstr = "R0 has unknown scalar value",
10432 .result = REJECT,
10433 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10434 },
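	/* BPF-to-BPF calls: basic sanity of the pseudo-call instruction, its
	 * restriction to privileged users, and checks on the return-value
	 * state propagated back from a subprogram.
	 */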
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010435 {
10436 "calls: basic sanity",
10437 .insns = {
10438 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10439 BPF_MOV64_IMM(BPF_REG_0, 1),
10440 BPF_EXIT_INSN(),
10441 BPF_MOV64_IMM(BPF_REG_0, 2),
10442 BPF_EXIT_INSN(),
10443 },
10444 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10445 .result = ACCEPT,
10446 },
10447 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010448	"calls: not on unprivileged",
10449 .insns = {
10450 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10451 BPF_MOV64_IMM(BPF_REG_0, 1),
10452 BPF_EXIT_INSN(),
10453 BPF_MOV64_IMM(BPF_REG_0, 2),
10454 BPF_EXIT_INSN(),
10455 },
10456 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10457 .result_unpriv = REJECT,
10458 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010459 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010460 },
10461 {
Daniel Borkmann21ccaf22018-01-26 23:33:48 +010010462 "calls: div by 0 in subprog",
10463 .insns = {
10464 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10465 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10466 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10467 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10468 offsetof(struct __sk_buff, data_end)),
10469 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10470 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10471 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10472 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10473 BPF_MOV64_IMM(BPF_REG_0, 1),
10474 BPF_EXIT_INSN(),
10475 BPF_MOV32_IMM(BPF_REG_2, 0),
10476 BPF_MOV32_IMM(BPF_REG_3, 1),
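			/* r2 is zero here; a BPF divide by zero is assumed not to
			 * trap at runtime (the result is simply 0), so the verifier
			 * is expected to accept this subprog and the caller still
			 * returns 1.
			 */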
10477 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10478 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10479 offsetof(struct __sk_buff, data)),
10480 BPF_EXIT_INSN(),
10481 },
10482 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10483 .result = ACCEPT,
10484 .retval = 1,
10485 },
10486 {
10487 "calls: multiple ret types in subprog 1",
10488 .insns = {
10489 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10490 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10491 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10492 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10493 offsetof(struct __sk_buff, data_end)),
10494 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10496 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10497 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10498 BPF_MOV64_IMM(BPF_REG_0, 1),
10499 BPF_EXIT_INSN(),
10500 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10501 offsetof(struct __sk_buff, data)),
10502 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10503 BPF_MOV32_IMM(BPF_REG_0, 42),
10504 BPF_EXIT_INSN(),
10505 },
10506 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10507 .result = REJECT,
10508 .errstr = "R0 invalid mem access 'inv'",
10509 },
10510 {
10511 "calls: multiple ret types in subprog 2",
10512 .insns = {
10513 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10514 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10515 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10516 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10517 offsetof(struct __sk_buff, data_end)),
10518 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10519 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10520 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10521 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10522 BPF_MOV64_IMM(BPF_REG_0, 1),
10523 BPF_EXIT_INSN(),
10524 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10525 offsetof(struct __sk_buff, data)),
10526 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10527 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10528 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10529 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10531 BPF_LD_MAP_FD(BPF_REG_1, 0),
10532 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10533 BPF_FUNC_map_lookup_elem),
10534 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10535 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10536 offsetof(struct __sk_buff, data)),
10537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10538 BPF_EXIT_INSN(),
10539 },
10540 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Prashant Bhole908142e2018-10-09 10:04:53 +090010541 .fixup_map_hash_8b = { 16 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +010010542 .result = REJECT,
10543 .errstr = "R0 min value is outside of the array range",
10544 },
10545 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010546 "calls: overlapping caller/callee",
10547 .insns = {
10548 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10549 BPF_MOV64_IMM(BPF_REG_0, 1),
10550 BPF_EXIT_INSN(),
10551 },
10552 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10553 .errstr = "last insn is not an exit or jmp",
10554 .result = REJECT,
10555 },
10556 {
10557 "calls: wrong recursive calls",
10558 .insns = {
10559 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10560 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10561 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10562 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10563 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10564 BPF_MOV64_IMM(BPF_REG_0, 1),
10565 BPF_EXIT_INSN(),
10566 },
10567 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10568 .errstr = "jump out of range",
10569 .result = REJECT,
10570 },
10571 {
10572 "calls: wrong src reg",
10573 .insns = {
10574 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
10575 BPF_MOV64_IMM(BPF_REG_0, 1),
10576 BPF_EXIT_INSN(),
10577 },
10578 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10579 .errstr = "BPF_CALL uses reserved fields",
10580 .result = REJECT,
10581 },
10582 {
10583 "calls: wrong off value",
10584 .insns = {
10585 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
10586 BPF_MOV64_IMM(BPF_REG_0, 1),
10587 BPF_EXIT_INSN(),
10588 BPF_MOV64_IMM(BPF_REG_0, 2),
10589 BPF_EXIT_INSN(),
10590 },
10591 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10592 .errstr = "BPF_CALL uses reserved fields",
10593 .result = REJECT,
10594 },
10595 {
10596 "calls: jump back loop",
10597 .insns = {
10598 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10599 BPF_MOV64_IMM(BPF_REG_0, 1),
10600 BPF_EXIT_INSN(),
10601 },
10602 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10603 .errstr = "back-edge from insn 0 to 0",
10604 .result = REJECT,
10605 },
10606 {
10607 "calls: conditional call",
10608 .insns = {
10609 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10610 offsetof(struct __sk_buff, mark)),
10611 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10612 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10613 BPF_MOV64_IMM(BPF_REG_0, 1),
10614 BPF_EXIT_INSN(),
10615 BPF_MOV64_IMM(BPF_REG_0, 2),
10616 BPF_EXIT_INSN(),
10617 },
10618 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10619 .errstr = "jump out of range",
10620 .result = REJECT,
10621 },
10622 {
10623 "calls: conditional call 2",
10624 .insns = {
10625 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10626 offsetof(struct __sk_buff, mark)),
10627 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10628 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10629 BPF_MOV64_IMM(BPF_REG_0, 1),
10630 BPF_EXIT_INSN(),
10631 BPF_MOV64_IMM(BPF_REG_0, 2),
10632 BPF_EXIT_INSN(),
10633 BPF_MOV64_IMM(BPF_REG_0, 3),
10634 BPF_EXIT_INSN(),
10635 },
10636 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10637 .result = ACCEPT,
10638 },
10639 {
10640 "calls: conditional call 3",
10641 .insns = {
10642 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10643 offsetof(struct __sk_buff, mark)),
10644 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10645 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10646 BPF_MOV64_IMM(BPF_REG_0, 1),
10647 BPF_EXIT_INSN(),
10648 BPF_MOV64_IMM(BPF_REG_0, 1),
10649 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10650 BPF_MOV64_IMM(BPF_REG_0, 3),
10651 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10652 },
10653 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10654 .errstr = "back-edge from insn",
10655 .result = REJECT,
10656 },
10657 {
10658 "calls: conditional call 4",
10659 .insns = {
10660 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10661 offsetof(struct __sk_buff, mark)),
10662 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10663 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10664 BPF_MOV64_IMM(BPF_REG_0, 1),
10665 BPF_EXIT_INSN(),
10666 BPF_MOV64_IMM(BPF_REG_0, 1),
10667 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
10668 BPF_MOV64_IMM(BPF_REG_0, 3),
10669 BPF_EXIT_INSN(),
10670 },
10671 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10672 .result = ACCEPT,
10673 },
10674 {
10675 "calls: conditional call 5",
10676 .insns = {
10677 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10678 offsetof(struct __sk_buff, mark)),
10679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10680 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10681 BPF_MOV64_IMM(BPF_REG_0, 1),
10682 BPF_EXIT_INSN(),
10683 BPF_MOV64_IMM(BPF_REG_0, 1),
10684 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10685 BPF_MOV64_IMM(BPF_REG_0, 3),
10686 BPF_EXIT_INSN(),
10687 },
10688 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10689 .errstr = "back-edge from insn",
10690 .result = REJECT,
10691 },
10692 {
10693 "calls: conditional call 6",
10694 .insns = {
10695 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10696 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
10697 BPF_EXIT_INSN(),
10698 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10699 offsetof(struct __sk_buff, mark)),
10700 BPF_EXIT_INSN(),
10701 },
10702 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10703 .errstr = "back-edge from insn",
10704 .result = REJECT,
10705 },
10706 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010707 "calls: using r0 returned by callee",
10708 .insns = {
10709 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10710 BPF_EXIT_INSN(),
10711 BPF_MOV64_IMM(BPF_REG_0, 2),
10712 BPF_EXIT_INSN(),
10713 },
10714 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10715 .result = ACCEPT,
10716 },
10717 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010718 "calls: using uninit r0 from callee",
10719 .insns = {
10720 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10721 BPF_EXIT_INSN(),
10722 BPF_EXIT_INSN(),
10723 },
10724 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10725 .errstr = "!read_ok",
10726 .result = REJECT,
10727 },
10728 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010729 "calls: callee is using r1",
10730 .insns = {
10731 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10732 BPF_EXIT_INSN(),
10733 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10734 offsetof(struct __sk_buff, len)),
10735 BPF_EXIT_INSN(),
10736 },
10737 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
10738 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010739 .retval = TEST_DATA_LEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010740 },
10741 {
10742 "calls: callee using args1",
10743 .insns = {
10744 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10745 BPF_EXIT_INSN(),
10746 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10747 BPF_EXIT_INSN(),
10748 },
10749 .errstr_unpriv = "allowed for root only",
10750 .result_unpriv = REJECT,
10751 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010752 .retval = POINTER_VALUE,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010753 },
10754 {
10755 "calls: callee using wrong args2",
10756 .insns = {
10757 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10758 BPF_EXIT_INSN(),
10759 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10760 BPF_EXIT_INSN(),
10761 },
10762 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10763 .errstr = "R2 !read_ok",
10764 .result = REJECT,
10765 },
10766 {
10767 "calls: callee using two args",
10768 .insns = {
10769 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10770 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
10771 offsetof(struct __sk_buff, len)),
10772 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
10773 offsetof(struct __sk_buff, len)),
10774 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10775 BPF_EXIT_INSN(),
10776 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10777 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
10778 BPF_EXIT_INSN(),
10779 },
10780 .errstr_unpriv = "allowed for root only",
10781 .result_unpriv = REJECT,
10782 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010783 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010784 },
10785 {
10786 "calls: callee changing pkt pointers",
10787 .insns = {
10788 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
10789 offsetof(struct xdp_md, data)),
10790 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
10791 offsetof(struct xdp_md, data_end)),
10792 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
10793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
10794 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
10795 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10796 /* clear_all_pkt_pointers() has to walk all frames
10797 * to make sure that pkt pointers in the caller
10798			 * are cleared when the callee calls a helper that
10799			 * adjusts the packet size
10800 */
10801 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10802 BPF_MOV32_IMM(BPF_REG_0, 0),
10803 BPF_EXIT_INSN(),
10804 BPF_MOV64_IMM(BPF_REG_2, 0),
10805 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10806 BPF_FUNC_xdp_adjust_head),
10807 BPF_EXIT_INSN(),
10808 },
10809 .result = REJECT,
10810 .errstr = "R6 invalid mem access 'inv'",
10811 .prog_type = BPF_PROG_TYPE_XDP,
10812 },
10813 {
10814 "calls: two calls with args",
10815 .insns = {
10816 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10817 BPF_EXIT_INSN(),
10818 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10819 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10820 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10821 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10822 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10823 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10824 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10825 BPF_EXIT_INSN(),
10826 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10827 offsetof(struct __sk_buff, len)),
10828 BPF_EXIT_INSN(),
10829 },
10830 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10831 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010832 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010833 },
10834 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010835 "calls: calls with stack arith",
10836 .insns = {
10837 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10839 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10840 BPF_EXIT_INSN(),
10841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10842 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10843 BPF_EXIT_INSN(),
10844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10845 BPF_MOV64_IMM(BPF_REG_0, 42),
10846 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10847 BPF_EXIT_INSN(),
10848 },
10849 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10850 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010851 .retval = 42,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010852 },
10853 {
10854 "calls: calls with misaligned stack access",
10855 .insns = {
10856 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10857 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10858 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10859 BPF_EXIT_INSN(),
10860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
10861 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10862 BPF_EXIT_INSN(),
10863 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10864 BPF_MOV64_IMM(BPF_REG_0, 42),
10865 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10866 BPF_EXIT_INSN(),
10867 },
10868 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10869 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
10870 .errstr = "misaligned stack access",
10871 .result = REJECT,
10872 },
10873 {
10874 "calls: calls control flow, jump test",
10875 .insns = {
10876 BPF_MOV64_IMM(BPF_REG_0, 42),
10877 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10878 BPF_MOV64_IMM(BPF_REG_0, 43),
10879 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10880 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10881 BPF_EXIT_INSN(),
10882 },
10883 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10884 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010885 .retval = 43,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010886 },
10887 {
10888 "calls: calls control flow, jump test 2",
10889 .insns = {
10890 BPF_MOV64_IMM(BPF_REG_0, 42),
10891 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10892 BPF_MOV64_IMM(BPF_REG_0, 43),
10893 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10894 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10895 BPF_EXIT_INSN(),
10896 },
10897 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10898 .errstr = "jump out of range from insn 1 to 4",
10899 .result = REJECT,
10900 },
10901 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010902 "calls: two calls with bad jump",
10903 .insns = {
10904 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10905 BPF_EXIT_INSN(),
10906 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10907 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10908 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10909 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10910 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10911 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10912 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10913 BPF_EXIT_INSN(),
10914 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10915 offsetof(struct __sk_buff, len)),
10916 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
10917 BPF_EXIT_INSN(),
10918 },
10919 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10920 .errstr = "jump out of range from insn 11 to 9",
10921 .result = REJECT,
10922 },
10923 {
10924 "calls: recursive call. test1",
10925 .insns = {
10926 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10927 BPF_EXIT_INSN(),
10928 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10929 BPF_EXIT_INSN(),
10930 },
10931 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10932 .errstr = "back-edge",
10933 .result = REJECT,
10934 },
10935 {
10936 "calls: recursive call. test2",
10937 .insns = {
10938 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10939 BPF_EXIT_INSN(),
10940 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10941 BPF_EXIT_INSN(),
10942 },
10943 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10944 .errstr = "back-edge",
10945 .result = REJECT,
10946 },
10947 {
10948 "calls: unreachable code",
10949 .insns = {
10950 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10951 BPF_EXIT_INSN(),
10952 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10953 BPF_EXIT_INSN(),
10954 BPF_MOV64_IMM(BPF_REG_0, 0),
10955 BPF_EXIT_INSN(),
10956 BPF_MOV64_IMM(BPF_REG_0, 0),
10957 BPF_EXIT_INSN(),
10958 },
10959 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10960 .errstr = "unreachable insn 6",
10961 .result = REJECT,
10962 },
10963 {
10964 "calls: invalid call",
10965 .insns = {
10966 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10967 BPF_EXIT_INSN(),
10968 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
10969 BPF_EXIT_INSN(),
10970 },
10971 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10972 .errstr = "invalid destination",
10973 .result = REJECT,
10974 },
10975 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010976 "calls: invalid call 2",
10977 .insns = {
10978 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10979 BPF_EXIT_INSN(),
10980 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
10981 BPF_EXIT_INSN(),
10982 },
10983 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10984 .errstr = "invalid destination",
10985 .result = REJECT,
10986 },
10987 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010988 "calls: jumping across function bodies. test1",
10989 .insns = {
10990 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10991 BPF_MOV64_IMM(BPF_REG_0, 0),
10992 BPF_EXIT_INSN(),
10993 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
10994 BPF_EXIT_INSN(),
10995 },
10996 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10997 .errstr = "jump out of range",
10998 .result = REJECT,
10999 },
11000 {
11001 "calls: jumping across function bodies. test2",
11002 .insns = {
11003 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
11004 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11005 BPF_MOV64_IMM(BPF_REG_0, 0),
11006 BPF_EXIT_INSN(),
11007 BPF_EXIT_INSN(),
11008 },
11009 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11010 .errstr = "jump out of range",
11011 .result = REJECT,
11012 },
11013 {
11014 "calls: call without exit",
11015 .insns = {
11016 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11017 BPF_EXIT_INSN(),
11018 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11019 BPF_EXIT_INSN(),
11020 BPF_MOV64_IMM(BPF_REG_0, 0),
11021 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
11022 },
11023 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11024 .errstr = "not an exit",
11025 .result = REJECT,
11026 },
11027 {
11028 "calls: call into middle of ld_imm64",
11029 .insns = {
11030 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11032 BPF_MOV64_IMM(BPF_REG_0, 0),
11033 BPF_EXIT_INSN(),
11034 BPF_LD_IMM64(BPF_REG_0, 0),
11035 BPF_EXIT_INSN(),
11036 },
11037 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11038 .errstr = "last insn",
11039 .result = REJECT,
11040 },
11041 {
11042 "calls: call into middle of other call",
11043 .insns = {
11044 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11045 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11046 BPF_MOV64_IMM(BPF_REG_0, 0),
11047 BPF_EXIT_INSN(),
11048 BPF_MOV64_IMM(BPF_REG_0, 0),
11049 BPF_MOV64_IMM(BPF_REG_0, 0),
11050 BPF_EXIT_INSN(),
11051 },
11052 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11053 .errstr = "last insn",
11054 .result = REJECT,
11055 },
11056 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080011057 "calls: ld_abs with changing ctx data in callee",
11058 .insns = {
11059 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11060 BPF_LD_ABS(BPF_B, 0),
11061 BPF_LD_ABS(BPF_H, 0),
11062 BPF_LD_ABS(BPF_W, 0),
11063 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
11064 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11065 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
11066 BPF_LD_ABS(BPF_B, 0),
11067 BPF_LD_ABS(BPF_H, 0),
11068 BPF_LD_ABS(BPF_W, 0),
11069 BPF_EXIT_INSN(),
11070 BPF_MOV64_IMM(BPF_REG_2, 1),
11071 BPF_MOV64_IMM(BPF_REG_3, 2),
11072 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11073 BPF_FUNC_skb_vlan_push),
11074 BPF_EXIT_INSN(),
11075 },
11076 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11077 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
11078 .result = REJECT,
11079 },
11080 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011081 "calls: two calls with bad fallthrough",
11082 .insns = {
11083 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11084 BPF_EXIT_INSN(),
11085 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11086 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11087 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11088 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11089 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11090 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11091 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11092 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
11093 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11094 offsetof(struct __sk_buff, len)),
11095 BPF_EXIT_INSN(),
11096 },
11097 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11098 .errstr = "not an exit",
11099 .result = REJECT,
11100 },
11101 {
11102 "calls: two calls with stack read",
11103 .insns = {
11104 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11105 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11107 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11108 BPF_EXIT_INSN(),
11109 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11110 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11111 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11112 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11114 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11115 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11116 BPF_EXIT_INSN(),
11117 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11118 BPF_EXIT_INSN(),
11119 },
11120 .prog_type = BPF_PROG_TYPE_XDP,
11121 .result = ACCEPT,
11122 },
11123 {
11124 "calls: two calls with stack write",
11125 .insns = {
11126 /* main prog */
11127 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11129 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11130 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11132 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11133 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11134 BPF_EXIT_INSN(),
11135
11136 /* subprog 1 */
11137 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11138 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11139 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
11140 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
11141 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11142 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11143 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
11144 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
11145 /* write into stack frame of main prog */
11146 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11147 BPF_EXIT_INSN(),
11148
11149 /* subprog 2 */
11150 /* read from stack frame of main prog */
11151 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11152 BPF_EXIT_INSN(),
11153 },
11154 .prog_type = BPF_PROG_TYPE_XDP,
11155 .result = ACCEPT,
11156 },
11157 {
Jann Horn6b80ad22017-12-22 19:12:35 +010011158 "calls: stack overflow using two frames (pre-call access)",
11159 .insns = {
11160 /* prog 1 */
11161 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11162 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
11163 BPF_EXIT_INSN(),
11164
11165 /* prog 2 */
11166 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11167 BPF_MOV64_IMM(BPF_REG_0, 0),
11168 BPF_EXIT_INSN(),
11169 },
11170 .prog_type = BPF_PROG_TYPE_XDP,
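	/* Roughly 300 bytes in each of the two frames, about 600 bytes
	 * combined, which is assumed to exceed the 512-byte stack limit.
	 */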
11171 .errstr = "combined stack size",
11172 .result = REJECT,
11173 },
11174 {
11175 "calls: stack overflow using two frames (post-call access)",
11176 .insns = {
11177 /* prog 1 */
11178 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
11179 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11180 BPF_EXIT_INSN(),
11181
11182 /* prog 2 */
11183 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11184 BPF_MOV64_IMM(BPF_REG_0, 0),
11185 BPF_EXIT_INSN(),
11186 },
11187 .prog_type = BPF_PROG_TYPE_XDP,
11188 .errstr = "combined stack size",
11189 .result = REJECT,
11190 },
11191 {
Alexei Starovoitov6b86c422017-12-25 13:15:41 -080011192 "calls: stack depth check using three frames. test1",
11193 .insns = {
11194 /* main */
11195 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11196 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11197 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11198 BPF_MOV64_IMM(BPF_REG_0, 0),
11199 BPF_EXIT_INSN(),
11200 /* A */
11201 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11202 BPF_EXIT_INSN(),
11203 /* B */
11204 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11205 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11206 BPF_EXIT_INSN(),
11207 },
11208 .prog_type = BPF_PROG_TYPE_XDP,
11209 /* stack_main=32, stack_A=256, stack_B=64
11210 * and max(main+A, main+A+B) < 512
11211 */
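	/* Roughly max(32 + 256, 32 + 64 + 256) = 352 bytes on the deepest
	 * chain, assumed to stay under the 512-byte limit, hence ACCEPT.
	 */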
11212 .result = ACCEPT,
11213 },
11214 {
11215 "calls: stack depth check using three frames. test2",
11216 .insns = {
11217 /* main */
11218 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11219 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11220 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11221 BPF_MOV64_IMM(BPF_REG_0, 0),
11222 BPF_EXIT_INSN(),
11223 /* A */
11224 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11225 BPF_EXIT_INSN(),
11226 /* B */
11227 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11228 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11229 BPF_EXIT_INSN(),
11230 },
11231 .prog_type = BPF_PROG_TYPE_XDP,
11232 /* stack_main=32, stack_A=64, stack_B=256
11233 * and max(main+A, main+A+B) < 512
11234 */
11235 .result = ACCEPT,
11236 },
11237 {
11238 "calls: stack depth check using three frames. test3",
11239 .insns = {
11240 /* main */
11241 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11242 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11243 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11244 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
11245 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
11246 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11247 BPF_MOV64_IMM(BPF_REG_0, 0),
11248 BPF_EXIT_INSN(),
11249 /* A */
11250 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
11251 BPF_EXIT_INSN(),
11252 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
11253 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11254 /* B */
11255 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
11256 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
11257 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11258 BPF_EXIT_INSN(),
11259 },
11260 .prog_type = BPF_PROG_TYPE_XDP,
11261 /* stack_main=64, stack_A=224, stack_B=256
11262 * and max(main+A, main+A+B) > 512
11263 */
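	/* Roughly 64 + 256 + 224 = 544 bytes on the main->B->A chain,
	 * assumed to exceed the 512-byte limit, hence the rejection.
	 */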
11264 .errstr = "combined stack",
11265 .result = REJECT,
11266 },
11267 {
11268 "calls: stack depth check using three frames. test4",
11269 /* void main(void) {
11270 * func1(0);
11271 * func1(1);
11272 * func2(1);
11273 * }
11274 * void func1(int alloc_or_recurse) {
11275 * if (alloc_or_recurse) {
11276 * frame_pointer[-300] = 1;
11277 * } else {
11278 * func2(alloc_or_recurse);
11279 * }
11280 * }
11281 * void func2(int alloc_or_recurse) {
11282 * if (alloc_or_recurse) {
11283 * frame_pointer[-300] = 1;
11284 * }
11285 * }
11286 */
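	/* The verifier is assumed to size each function by its worst-case
	 * stack use regardless of which branch runs, so func1 and func2 both
	 * count as roughly 300 bytes and main->func1->func2 is treated as
	 * exceeding the 512-byte limit even though that combination never
	 * happens at run time.
	 */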
11287 .insns = {
11288 /* main */
11289 BPF_MOV64_IMM(BPF_REG_1, 0),
11290 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11291 BPF_MOV64_IMM(BPF_REG_1, 1),
11292 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11293 BPF_MOV64_IMM(BPF_REG_1, 1),
11294 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11295 BPF_MOV64_IMM(BPF_REG_0, 0),
11296 BPF_EXIT_INSN(),
11297 /* A */
11298 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11299 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11300 BPF_EXIT_INSN(),
11301 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11302 BPF_EXIT_INSN(),
11303 /* B */
11304 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11305 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11306 BPF_EXIT_INSN(),
11307 },
11308 .prog_type = BPF_PROG_TYPE_XDP,
11309 .result = REJECT,
11310 .errstr = "combined stack",
11311 },
11312 {
Alexei Starovoitovaada9ce2017-12-25 13:15:42 -080011313 "calls: stack depth check using three frames. test5",
11314 .insns = {
11315 /* main */
11316 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11317 BPF_EXIT_INSN(),
11318 /* A */
11319 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11320 BPF_EXIT_INSN(),
11321 /* B */
11322 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11323 BPF_EXIT_INSN(),
11324 /* C */
11325 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11326 BPF_EXIT_INSN(),
11327 /* D */
11328 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11329 BPF_EXIT_INSN(),
11330 /* E */
11331 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11332 BPF_EXIT_INSN(),
11333 /* F */
11334 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11335 BPF_EXIT_INSN(),
11336 /* G */
11337 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11338 BPF_EXIT_INSN(),
11339 /* H */
11340 BPF_MOV64_IMM(BPF_REG_0, 0),
11341 BPF_EXIT_INSN(),
11342 },
11343 .prog_type = BPF_PROG_TYPE_XDP,
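	/* main plus subprogs A..H gives nine call frames, which is assumed
	 * to be above the verifier's call depth limit of eight frames.
	 */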
11344 .errstr = "call stack",
11345 .result = REJECT,
11346 },
11347 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011348 "calls: spill into caller stack frame",
11349 .insns = {
11350 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11351 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11353 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11354 BPF_EXIT_INSN(),
11355 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11356 BPF_MOV64_IMM(BPF_REG_0, 0),
11357 BPF_EXIT_INSN(),
11358 },
11359 .prog_type = BPF_PROG_TYPE_XDP,
11360 .errstr = "cannot spill",
11361 .result = REJECT,
11362 },
11363 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080011364 "calls: write into caller stack frame",
11365 .insns = {
11366 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11367 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11368 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11369 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11370 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11371 BPF_EXIT_INSN(),
11372 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11373 BPF_MOV64_IMM(BPF_REG_0, 0),
11374 BPF_EXIT_INSN(),
11375 },
11376 .prog_type = BPF_PROG_TYPE_XDP,
11377 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011378 .retval = 42,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080011379 },
11380 {
11381 "calls: write into callee stack frame",
11382 .insns = {
11383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11384 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
11385 BPF_EXIT_INSN(),
11386 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11388 BPF_EXIT_INSN(),
11389 },
11390 .prog_type = BPF_PROG_TYPE_XDP,
11391 .errstr = "cannot return stack pointer",
11392 .result = REJECT,
11393 },
11394 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011395 "calls: two calls with stack write and void return",
11396 .insns = {
11397 /* main prog */
11398 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11399 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11403 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11404 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11405 BPF_EXIT_INSN(),
11406
11407 /* subprog 1 */
11408 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11409 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11410 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11413 BPF_EXIT_INSN(),
11414
11415 /* subprog 2 */
11416 /* write into stack frame of main prog */
11417 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11418 BPF_EXIT_INSN(), /* void return */
11419 },
11420 .prog_type = BPF_PROG_TYPE_XDP,
11421 .result = ACCEPT,
11422 },
11423 {
11424 "calls: ambiguous return value",
11425 .insns = {
11426 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11427 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11428 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11430 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11431 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11432 BPF_EXIT_INSN(),
11433 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11434 BPF_MOV64_IMM(BPF_REG_0, 0),
11435 BPF_EXIT_INSN(),
11436 },
11437 .errstr_unpriv = "allowed for root only",
11438 .result_unpriv = REJECT,
11439 .errstr = "R0 !read_ok",
11440 .result = REJECT,
11441 },
11442 {
11443 "calls: two calls that return map_value",
11444 .insns = {
11445 /* main prog */
11446 /* pass fp-16, fp-8 into a function */
11447 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11449 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11451 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11452
11453 /* fetch map_value_ptr from the stack of this function */
11454 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11455 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11456 /* write into map value */
11457 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11458			/* fetch second map_value_ptr from the stack */
11459 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11460 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11461 /* write into map value */
11462 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11463 BPF_MOV64_IMM(BPF_REG_0, 0),
11464 BPF_EXIT_INSN(),
11465
11466 /* subprog 1 */
11467 /* call 3rd function twice */
11468 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11469 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11470 /* first time with fp-8 */
11471 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11472 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11473 /* second time with fp-16 */
11474 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11475 BPF_EXIT_INSN(),
11476
11477 /* subprog 2 */
11478 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11479 /* lookup from map */
11480 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11481 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11482 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11483 BPF_LD_MAP_FD(BPF_REG_1, 0),
11484 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11485 BPF_FUNC_map_lookup_elem),
11486 /* write map_value_ptr into stack frame of main prog */
11487 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11488 BPF_MOV64_IMM(BPF_REG_0, 0),
11489 BPF_EXIT_INSN(), /* return 0 */
11490 },
11491 .prog_type = BPF_PROG_TYPE_XDP,
Prashant Bhole908142e2018-10-09 10:04:53 +090011492 .fixup_map_hash_8b = { 23 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011493 .result = ACCEPT,
11494 },
11495 {
11496 "calls: two calls that return map_value with bool condition",
11497 .insns = {
11498 /* main prog */
11499 /* pass fp-16, fp-8 into a function */
11500 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11502 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11504 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11505 BPF_MOV64_IMM(BPF_REG_0, 0),
11506 BPF_EXIT_INSN(),
11507
11508 /* subprog 1 */
11509 /* call 3rd function twice */
11510 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11511 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11512 /* first time with fp-8 */
11513 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11514 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11515 /* fetch map_value_ptr from the stack of this function */
11516 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11517 /* write into map value */
11518 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11519 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11520 /* second time with fp-16 */
11521 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11522 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11523			/* fetch second map_value_ptr from the stack */
11524 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11525 /* write into map value */
11526 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11527 BPF_EXIT_INSN(),
11528
11529 /* subprog 2 */
11530 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11531 /* lookup from map */
11532 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11533 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11535 BPF_LD_MAP_FD(BPF_REG_1, 0),
11536 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11537 BPF_FUNC_map_lookup_elem),
11538 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11539 BPF_MOV64_IMM(BPF_REG_0, 0),
11540 BPF_EXIT_INSN(), /* return 0 */
11541 /* write map_value_ptr into stack frame of main prog */
11542 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11543 BPF_MOV64_IMM(BPF_REG_0, 1),
11544 BPF_EXIT_INSN(), /* return 1 */
11545 },
11546 .prog_type = BPF_PROG_TYPE_XDP,
Prashant Bhole908142e2018-10-09 10:04:53 +090011547 .fixup_map_hash_8b = { 23 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011548 .result = ACCEPT,
11549 },
11550 {
11551 "calls: two calls that return map_value with incorrect bool check",
11552 .insns = {
11553 /* main prog */
11554 /* pass fp-16, fp-8 into a function */
11555 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11557 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11558 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11559 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11560 BPF_MOV64_IMM(BPF_REG_0, 0),
11561 BPF_EXIT_INSN(),
11562
11563 /* subprog 1 */
11564 /* call 3rd function twice */
11565 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11566 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11567 /* first time with fp-8 */
11568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11569 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11570 /* fetch map_value_ptr from the stack of this function */
11571 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11572 /* write into map value */
11573 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11574 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11575 /* second time with fp-16 */
11576 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11577 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
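			/* Inverted check: fp-16 is read on the r0 == 0 path, where
			 * subprog 2 presumably never stored the pointer, hence the
			 * "invalid read from stack" rejection below.
			 */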
11578			/* fetch second map_value_ptr from the stack */
11579 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11580 /* write into map value */
11581 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11582 BPF_EXIT_INSN(),
11583
11584 /* subprog 2 */
11585 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11586 /* lookup from map */
11587 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11588 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11590 BPF_LD_MAP_FD(BPF_REG_1, 0),
11591 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11592 BPF_FUNC_map_lookup_elem),
11593 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11594 BPF_MOV64_IMM(BPF_REG_0, 0),
11595 BPF_EXIT_INSN(), /* return 0 */
11596 /* write map_value_ptr into stack frame of main prog */
11597 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11598 BPF_MOV64_IMM(BPF_REG_0, 1),
11599 BPF_EXIT_INSN(), /* return 1 */
11600 },
11601 .prog_type = BPF_PROG_TYPE_XDP,
Prashant Bhole908142e2018-10-09 10:04:53 +090011602 .fixup_map_hash_8b = { 23 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011603 .result = REJECT,
11604 .errstr = "invalid read from stack off -16+0 size 8",
11605 },
11606 {
11607 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
11608 .insns = {
11609 /* main prog */
11610 /* pass fp-16, fp-8 into a function */
11611 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11612 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11613 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11615 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11616 BPF_MOV64_IMM(BPF_REG_0, 0),
11617 BPF_EXIT_INSN(),
11618
11619 /* subprog 1 */
11620 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11621 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11622 /* 1st lookup from map */
11623 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11624 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11625 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11626 BPF_LD_MAP_FD(BPF_REG_1, 0),
11627 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11628 BPF_FUNC_map_lookup_elem),
11629 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11630 BPF_MOV64_IMM(BPF_REG_8, 0),
11631 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11632 /* write map_value_ptr into stack frame of main prog at fp-8 */
11633 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11634 BPF_MOV64_IMM(BPF_REG_8, 1),
11635
11636 /* 2nd lookup from map */
11637 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11639 BPF_LD_MAP_FD(BPF_REG_1, 0),
11640 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11641 BPF_FUNC_map_lookup_elem),
11642 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11643 BPF_MOV64_IMM(BPF_REG_9, 0),
11644 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11645 /* write map_value_ptr into stack frame of main prog at fp-16 */
11646 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11647 BPF_MOV64_IMM(BPF_REG_9, 1),
11648
11649 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11650 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11651 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11652 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11653 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11654 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
11655 BPF_EXIT_INSN(),
11656
11657 /* subprog 2 */
11658 /* if arg2 == 1 do *arg1 = 0 */
11659 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11660 /* fetch map_value_ptr from the stack of this function */
11661 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11662 /* write into map value */
11663 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11664
11665 /* if arg4 == 1 do *arg3 = 0 */
11666 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11667 /* fetch map_value_ptr from the stack of this function */
11668 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11669 /* write into map value */
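			/* 8-byte store at offset 2 overruns the 8-byte map value,
			 * which is the access this test expects to be rejected.
			 */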
11670 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11671 BPF_EXIT_INSN(),
11672 },
11673 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Prashant Bhole908142e2018-10-09 10:04:53 +090011674 .fixup_map_hash_8b = { 12, 22 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011675 .result = REJECT,
11676 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
11677 },
11678 {
11679 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
11680 .insns = {
11681 /* main prog */
11682 /* pass fp-16, fp-8 into a function */
11683 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11685 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11687 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11688 BPF_MOV64_IMM(BPF_REG_0, 0),
11689 BPF_EXIT_INSN(),
11690
11691 /* subprog 1 */
11692 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11693 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11694 /* 1st lookup from map */
11695 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11696 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11698 BPF_LD_MAP_FD(BPF_REG_1, 0),
11699 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11700 BPF_FUNC_map_lookup_elem),
11701 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11702 BPF_MOV64_IMM(BPF_REG_8, 0),
11703 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11704 /* write map_value_ptr into stack frame of main prog at fp-8 */
11705 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11706 BPF_MOV64_IMM(BPF_REG_8, 1),
11707
11708 /* 2nd lookup from map */
11709 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11711 BPF_LD_MAP_FD(BPF_REG_1, 0),
11712 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11713 BPF_FUNC_map_lookup_elem),
11714 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11715 BPF_MOV64_IMM(BPF_REG_9, 0),
11716 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11717 /* write map_value_ptr into stack frame of main prog at fp-16 */
11718 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11719 BPF_MOV64_IMM(BPF_REG_9, 1),
11720
11721 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11722 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11723 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11724 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11725 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11726 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
11727 BPF_EXIT_INSN(),
11728
11729 /* subprog 2 */
11730 /* if arg2 == 1 do *arg1 = 0 */
11731 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11732 /* fetch map_value_ptr from the stack of this function */
11733 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11734 /* write into map value */
11735 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11736
11737 /* if arg4 == 1 do *arg3 = 0 */
11738 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11739 /* fetch map_value_ptr from the stack of this function */
11740 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11741 /* write into map value */
11742 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11743 BPF_EXIT_INSN(),
11744 },
11745 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Prashant Bhole908142e2018-10-09 10:04:53 +090011746 .fixup_map_hash_8b = { 12, 22 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011747 .result = ACCEPT,
11748 },
11749 {
11750 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
11751 .insns = {
11752 /* main prog */
11753 /* pass fp-16, fp-8 into a function */
11754 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11756 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11758 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
11759 BPF_MOV64_IMM(BPF_REG_0, 0),
11760 BPF_EXIT_INSN(),
11761
11762 /* subprog 1 */
11763 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11764 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11765 /* 1st lookup from map */
11766 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
11767 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11768 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11769 BPF_LD_MAP_FD(BPF_REG_1, 0),
11770 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11771 BPF_FUNC_map_lookup_elem),
11772 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11773 BPF_MOV64_IMM(BPF_REG_8, 0),
11774 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11775 /* write map_value_ptr into stack frame of main prog at fp-8 */
11776 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11777 BPF_MOV64_IMM(BPF_REG_8, 1),
11778
11779 /* 2nd lookup from map */
11780 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11781 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11782 BPF_LD_MAP_FD(BPF_REG_1, 0),
11783 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11784 BPF_FUNC_map_lookup_elem),
11785 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11786 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
11787 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11788 /* write map_value_ptr into stack frame of main prog at fp-16 */
11789 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11790 BPF_MOV64_IMM(BPF_REG_9, 1),
11791
11792 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11793 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
11794 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11795 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11796 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11797 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
11798 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
11799
11800 /* subprog 2 */
11801 /* if arg2 == 1 do *arg1 = 0 */
11802 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11803 /* fetch map_value_ptr from the stack of this function */
11804 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11805 /* write into map value */
11806 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11807
11808 /* if arg4 == 1 do *arg3 = 0 */
11809 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11810 /* fetch map_value_ptr from the stack of this function */
11811 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11812 /* write into map value */
11813 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11814 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
11815 },
11816 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Prashant Bhole908142e2018-10-09 10:04:53 +090011817 .fixup_map_hash_8b = { 12, 22 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011818 .result = REJECT,
11819 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
11820 },
11821 {
11822 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
11823 .insns = {
11824 /* main prog */
11825 /* pass fp-16, fp-8 into a function */
11826 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11828 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11829 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11830 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11831 BPF_MOV64_IMM(BPF_REG_0, 0),
11832 BPF_EXIT_INSN(),
11833
11834 /* subprog 1 */
11835 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11836 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11837 /* 1st lookup from map */
11838 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11839 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11841 BPF_LD_MAP_FD(BPF_REG_1, 0),
11842 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11843 BPF_FUNC_map_lookup_elem),
11844 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11845 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11846 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11847 BPF_MOV64_IMM(BPF_REG_8, 0),
11848 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11849 BPF_MOV64_IMM(BPF_REG_8, 1),
11850
11851 /* 2nd lookup from map */
11852 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11854 BPF_LD_MAP_FD(BPF_REG_1, 0),
11855 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11856 BPF_FUNC_map_lookup_elem),
11857 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11858 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11859 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11860 BPF_MOV64_IMM(BPF_REG_9, 0),
11861 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11862 BPF_MOV64_IMM(BPF_REG_9, 1),
11863
11864 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11865 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11866 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11867 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11868 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11869 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11870 BPF_EXIT_INSN(),
11871
11872 /* subprog 2 */
11873 /* if arg2 == 1 do *arg1 = 0 */
11874 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11875 /* fetch map_value_ptr from the stack of this function */
11876 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11877 /* write into map value */
11878 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11879
11880 /* if arg4 == 1 do *arg3 = 0 */
11881 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11882 /* fetch map_value_ptr from the stack of this function */
11883 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11884 /* write into map value */
11885 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11886 BPF_EXIT_INSN(),
11887 },
11888 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Prashant Bhole908142e2018-10-09 10:04:53 +090011889 .fixup_map_hash_8b = { 12, 22 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011890 .result = ACCEPT,
11891 },
11892 {
11893 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
11894 .insns = {
11895 /* main prog */
11896 /* pass fp-16, fp-8 into a function */
11897 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11899 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11902 BPF_MOV64_IMM(BPF_REG_0, 0),
11903 BPF_EXIT_INSN(),
11904
11905 /* subprog 1 */
11906 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11907 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11908 /* 1st lookup from map */
11909 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11910 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11912 BPF_LD_MAP_FD(BPF_REG_1, 0),
11913 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11914 BPF_FUNC_map_lookup_elem),
11915 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11916 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11917 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11918 BPF_MOV64_IMM(BPF_REG_8, 0),
11919 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11920 BPF_MOV64_IMM(BPF_REG_8, 1),
11921
11922 /* 2nd lookup from map */
11923 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11925 BPF_LD_MAP_FD(BPF_REG_1, 0),
11926 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11927 BPF_FUNC_map_lookup_elem),
11928 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11929 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11930 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11931 BPF_MOV64_IMM(BPF_REG_9, 0),
11932 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11933 BPF_MOV64_IMM(BPF_REG_9, 1),
11934
11935 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11936 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11937 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11938 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11939 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11940 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11941 BPF_EXIT_INSN(),
11942
11943 /* subprog 2 */
11944 /* if arg2 == 1 do *arg1 = 0 */
11945 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11946 /* fetch map_value_ptr from the stack of this function */
11947 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11948 /* write into map value */
11949 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11950
11951 /* if arg4 == 0 do *arg3 = 0 */
11952 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
11953 /* fetch map_value_ptr from the stack of this function */
11954 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11955 /* write into map value */
11956 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11957 BPF_EXIT_INSN(),
11958 },
11959 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Prashant Bhole908142e2018-10-09 10:04:53 +090011960 .fixup_map_hash_8b = { 12, 22 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011961 .result = REJECT,
11962 .errstr = "R0 invalid mem access 'inv'",
11963 },
11964 {
11965 "calls: pkt_ptr spill into caller stack",
11966 .insns = {
11967 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11969 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11970 BPF_EXIT_INSN(),
11971
11972 /* subprog 1 */
11973 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11974 offsetof(struct __sk_buff, data)),
11975 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11976 offsetof(struct __sk_buff, data_end)),
11977 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11979 /* spill unchecked pkt_ptr into stack of caller */
11980 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11981 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11982 /* now the pkt range is verified, read pkt_ptr from stack */
11983 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11984 /* write 4 bytes into packet */
11985 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11986 BPF_EXIT_INSN(),
11987 },
11988 .result = ACCEPT,
11989 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011990 .retval = POINTER_VALUE,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080011991 },
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080011992 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080011993 "calls: pkt_ptr spill into caller stack 2",
11994 .insns = {
11995 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11996 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11997 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11998 /* Marking is still kept, but not in all cases safe. */
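			/* The callee can take the path where its range check fails
			 * and still return, so this unconditional use of the spilled
			 * pkt_ptr is presumably not covered by that check, hence the
			 * rejection below.
			 */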
11999 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12000 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12001 BPF_EXIT_INSN(),
12002
12003 /* subprog 1 */
12004 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12005 offsetof(struct __sk_buff, data)),
12006 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12007 offsetof(struct __sk_buff, data_end)),
12008 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12010 /* spill unchecked pkt_ptr into stack of caller */
12011 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12012 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12013 /* now the pkt range is verified, read pkt_ptr from stack */
12014 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12015 /* write 4 bytes into packet */
12016 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12017 BPF_EXIT_INSN(),
12018 },
12019 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12020 .errstr = "invalid access to packet",
12021 .result = REJECT,
12022 },
12023 {
12024 "calls: pkt_ptr spill into caller stack 3",
12025 .insns = {
12026 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12027 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12028 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12029 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12030 /* Marking is still kept and safe here. */
12031 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12032 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12033 BPF_EXIT_INSN(),
12034
12035 /* subprog 1 */
12036 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12037 offsetof(struct __sk_buff, data)),
12038 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12039 offsetof(struct __sk_buff, data_end)),
12040 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12041 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12042 /* spill unchecked pkt_ptr into stack of caller */
12043 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12044 BPF_MOV64_IMM(BPF_REG_5, 0),
12045 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12046 BPF_MOV64_IMM(BPF_REG_5, 1),
12047 /* now the pkt range is verified, read pkt_ptr from stack */
12048 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12049 /* write 4 bytes into packet */
12050 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12051 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12052 BPF_EXIT_INSN(),
12053 },
12054 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12055 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080012056 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080012057 },
12058 {
12059 "calls: pkt_ptr spill into caller stack 4",
12060 .insns = {
12061 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12063 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12064 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12065 /* Check marking propagated. */
12066 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12067 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12068 BPF_EXIT_INSN(),
12069
12070 /* subprog 1 */
12071 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12072 offsetof(struct __sk_buff, data)),
12073 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12074 offsetof(struct __sk_buff, data_end)),
12075 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12076 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12077 /* spill unchecked pkt_ptr into stack of caller */
12078 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12079 BPF_MOV64_IMM(BPF_REG_5, 0),
12080 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12081 BPF_MOV64_IMM(BPF_REG_5, 1),
12082 /* don't read back pkt_ptr from stack here */
12083 /* write 4 bytes into packet */
12084 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12085 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12086 BPF_EXIT_INSN(),
12087 },
12088 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12089 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080012090 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080012091 },
12092 {
12093 "calls: pkt_ptr spill into caller stack 5",
12094 .insns = {
12095 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12096 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12097 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
12098 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12099 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12100 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12101 BPF_EXIT_INSN(),
12102
12103 /* subprog 1 */
12104 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12105 offsetof(struct __sk_buff, data)),
12106 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12107 offsetof(struct __sk_buff, data_end)),
12108 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12110 BPF_MOV64_IMM(BPF_REG_5, 0),
12111 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12112 /* spill checked pkt_ptr into stack of caller */
12113 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12114 BPF_MOV64_IMM(BPF_REG_5, 1),
12115 /* don't read back pkt_ptr from stack here */
12116 /* write 4 bytes into packet */
12117 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12118 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12119 BPF_EXIT_INSN(),
12120 },
12121 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12122 .errstr = "same insn cannot be used with different",
12123 .result = REJECT,
12124 },
12125 {
12126 "calls: pkt_ptr spill into caller stack 6",
12127 .insns = {
12128 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12129 offsetof(struct __sk_buff, data_end)),
12130 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12132 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12133 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12134 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12135 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12136 BPF_EXIT_INSN(),
12137
12138 /* subprog 1 */
12139 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12140 offsetof(struct __sk_buff, data)),
12141 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12142 offsetof(struct __sk_buff, data_end)),
12143 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12144 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12145 BPF_MOV64_IMM(BPF_REG_5, 0),
12146 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12147 /* spill checked pkt_ptr into stack of caller */
12148 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12149 BPF_MOV64_IMM(BPF_REG_5, 1),
12150 /* don't read back pkt_ptr from stack here */
12151 /* write 4 bytes into packet */
12152 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12153 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12154 BPF_EXIT_INSN(),
12155 },
12156 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12157 .errstr = "R4 invalid mem access",
12158 .result = REJECT,
12159 },
12160 {
12161 "calls: pkt_ptr spill into caller stack 7",
12162 .insns = {
12163 BPF_MOV64_IMM(BPF_REG_2, 0),
12164 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12166 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12167 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12168 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12169 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12170 BPF_EXIT_INSN(),
12171
12172 /* subprog 1 */
12173 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12174 offsetof(struct __sk_buff, data)),
12175 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12176 offsetof(struct __sk_buff, data_end)),
12177 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12179 BPF_MOV64_IMM(BPF_REG_5, 0),
12180 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12181 /* spill checked pkt_ptr into stack of caller */
12182 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12183 BPF_MOV64_IMM(BPF_REG_5, 1),
12184 /* don't read back pkt_ptr from stack here */
12185 /* write 4 bytes into packet */
12186 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12187 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12188 BPF_EXIT_INSN(),
12189 },
12190 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12191 .errstr = "R4 invalid mem access",
12192 .result = REJECT,
12193 },
12194 {
12195 "calls: pkt_ptr spill into caller stack 8",
12196 .insns = {
12197 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12198 offsetof(struct __sk_buff, data)),
12199 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12200 offsetof(struct __sk_buff, data_end)),
12201 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12202 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12203 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12204 BPF_EXIT_INSN(),
12205 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12206 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12207 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12208 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12209 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12210 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12211 BPF_EXIT_INSN(),
12212
12213 /* subprog 1 */
12214 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12215 offsetof(struct __sk_buff, data)),
12216 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12217 offsetof(struct __sk_buff, data_end)),
12218 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12220 BPF_MOV64_IMM(BPF_REG_5, 0),
12221 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12222 /* spill checked pkt_ptr into stack of caller */
12223 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12224 BPF_MOV64_IMM(BPF_REG_5, 1),
12225 /* don't read back pkt_ptr from stack here */
12226 /* write 4 bytes into packet */
12227 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12228 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12229 BPF_EXIT_INSN(),
12230 },
12231 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12232 .result = ACCEPT,
12233 },
12234 {
12235 "calls: pkt_ptr spill into caller stack 9",
12236 .insns = {
12237 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12238 offsetof(struct __sk_buff, data)),
12239 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12240 offsetof(struct __sk_buff, data_end)),
12241 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12243 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12244 BPF_EXIT_INSN(),
12245 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12247 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12248 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12249 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12250 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12251 BPF_EXIT_INSN(),
12252
12253 /* subprog 1 */
12254 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12255 offsetof(struct __sk_buff, data)),
12256 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12257 offsetof(struct __sk_buff, data_end)),
12258 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12260 BPF_MOV64_IMM(BPF_REG_5, 0),
12261 /* spill unchecked pkt_ptr into stack of caller */
12262 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12263 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12264 BPF_MOV64_IMM(BPF_REG_5, 1),
12265 /* don't read back pkt_ptr from stack here */
12266 /* write 4 bytes into packet */
12267 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12268 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12269 BPF_EXIT_INSN(),
12270 },
12271 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12272 .errstr = "invalid access to packet",
12273 .result = REJECT,
12274 },
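	/* The caller passes a pointer to its zero-initialized fp-8 slot; the
	 * subprog either leaves it untouched (ctx == 0 path) or overwrites it
	 * with the result of bpf_map_lookup_elem().  The caller must therefore
	 * treat the slot as "zero or map_value_or_null" and may only
	 * dereference it after the NULL check.
	 */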
12275 {
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080012276 "calls: caller stack init to zero or map_value_or_null",
12277 .insns = {
12278 BPF_MOV64_IMM(BPF_REG_0, 0),
12279 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12280 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12282 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12283 /* fetch map_value_or_null or const_zero from stack */
12284 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12285 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12286 /* store into map_value */
12287 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12288 BPF_EXIT_INSN(),
12289
12290 /* subprog 1 */
12291 /* if (ctx == 0) return; */
12292 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12293 /* else bpf_map_lookup() and *(fp - 8) = r0 */
12294 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12295 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12296 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12297 BPF_LD_MAP_FD(BPF_REG_1, 0),
12298 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12299 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12300 BPF_FUNC_map_lookup_elem),
12301 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12302 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12303 BPF_EXIT_INSN(),
12304 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012305 .fixup_map_hash_8b = { 13 },
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080012306 .result = ACCEPT,
12307 .prog_type = BPF_PROG_TYPE_XDP,
12308 },
12309 {
12310 "calls: stack init to zero and pruning",
12311 .insns = {
12312 /* first make allocated_stack 16 byte */
12313 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
12314 /* now fork the execution such that the false branch
 12315 * of JGT insn will be verified second and it skips zero
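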
12316 * init of fp-8 stack slot. If stack liveness marking
12317 * is missing live_read marks from call map_lookup
12318 * processing then pruning will incorrectly assume
12319 * that fp-8 stack slot was unused in the fall-through
12320 * branch and will accept the program incorrectly
12321 */
12322 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12323 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12324 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12325 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12326 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12327 BPF_LD_MAP_FD(BPF_REG_1, 0),
12328 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12329 BPF_FUNC_map_lookup_elem),
12330 BPF_EXIT_INSN(),
12331 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012332 .fixup_map_hash_48b = { 6 },
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080012333 .errstr = "invalid indirect read from stack off -8+0 size 8",
12334 .result = REJECT,
12335 .prog_type = BPF_PROG_TYPE_XDP,
12336 },
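	/* The next two tests call two subprogs that each return a different map
	 * pointer via BPF_LD_MAP_FD.  Passing the merged pointer to
	 * bpf_map_lookup_elem() is accepted when both maps have a compatible
	 * 48-byte value layout (hash + array), but must be rejected when one of
	 * them is a map-in-map, whose lookups yield inner map pointers rather
	 * than plain memory ("R0 invalid mem access 'map_ptr'").
	 */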
Gianluca Borellofd05e572017-12-23 10:09:55 +000012337 {
Daniel Borkmann06be0862018-06-02 23:06:31 +020012338 "calls: two calls returning different map pointers for lookup (hash, array)",
12339 .insns = {
12340 /* main prog */
12341 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12342 BPF_CALL_REL(11),
12343 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12344 BPF_CALL_REL(12),
12345 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12346 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12347 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12349 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12350 BPF_FUNC_map_lookup_elem),
12351 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12352 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12353 offsetof(struct test_val, foo)),
12354 BPF_MOV64_IMM(BPF_REG_0, 1),
12355 BPF_EXIT_INSN(),
12356 /* subprog 1 */
12357 BPF_LD_MAP_FD(BPF_REG_0, 0),
12358 BPF_EXIT_INSN(),
12359 /* subprog 2 */
12360 BPF_LD_MAP_FD(BPF_REG_0, 0),
12361 BPF_EXIT_INSN(),
12362 },
12363 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Prashant Bhole908142e2018-10-09 10:04:53 +090012364 .fixup_map_hash_48b = { 13 },
12365 .fixup_map_array_48b = { 16 },
Daniel Borkmann06be0862018-06-02 23:06:31 +020012366 .result = ACCEPT,
12367 .retval = 1,
12368 },
12369 {
12370 "calls: two calls returning different map pointers for lookup (hash, map in map)",
12371 .insns = {
12372 /* main prog */
12373 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12374 BPF_CALL_REL(11),
12375 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12376 BPF_CALL_REL(12),
12377 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12378 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12379 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12380 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12381 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12382 BPF_FUNC_map_lookup_elem),
12383 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12384 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12385 offsetof(struct test_val, foo)),
12386 BPF_MOV64_IMM(BPF_REG_0, 1),
12387 BPF_EXIT_INSN(),
12388 /* subprog 1 */
12389 BPF_LD_MAP_FD(BPF_REG_0, 0),
12390 BPF_EXIT_INSN(),
12391 /* subprog 2 */
12392 BPF_LD_MAP_FD(BPF_REG_0, 0),
12393 BPF_EXIT_INSN(),
12394 },
12395 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12396 .fixup_map_in_map = { 16 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012397 .fixup_map_array_48b = { 13 },
Daniel Borkmann06be0862018-06-02 23:06:31 +020012398 .result = REJECT,
12399 .errstr = "R0 invalid mem access 'map_ptr'",
12400 },
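	/* The same question arises for tail calls with prog array maps: loading
	 * different prog arrays on the two branches is accepted for privileged
	 * users but flagged for unprivileged ones ("tail_call abusing
	 * map_ptr"), while loading the same map on both branches is accepted
	 * unconditionally.
	 */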
12401 {
12402 "cond: two branches returning different map pointers for lookup (tail, tail)",
12403 .insns = {
12404 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12405 offsetof(struct __sk_buff, mark)),
12406 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12407 BPF_LD_MAP_FD(BPF_REG_2, 0),
12408 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12409 BPF_LD_MAP_FD(BPF_REG_2, 0),
12410 BPF_MOV64_IMM(BPF_REG_3, 7),
12411 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12412 BPF_FUNC_tail_call),
12413 BPF_MOV64_IMM(BPF_REG_0, 1),
12414 BPF_EXIT_INSN(),
12415 },
12416 .fixup_prog1 = { 5 },
12417 .fixup_prog2 = { 2 },
12418 .result_unpriv = REJECT,
12419 .errstr_unpriv = "tail_call abusing map_ptr",
12420 .result = ACCEPT,
12421 .retval = 42,
12422 },
12423 {
12424 "cond: two branches returning same map pointers for lookup (tail, tail)",
12425 .insns = {
12426 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12427 offsetof(struct __sk_buff, mark)),
12428 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12429 BPF_LD_MAP_FD(BPF_REG_2, 0),
12430 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12431 BPF_LD_MAP_FD(BPF_REG_2, 0),
12432 BPF_MOV64_IMM(BPF_REG_3, 7),
12433 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12434 BPF_FUNC_tail_call),
12435 BPF_MOV64_IMM(BPF_REG_0, 1),
12436 BPF_EXIT_INSN(),
12437 },
12438 .fixup_prog2 = { 2, 5 },
12439 .result_unpriv = ACCEPT,
12440 .result = ACCEPT,
12441 .retval = 42,
12442 },
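	/* Search pruning tests: state pruning must not skip either side of the
	 * 0xbeef comparison.  One path reaches a store through a scalar in R6
	 * ("R6 invalid mem access 'inv'"), and in the second variant one path
	 * reads a stack slot (fp-16) that only the other branch initialized.
	 */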
12443 {
Gianluca Borellofd05e572017-12-23 10:09:55 +000012444 "search pruning: all branches should be verified (nop operation)",
12445 .insns = {
12446 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12447 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12448 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12449 BPF_LD_MAP_FD(BPF_REG_1, 0),
12450 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12451 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12452 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12453 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12454 BPF_MOV64_IMM(BPF_REG_4, 0),
12455 BPF_JMP_A(1),
12456 BPF_MOV64_IMM(BPF_REG_4, 1),
12457 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12458 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12459 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12460 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12461 BPF_MOV64_IMM(BPF_REG_6, 0),
12462 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12463 BPF_EXIT_INSN(),
12464 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012465 .fixup_map_hash_8b = { 3 },
Gianluca Borellofd05e572017-12-23 10:09:55 +000012466 .errstr = "R6 invalid mem access 'inv'",
12467 .result = REJECT,
12468 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12469 },
12470 {
12471 "search pruning: all branches should be verified (invalid stack access)",
12472 .insns = {
12473 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12475 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12476 BPF_LD_MAP_FD(BPF_REG_1, 0),
12477 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12478 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12479 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12480 BPF_MOV64_IMM(BPF_REG_4, 0),
12481 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12482 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12483 BPF_JMP_A(1),
12484 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12485 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12486 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12487 BPF_EXIT_INSN(),
12488 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012489 .fixup_map_hash_8b = { 3 },
Gianluca Borellofd05e572017-12-23 10:09:55 +000012490 .errstr = "invalid read from stack off -16+0 size 8",
12491 .result = REJECT,
12492 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12493 },
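	/* The "jit:" tests are ACCEPT tests whose interesting part is the
	 * .retval check: they are executed after load and verify that the JIT
	 * (or interpreter) handles shifts by 1, mov32-optimized ldimm64 and
	 * mixed 32/64-bit multiplies correctly.
	 */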
Daniel Borkmann23d191a2018-02-24 01:08:03 +010012494 {
12495 "jit: lsh, rsh, arsh by 1",
12496 .insns = {
12497 BPF_MOV64_IMM(BPF_REG_0, 1),
12498 BPF_MOV64_IMM(BPF_REG_1, 0xff),
12499 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12500 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12501 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12502 BPF_EXIT_INSN(),
12503 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12504 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12505 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12506 BPF_EXIT_INSN(),
12507 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12508 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12509 BPF_EXIT_INSN(),
12510 BPF_MOV64_IMM(BPF_REG_0, 2),
12511 BPF_EXIT_INSN(),
12512 },
12513 .result = ACCEPT,
12514 .retval = 2,
12515 },
12516 {
12517 "jit: mov32 for ldimm64, 1",
12518 .insns = {
12519 BPF_MOV64_IMM(BPF_REG_0, 2),
12520 BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12521 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12522 BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12523 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12524 BPF_MOV64_IMM(BPF_REG_0, 1),
12525 BPF_EXIT_INSN(),
12526 },
12527 .result = ACCEPT,
12528 .retval = 2,
12529 },
12530 {
12531 "jit: mov32 for ldimm64, 2",
12532 .insns = {
12533 BPF_MOV64_IMM(BPF_REG_0, 1),
12534 BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12535 BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12536 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12537 BPF_MOV64_IMM(BPF_REG_0, 2),
12538 BPF_EXIT_INSN(),
12539 },
12540 .result = ACCEPT,
12541 .retval = 2,
12542 },
12543 {
12544 "jit: various mul tests",
12545 .insns = {
12546 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12547 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12548 BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
12549 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12550 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12551 BPF_MOV64_IMM(BPF_REG_0, 1),
12552 BPF_EXIT_INSN(),
12553 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12554 BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12555 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12556 BPF_MOV64_IMM(BPF_REG_0, 1),
12557 BPF_EXIT_INSN(),
12558 BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
12559 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12560 BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12561 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12562 BPF_MOV64_IMM(BPF_REG_0, 1),
12563 BPF_EXIT_INSN(),
12564 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12565 BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12566 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12567 BPF_MOV64_IMM(BPF_REG_0, 1),
12568 BPF_EXIT_INSN(),
12569 BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
12570 BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
12571 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12572 BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
12573 BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
12574 BPF_MOV64_IMM(BPF_REG_0, 1),
12575 BPF_EXIT_INSN(),
12576 BPF_MOV64_IMM(BPF_REG_0, 2),
12577 BPF_EXIT_INSN(),
12578 },
12579 .result = ACCEPT,
12580 .retval = 2,
12581 },
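	/* BPF_STX_XADD (atomic add) tests: the access must be naturally aligned
	 * whether it targets the stack or a map value, xadd into pointer types
	 * that do not allow it is rejected outright, and the src/dst registers
	 * must survive the instruction unmodified (checked via .retval).
	 */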
David S. Miller0f3e9c92018-03-06 00:53:44 -050012582 {
Daniel Borkmannca369602018-02-23 22:29:05 +010012583 "xadd/w check unaligned stack",
12584 .insns = {
12585 BPF_MOV64_IMM(BPF_REG_0, 1),
12586 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12587 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
12588 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12589 BPF_EXIT_INSN(),
12590 },
12591 .result = REJECT,
12592 .errstr = "misaligned stack access off",
12593 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12594 },
12595 {
12596 "xadd/w check unaligned map",
12597 .insns = {
12598 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12599 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12601 BPF_LD_MAP_FD(BPF_REG_1, 0),
12602 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12603 BPF_FUNC_map_lookup_elem),
12604 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12605 BPF_EXIT_INSN(),
12606 BPF_MOV64_IMM(BPF_REG_1, 1),
12607 BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
12608 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
12609 BPF_EXIT_INSN(),
12610 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012611 .fixup_map_hash_8b = { 3 },
Daniel Borkmannca369602018-02-23 22:29:05 +010012612 .result = REJECT,
12613 .errstr = "misaligned value access off",
12614 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12615 },
12616 {
12617 "xadd/w check unaligned pkt",
12618 .insns = {
12619 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12620 offsetof(struct xdp_md, data)),
12621 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12622 offsetof(struct xdp_md, data_end)),
12623 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
12624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
12625 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
12626 BPF_MOV64_IMM(BPF_REG_0, 99),
12627 BPF_JMP_IMM(BPF_JA, 0, 0, 6),
12628 BPF_MOV64_IMM(BPF_REG_0, 1),
12629 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12630 BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
12631 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
12632 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
12633 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
12634 BPF_EXIT_INSN(),
12635 },
12636 .result = REJECT,
Joe Stringer9d2be442018-10-02 13:35:31 -070012637 .errstr = "BPF_XADD stores into R2 ctx",
Daniel Borkmannca369602018-02-23 22:29:05 +010012638 .prog_type = BPF_PROG_TYPE_XDP,
12639 },
Yonghong Song2abe611c2018-04-28 22:28:14 -070012640 {
Daniel Borkmannfa47a162018-07-19 18:18:36 +020012641 "xadd/w check whether src/dst got mangled, 1",
12642 .insns = {
12643 BPF_MOV64_IMM(BPF_REG_0, 1),
12644 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12645 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12646 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12647 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12648 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12649 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12650 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12651 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12652 BPF_EXIT_INSN(),
12653 BPF_MOV64_IMM(BPF_REG_0, 42),
12654 BPF_EXIT_INSN(),
12655 },
12656 .result = ACCEPT,
12657 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12658 .retval = 3,
12659 },
12660 {
12661 "xadd/w check whether src/dst got mangled, 2",
12662 .insns = {
12663 BPF_MOV64_IMM(BPF_REG_0, 1),
12664 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12665 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12666 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12667 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12668 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12669 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12670 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12671 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
12672 BPF_EXIT_INSN(),
12673 BPF_MOV64_IMM(BPF_REG_0, 42),
12674 BPF_EXIT_INSN(),
12675 },
12676 .result = ACCEPT,
12677 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12678 .retval = 3,
12679 },
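	/* bpf_get_stack() returns a negative error or the number of bytes
	 * written, so R0 is only known to lie within [-errno, buffer size].
	 * The test below sign-extends the return value, bounds it against the
	 * buffer size and reuses it to derive the pointer and length for a
	 * second bpf_get_stack() call; the verifier must carry that range
	 * through the arithmetic for the helper arguments to be accepted.
	 */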
12680 {
Yonghong Song2abe611c2018-04-28 22:28:14 -070012681 "bpf_get_stack return R0 within range",
12682 .insns = {
12683 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12684 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12685 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12687 BPF_LD_MAP_FD(BPF_REG_1, 0),
12688 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12689 BPF_FUNC_map_lookup_elem),
12690 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
12691 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12692 BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
12693 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12694 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12695 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
12696 BPF_MOV64_IMM(BPF_REG_4, 256),
12697 BPF_EMIT_CALL(BPF_FUNC_get_stack),
12698 BPF_MOV64_IMM(BPF_REG_1, 0),
12699 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
12700 BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
12701 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
12702 BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
12703 BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
12704 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12705 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
12706 BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
12707 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
12708 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
12709 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
12710 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
12711 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12712 BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
12713 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
12714 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
12715 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12716 BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
12717 BPF_MOV64_IMM(BPF_REG_4, 0),
12718 BPF_EMIT_CALL(BPF_FUNC_get_stack),
12719 BPF_EXIT_INSN(),
12720 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012721 .fixup_map_hash_48b = { 4 },
Yonghong Song2abe611c2018-04-28 22:28:14 -070012722 .result = ACCEPT,
12723 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12724 },
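	/* Legacy LD_ABS/LD_IND packet access tests.  These instructions only
	 * support B/H/W sizes (BPF_DW is rejected as "unknown opcode"), use R6
	 * implicitly as the skb pointer, and an out-of-bounds load makes the
	 * program return 0, which together with divide-by-zero yielding 0 is
	 * why the div + abs variants are ACCEPTed with retval 0.
	 */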
Daniel Borkmann93731ef2018-05-04 01:08:13 +020012725 {
12726 "ld_abs: invalid op 1",
12727 .insns = {
12728 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12729 BPF_LD_ABS(BPF_DW, 0),
12730 BPF_EXIT_INSN(),
12731 },
12732 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12733 .result = REJECT,
12734 .errstr = "unknown opcode",
12735 },
12736 {
12737 "ld_abs: invalid op 2",
12738 .insns = {
12739 BPF_MOV32_IMM(BPF_REG_0, 256),
12740 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12741 BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
12742 BPF_EXIT_INSN(),
12743 },
12744 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12745 .result = REJECT,
12746 .errstr = "unknown opcode",
12747 },
12748 {
12749 "ld_abs: nmap reduced",
12750 .insns = {
12751 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12752 BPF_LD_ABS(BPF_H, 12),
12753 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
12754 BPF_LD_ABS(BPF_H, 12),
12755 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
12756 BPF_MOV32_IMM(BPF_REG_0, 18),
12757 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
12758 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
12759 BPF_LD_IND(BPF_W, BPF_REG_7, 14),
12760 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
12761 BPF_MOV32_IMM(BPF_REG_0, 280971478),
12762 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12763 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12764 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
12765 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12766 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
12767 BPF_LD_ABS(BPF_H, 12),
12768 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
12769 BPF_MOV32_IMM(BPF_REG_0, 22),
12770 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12771 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12772 BPF_LD_IND(BPF_H, BPF_REG_7, 14),
12773 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
12774 BPF_MOV32_IMM(BPF_REG_0, 17366),
12775 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
12776 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
12777 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
12778 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12779 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12780 BPF_MOV32_IMM(BPF_REG_0, 256),
12781 BPF_EXIT_INSN(),
12782 BPF_MOV32_IMM(BPF_REG_0, 0),
12783 BPF_EXIT_INSN(),
12784 },
12785 .data = {
12786 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
12787 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
12788 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
12789 },
12790 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12791 .result = ACCEPT,
12792 .retval = 256,
12793 },
12794 {
12795 "ld_abs: div + abs, test 1",
12796 .insns = {
12797 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12798 BPF_LD_ABS(BPF_B, 3),
12799 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12800 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12801 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12802 BPF_LD_ABS(BPF_B, 4),
12803 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12804 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12805 BPF_EXIT_INSN(),
12806 },
12807 .data = {
12808 10, 20, 30, 40, 50,
12809 },
12810 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12811 .result = ACCEPT,
12812 .retval = 10,
12813 },
12814 {
12815 "ld_abs: div + abs, test 2",
12816 .insns = {
12817 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12818 BPF_LD_ABS(BPF_B, 3),
12819 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12820 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12821 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12822 BPF_LD_ABS(BPF_B, 128),
12823 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12824 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12825 BPF_EXIT_INSN(),
12826 },
12827 .data = {
12828 10, 20, 30, 40, 50,
12829 },
12830 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12831 .result = ACCEPT,
12832 .retval = 0,
12833 },
12834 {
12835 "ld_abs: div + abs, test 3",
12836 .insns = {
12837 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12838 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12839 BPF_LD_ABS(BPF_B, 3),
12840 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12841 BPF_EXIT_INSN(),
12842 },
12843 .data = {
12844 10, 20, 30, 40, 50,
12845 },
12846 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12847 .result = ACCEPT,
12848 .retval = 0,
12849 },
12850 {
12851 "ld_abs: div + abs, test 4",
12852 .insns = {
12853 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12854 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12855 BPF_LD_ABS(BPF_B, 256),
12856 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12857 BPF_EXIT_INSN(),
12858 },
12859 .data = {
12860 10, 20, 30, 40, 50,
12861 },
12862 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12863 .result = ACCEPT,
12864 .retval = 0,
12865 },
12866 {
12867 "ld_abs: vlan + abs, test 1",
12868 .insns = { },
12869 .data = {
12870 0x34,
12871 },
12872 .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
12873 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12874 .result = ACCEPT,
12875 .retval = 0xbef,
12876 },
12877 {
12878 "ld_abs: vlan + abs, test 2",
12879 .insns = {
12880 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12881 BPF_LD_ABS(BPF_B, 0),
12882 BPF_LD_ABS(BPF_H, 0),
12883 BPF_LD_ABS(BPF_W, 0),
12884 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
12885 BPF_MOV64_IMM(BPF_REG_6, 0),
12886 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12887 BPF_MOV64_IMM(BPF_REG_2, 1),
12888 BPF_MOV64_IMM(BPF_REG_3, 2),
12889 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12890 BPF_FUNC_skb_vlan_push),
12891 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
12892 BPF_LD_ABS(BPF_B, 0),
12893 BPF_LD_ABS(BPF_H, 0),
12894 BPF_LD_ABS(BPF_W, 0),
12895 BPF_MOV64_IMM(BPF_REG_0, 42),
12896 BPF_EXIT_INSN(),
12897 },
12898 .data = {
12899 0x34,
12900 },
12901 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12902 .result = ACCEPT,
12903 .retval = 42,
12904 },
12905 {
12906 "ld_abs: jump around ld_abs",
12907 .insns = { },
12908 .data = {
12909 10, 11,
12910 },
12911 .fill_helper = bpf_fill_jump_around_ld_abs,
12912 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12913 .result = ACCEPT,
12914 .retval = 10,
12915 },
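	/* The ld_dw tests have empty .insns and rely on .fill_helper
	 * (bpf_fill_rand_ld_dw, defined earlier in this file) to generate long
	 * sequences of BPF_LD_IMM64 instructions with semi-random immediates;
	 * .retval is the xor-folded result the generated program is expected
	 * to return.
	 */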
Daniel Borkmanna82d8cd2018-05-14 23:22:34 +020012916 {
12917 "ld_dw: xor semi-random 64 bit imms, test 1",
12918 .insns = { },
12919 .data = { },
12920 .fill_helper = bpf_fill_rand_ld_dw,
12921 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12922 .result = ACCEPT,
12923 .retval = 4090,
12924 },
12925 {
12926 "ld_dw: xor semi-random 64 bit imms, test 2",
12927 .insns = { },
12928 .data = { },
12929 .fill_helper = bpf_fill_rand_ld_dw,
12930 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12931 .result = ACCEPT,
12932 .retval = 2047,
12933 },
12934 {
12935 "ld_dw: xor semi-random 64 bit imms, test 3",
12936 .insns = { },
12937 .data = { },
12938 .fill_helper = bpf_fill_rand_ld_dw,
12939 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12940 .result = ACCEPT,
12941 .retval = 511,
12942 },
12943 {
12944 "ld_dw: xor semi-random 64 bit imms, test 4",
12945 .insns = { },
12946 .data = { },
12947 .fill_helper = bpf_fill_rand_ld_dw,
12948 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12949 .result = ACCEPT,
12950 .retval = 5,
12951 },
Daniel Borkmann58990d12018-06-07 17:40:03 +020012952 {
12953 "pass unmodified ctx pointer to helper",
12954 .insns = {
12955 BPF_MOV64_IMM(BPF_REG_2, 0),
12956 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12957 BPF_FUNC_csum_update),
12958 BPF_MOV64_IMM(BPF_REG_0, 0),
12959 BPF_EXIT_INSN(),
12960 },
12961 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12962 .result = ACCEPT,
12963 },
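	/* Reference tracking tests.  BPF_SK_LOOKUP is a macro defined earlier
	 * in this file (not shown here) that builds a zeroed bpf_sock_tuple on
	 * the stack and calls bpf_sk_lookup_tcp(), leaving a socket reference
	 * (or NULL) in R0.  The pattern the verifier must enforce, in rough C
	 * form:
	 *
	 *	struct bpf_sock *sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), 0, 0);
	 *	if (sk)
	 *		bpf_sk_release(sk);
	 *
	 * Any path that can exit while still owning the reference (including
	 * via a tail call) must be rejected with "Unreleased reference".
	 */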
12964 {
Joe Stringerb584ab82018-10-02 13:35:38 -070012965 "reference tracking: leak potential reference",
12966 .insns = {
12967 BPF_SK_LOOKUP,
12968 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
12969 BPF_EXIT_INSN(),
12970 },
12971 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12972 .errstr = "Unreleased reference",
12973 .result = REJECT,
12974 },
12975 {
12976 "reference tracking: leak potential reference on stack",
12977 .insns = {
12978 BPF_SK_LOOKUP,
12979 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12980 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12981 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
12982 BPF_MOV64_IMM(BPF_REG_0, 0),
12983 BPF_EXIT_INSN(),
12984 },
12985 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12986 .errstr = "Unreleased reference",
12987 .result = REJECT,
12988 },
12989 {
12990 "reference tracking: leak potential reference on stack 2",
12991 .insns = {
12992 BPF_SK_LOOKUP,
12993 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12994 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12995 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
12996 BPF_MOV64_IMM(BPF_REG_0, 0),
12997 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
12998 BPF_EXIT_INSN(),
12999 },
13000 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13001 .errstr = "Unreleased reference",
13002 .result = REJECT,
13003 },
13004 {
13005 "reference tracking: zero potential reference",
13006 .insns = {
13007 BPF_SK_LOOKUP,
13008 BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
13009 BPF_EXIT_INSN(),
13010 },
13011 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13012 .errstr = "Unreleased reference",
13013 .result = REJECT,
13014 },
13015 {
13016 "reference tracking: copy and zero potential references",
13017 .insns = {
13018 BPF_SK_LOOKUP,
13019 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13020 BPF_MOV64_IMM(BPF_REG_0, 0),
13021 BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
13022 BPF_EXIT_INSN(),
13023 },
13024 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13025 .errstr = "Unreleased reference",
13026 .result = REJECT,
13027 },
13028 {
13029 "reference tracking: release reference without check",
13030 .insns = {
13031 BPF_SK_LOOKUP,
13032 /* reference in r0 may be NULL */
13033 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13034 BPF_MOV64_IMM(BPF_REG_2, 0),
13035 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13036 BPF_EXIT_INSN(),
13037 },
13038 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13039 .errstr = "type=sock_or_null expected=sock",
13040 .result = REJECT,
13041 },
13042 {
13043 "reference tracking: release reference",
13044 .insns = {
13045 BPF_SK_LOOKUP,
13046 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13047 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13048 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13049 BPF_EXIT_INSN(),
13050 },
13051 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13052 .result = ACCEPT,
13053 },
13054 {
13055 "reference tracking: release reference 2",
13056 .insns = {
13057 BPF_SK_LOOKUP,
13058 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13059 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13060 BPF_EXIT_INSN(),
13061 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13062 BPF_EXIT_INSN(),
13063 },
13064 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13065 .result = ACCEPT,
13066 },
13067 {
13068 "reference tracking: release reference twice",
13069 .insns = {
13070 BPF_SK_LOOKUP,
13071 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13072 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13073 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13074 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13075 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13076 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13077 BPF_EXIT_INSN(),
13078 },
13079 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13080 .errstr = "type=inv expected=sock",
13081 .result = REJECT,
13082 },
13083 {
13084 "reference tracking: release reference twice inside branch",
13085 .insns = {
13086 BPF_SK_LOOKUP,
13087 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13088 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13089 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
13090 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13091 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13092 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13093 BPF_EXIT_INSN(),
13094 },
13095 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13096 .errstr = "type=inv expected=sock",
13097 .result = REJECT,
13098 },
13099 {
13100 "reference tracking: alloc, check, free in one subbranch",
13101 .insns = {
13102 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13103 offsetof(struct __sk_buff, data)),
13104 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13105 offsetof(struct __sk_buff, data_end)),
13106 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13108 /* if (offsetof(skb, mark) > data_len) exit; */
13109 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13110 BPF_EXIT_INSN(),
13111 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13112 offsetof(struct __sk_buff, mark)),
13113 BPF_SK_LOOKUP,
13114 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
13115 /* Leak reference in R0 */
13116 BPF_EXIT_INSN(),
13117 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13118 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13119 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13120 BPF_EXIT_INSN(),
13121 },
13122 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13123 .errstr = "Unreleased reference",
13124 .result = REJECT,
13125 },
13126 {
13127 "reference tracking: alloc, check, free in both subbranches",
13128 .insns = {
13129 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13130 offsetof(struct __sk_buff, data)),
13131 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13132 offsetof(struct __sk_buff, data_end)),
13133 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13135 /* if (offsetof(skb, mark) > data_len) exit; */
13136 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13137 BPF_EXIT_INSN(),
13138 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13139 offsetof(struct __sk_buff, mark)),
13140 BPF_SK_LOOKUP,
13141 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
13142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13143 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13144 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13145 BPF_EXIT_INSN(),
13146 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13147 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13148 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13149 BPF_EXIT_INSN(),
13150 },
13151 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13152 .result = ACCEPT,
13153 },
13154 {
13155 "reference tracking in call: free reference in subprog",
13156 .insns = {
13157 BPF_SK_LOOKUP,
13158 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13159 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13160 BPF_MOV64_IMM(BPF_REG_0, 0),
13161 BPF_EXIT_INSN(),
13162
13163 /* subprog 1 */
13164 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13166 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13167 BPF_EXIT_INSN(),
13168 },
13169 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13170 .result = ACCEPT,
13171 },
13172 {
Daniel Borkmann58990d12018-06-07 17:40:03 +020013173 "pass modified ctx pointer to helper, 1",
13174 .insns = {
13175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13176 BPF_MOV64_IMM(BPF_REG_2, 0),
13177 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13178 BPF_FUNC_csum_update),
13179 BPF_MOV64_IMM(BPF_REG_0, 0),
13180 BPF_EXIT_INSN(),
13181 },
13182 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13183 .result = REJECT,
13184 .errstr = "dereference of modified ctx ptr",
13185 },
13186 {
13187 "pass modified ctx pointer to helper, 2",
13188 .insns = {
13189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13191 BPF_FUNC_get_socket_cookie),
13192 BPF_MOV64_IMM(BPF_REG_0, 0),
13193 BPF_EXIT_INSN(),
13194 },
13195 .result_unpriv = REJECT,
13196 .result = REJECT,
13197 .errstr_unpriv = "dereference of modified ctx ptr",
13198 .errstr = "dereference of modified ctx ptr",
13199 },
13200 {
13201 "pass modified ctx pointer to helper, 3",
13202 .insns = {
13203 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
13204 BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
13205 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
13206 BPF_MOV64_IMM(BPF_REG_2, 0),
13207 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13208 BPF_FUNC_csum_update),
13209 BPF_MOV64_IMM(BPF_REG_0, 0),
13210 BPF_EXIT_INSN(),
13211 },
13212 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13213 .result = REJECT,
13214 .errstr = "variable ctx access var_off=(0x0; 0x4)",
13215 },
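	/* mov64 with src == dst must preserve the register's known bounds (here
	 * a constant 0), otherwise the following pointer addition onto the ctx
	 * register would be rejected; the src != dst variant is the control
	 * case.
	 */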
Arthur Fabrefbeb1602018-07-31 18:17:22 +010013216 {
13217 "mov64 src == dst",
13218 .insns = {
13219 BPF_MOV64_IMM(BPF_REG_2, 0),
13220 BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
13221 // Check bounds are OK
13222 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13223 BPF_MOV64_IMM(BPF_REG_0, 0),
13224 BPF_EXIT_INSN(),
13225 },
13226 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13227 .result = ACCEPT,
13228 },
13229 {
13230 "mov64 src != dst",
13231 .insns = {
13232 BPF_MOV64_IMM(BPF_REG_3, 0),
13233 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
13234 // Check bounds are OK
13235 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13236 BPF_MOV64_IMM(BPF_REG_0, 0),
13237 BPF_EXIT_INSN(),
13238 },
13239 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13240 .result = ACCEPT,
13241 },
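	/* More reference tracking across bpf-to-bpf calls: a reference acquired
	 * in one frame may be released in another, but must be released exactly
	 * once on every path, including when the pointer travels through the
	 * caller's stack.
	 */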
Joe Stringerb584ab82018-10-02 13:35:38 -070013242 {
13243 "reference tracking in call: free reference in subprog and outside",
13244 .insns = {
13245 BPF_SK_LOOKUP,
13246 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13247 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13248 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13249 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13250 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13251 BPF_EXIT_INSN(),
13252
13253 /* subprog 1 */
13254 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13255 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13256 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13257 BPF_EXIT_INSN(),
13258 },
13259 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13260 .errstr = "type=inv expected=sock",
13261 .result = REJECT,
13262 },
13263 {
13264 "reference tracking in call: alloc & leak reference in subprog",
13265 .insns = {
13266 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13268 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13269 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13270 BPF_MOV64_IMM(BPF_REG_0, 0),
13271 BPF_EXIT_INSN(),
13272
13273 /* subprog 1 */
13274 BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
13275 BPF_SK_LOOKUP,
13276 /* spill unchecked sk_ptr into stack of caller */
13277 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13278 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13279 BPF_EXIT_INSN(),
13280 },
13281 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13282 .errstr = "Unreleased reference",
13283 .result = REJECT,
13284 },
13285 {
13286 "reference tracking in call: alloc in subprog, release outside",
13287 .insns = {
13288 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13289 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13290 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13291 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13292 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13293 BPF_EXIT_INSN(),
13294
13295 /* subprog 1 */
13296 BPF_SK_LOOKUP,
13297 BPF_EXIT_INSN(), /* return sk */
13298 },
13299 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13300 .retval = POINTER_VALUE,
13301 .result = ACCEPT,
13302 },
13303 {
13304 "reference tracking in call: sk_ptr leak into caller stack",
13305 .insns = {
13306 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13308 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13309 BPF_MOV64_IMM(BPF_REG_0, 0),
13310 BPF_EXIT_INSN(),
13311
13312 /* subprog 1 */
13313 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13315 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13316 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13317 /* spill unchecked sk_ptr into stack of caller */
13318 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13319 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13320 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13321 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13322 BPF_EXIT_INSN(),
13323
13324 /* subprog 2 */
13325 BPF_SK_LOOKUP,
13326 BPF_EXIT_INSN(),
13327 },
13328 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13329 .errstr = "Unreleased reference",
13330 .result = REJECT,
13331 },
13332 {
13333 "reference tracking in call: sk_ptr spill into caller stack",
13334 .insns = {
13335 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13338 BPF_MOV64_IMM(BPF_REG_0, 0),
13339 BPF_EXIT_INSN(),
13340
13341 /* subprog 1 */
13342 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13344 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13345 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
13346 /* spill unchecked sk_ptr into stack of caller */
13347 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13349 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13350 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13351 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13352 /* now the sk_ptr is verified, free the reference */
13353 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
13354 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13355 BPF_EXIT_INSN(),
13356
13357 /* subprog 2 */
13358 BPF_SK_LOOKUP,
13359 BPF_EXIT_INSN(),
13360 },
13361 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13362 .result = ACCEPT,
13363 },
13364 {
13365 "reference tracking: allow LD_ABS",
13366 .insns = {
13367 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13368 BPF_SK_LOOKUP,
13369 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13370 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13371 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13372 BPF_LD_ABS(BPF_B, 0),
13373 BPF_LD_ABS(BPF_H, 0),
13374 BPF_LD_ABS(BPF_W, 0),
13375 BPF_EXIT_INSN(),
13376 },
13377 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13378 .result = ACCEPT,
13379 },
13380 {
13381 "reference tracking: forbid LD_ABS while holding reference",
13382 .insns = {
13383 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13384 BPF_SK_LOOKUP,
13385 BPF_LD_ABS(BPF_B, 0),
13386 BPF_LD_ABS(BPF_H, 0),
13387 BPF_LD_ABS(BPF_W, 0),
13388 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13389 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13390 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13391 BPF_EXIT_INSN(),
13392 },
13393 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13394 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13395 .result = REJECT,
13396 },
13397 {
13398 "reference tracking: allow LD_IND",
13399 .insns = {
13400 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13401 BPF_SK_LOOKUP,
13402 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13403 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13404 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13405 BPF_MOV64_IMM(BPF_REG_7, 1),
13406 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13407 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13408 BPF_EXIT_INSN(),
13409 },
13410 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13411 .result = ACCEPT,
13412 .retval = 1,
13413 },
13414 {
13415 "reference tracking: forbid LD_IND while holding reference",
13416 .insns = {
13417 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13418 BPF_SK_LOOKUP,
13419 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
13420 BPF_MOV64_IMM(BPF_REG_7, 1),
13421 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13422 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13423 BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
13424 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13425 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13426 BPF_EXIT_INSN(),
13427 },
13428 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13429 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13430 .result = REJECT,
13431 },
13432 {
13433 "reference tracking: check reference or tail call",
13434 .insns = {
13435 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13436 BPF_SK_LOOKUP,
13437 /* if (sk) bpf_sk_release() */
13438 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13439 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
13440 /* bpf_tail_call() */
13441 BPF_MOV64_IMM(BPF_REG_3, 2),
13442 BPF_LD_MAP_FD(BPF_REG_2, 0),
13443 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13445 BPF_FUNC_tail_call),
13446 BPF_MOV64_IMM(BPF_REG_0, 0),
13447 BPF_EXIT_INSN(),
13448 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13449 BPF_EXIT_INSN(),
13450 },
13451 .fixup_prog1 = { 17 },
13452 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13453 .result = ACCEPT,
13454 },
13455 {
13456 "reference tracking: release reference then tail call",
13457 .insns = {
13458 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13459 BPF_SK_LOOKUP,
13460 /* if (sk) bpf_sk_release() */
13461 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13462 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13463 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13464 /* bpf_tail_call() */
13465 BPF_MOV64_IMM(BPF_REG_3, 2),
13466 BPF_LD_MAP_FD(BPF_REG_2, 0),
13467 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13468 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13469 BPF_FUNC_tail_call),
13470 BPF_MOV64_IMM(BPF_REG_0, 0),
13471 BPF_EXIT_INSN(),
13472 },
13473 .fixup_prog1 = { 18 },
13474 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13475 .result = ACCEPT,
13476 },
13477 {
13478 "reference tracking: leak possible reference over tail call",
13479 .insns = {
13480 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13481 /* Look up socket and store in REG_6 */
13482 BPF_SK_LOOKUP,
13483 /* bpf_tail_call() */
13484 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13485 BPF_MOV64_IMM(BPF_REG_3, 2),
13486 BPF_LD_MAP_FD(BPF_REG_2, 0),
13487 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13488 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13489 BPF_FUNC_tail_call),
13490 BPF_MOV64_IMM(BPF_REG_0, 0),
13491 /* if (sk) bpf_sk_release() */
13492 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13493 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13494 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13495 BPF_EXIT_INSN(),
13496 },
13497 .fixup_prog1 = { 16 },
13498 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13499 .errstr = "tail_call would lead to reference leak",
13500 .result = REJECT,
13501 },
13502 {
13503 "reference tracking: leak checked reference over tail call",
13504 .insns = {
13505 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13506 /* Look up socket and store in REG_6 */
13507 BPF_SK_LOOKUP,
13508 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13509 /* if (!sk) goto end */
13510 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
13511 /* bpf_tail_call() */
13512 BPF_MOV64_IMM(BPF_REG_3, 0),
13513 BPF_LD_MAP_FD(BPF_REG_2, 0),
13514 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13515 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13516 BPF_FUNC_tail_call),
13517 BPF_MOV64_IMM(BPF_REG_0, 0),
13518 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13519 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13520 BPF_EXIT_INSN(),
13521 },
13522 .fixup_prog1 = { 17 },
13523 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13524 .errstr = "tail_call would lead to reference leak",
13525 .result = REJECT,
13526 },
13527 {
13528 "reference tracking: mangle and release sock_or_null",
13529 .insns = {
13530 BPF_SK_LOOKUP,
13531 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13532 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13533 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13534 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13535 BPF_EXIT_INSN(),
13536 },
13537 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13538 .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
13539 .result = REJECT,
13540 },
13541 {
13542 "reference tracking: mangle and release sock",
13543 .insns = {
13544 BPF_SK_LOOKUP,
13545 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13546 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13548 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13549 BPF_EXIT_INSN(),
13550 },
13551 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13552 .errstr = "R1 pointer arithmetic on sock prohibited",
13553 .result = REJECT,
13554 },
13555 {
13556 "reference tracking: access member",
13557 .insns = {
13558 BPF_SK_LOOKUP,
13559 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13560 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13561 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13562 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13563 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13564 BPF_EXIT_INSN(),
13565 },
13566 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13567 .result = ACCEPT,
13568 },
13569 {
13570 "reference tracking: write to member",
13571 .insns = {
13572 BPF_SK_LOOKUP,
13573 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13574 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
13575 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13576 BPF_LD_IMM64(BPF_REG_2, 42),
13577 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
13578 offsetof(struct bpf_sock, mark)),
13579 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13580 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13581 BPF_LD_IMM64(BPF_REG_0, 0),
13582 BPF_EXIT_INSN(),
13583 },
13584 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13585 .errstr = "cannot write into socket",
13586 .result = REJECT,
13587 },
13588 {
13589 "reference tracking: invalid 64-bit access of member",
13590 .insns = {
13591 BPF_SK_LOOKUP,
13592 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13593 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13594 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
13595 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13596 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13597 BPF_EXIT_INSN(),
13598 },
13599 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13600 .errstr = "invalid bpf_sock access off=0 size=8",
13601 .result = REJECT,
13602 },
13603 {
13604 "reference tracking: access after release",
13605 .insns = {
13606 BPF_SK_LOOKUP,
13607 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13608 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13609 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13610 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
13611 BPF_EXIT_INSN(),
13612 },
13613 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13614 .errstr = "!read_ok",
13615 .result = REJECT,
13616 },
13617 {
13618 "reference tracking: direct access for lookup",
13619 .insns = {
13620 /* Check that the packet is at least 64B long */
13621 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13622 offsetof(struct __sk_buff, data)),
13623 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13624 offsetof(struct __sk_buff, data_end)),
13625 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
13627 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
13628 /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
13629 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
13630 BPF_MOV64_IMM(BPF_REG_4, 0),
13631 BPF_MOV64_IMM(BPF_REG_5, 0),
13632 BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
13633 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13634 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13635 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13636 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13637 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13638 BPF_EXIT_INSN(),
13639 },
13640 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13641 .result = ACCEPT,
13642 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013643};
13644
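/* .insns arrays are zero-padded up to MAX_INSNS, so the real program length
 * is found by scanning backwards for the last non-zero instruction.
 */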
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013645static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013646{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013647 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013648
13649 for (len = MAX_INSNS - 1; len > 0; --len)
13650 if (fp[len].code != 0 || fp[len].imm != 0)
13651 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013652 return len + 1;
13653}
13654
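/* Generic map creation helper: hash maps are created with BPF_F_NO_PREALLOC,
 * every other map type with no flags.
 */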
Daniel Borkmann06be0862018-06-02 23:06:31 +020013655static int create_map(uint32_t type, uint32_t size_key,
13656 uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013657{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013658 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013659
Daniel Borkmann06be0862018-06-02 23:06:31 +020013660 fd = bpf_create_map(type, size_key, size_value, max_elem,
13661 type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013662 if (fd < 0)
13663 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -070013664
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013665 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070013666}
13667
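/* Helpers for the tail-call tests: create_prog_array() builds a prog
 * array and populates it with two dummy programs of the test's program
 * type.  dummy1 simply returns 42; dummy2 tail-calls into slot 'idx' of
 * the same array and returns 41 if the tail call does not take place.
 */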
Joe Stringer0c586072018-10-02 13:35:37 -070013668static int create_prog_dummy1(enum bpf_prog_type prog_type)
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013669{
13670 struct bpf_insn prog[] = {
13671 BPF_MOV64_IMM(BPF_REG_0, 42),
13672 BPF_EXIT_INSN(),
13673 };
13674
Joe Stringer0c586072018-10-02 13:35:37 -070013675 return bpf_load_program(prog_type, prog,
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013676 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
13677}
13678
Joe Stringer0c586072018-10-02 13:35:37 -070013679static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013680{
13681 struct bpf_insn prog[] = {
13682 BPF_MOV64_IMM(BPF_REG_3, idx),
13683 BPF_LD_MAP_FD(BPF_REG_2, mfd),
13684 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13685 BPF_FUNC_tail_call),
13686 BPF_MOV64_IMM(BPF_REG_0, 41),
13687 BPF_EXIT_INSN(),
13688 };
13689
Joe Stringer0c586072018-10-02 13:35:37 -070013690 return bpf_load_program(prog_type, prog,
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013691 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
13692}
13693
Joe Stringer0c586072018-10-02 13:35:37 -070013694static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
13695 int p1key)
Alexei Starovoitovbf508872015-10-07 22:23:23 -070013696{
Daniel Borkmann06be0862018-06-02 23:06:31 +020013697 int p2key = 1;
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013698 int mfd, p1fd, p2fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070013699
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013700 mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
Daniel Borkmann06be0862018-06-02 23:06:31 +020013701 sizeof(int), max_elem, 0);
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013702 if (mfd < 0) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013703 printf("Failed to create prog array '%s'!\n", strerror(errno));
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013704 return -1;
13705 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013706
Joe Stringer0c586072018-10-02 13:35:37 -070013707 p1fd = create_prog_dummy1(prog_type);
13708 p2fd = create_prog_dummy2(prog_type, mfd, p2key);
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013709 if (p1fd < 0 || p2fd < 0)
13710 goto out;
13711 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
13712 goto out;
13713 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
13714 goto out;
13715 close(p2fd);
13716 close(p1fd);
13717
13718 return mfd;
13719out:
13720 close(p2fd);
13721 close(p1fd);
13722 close(mfd);
13723 return -1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013724}
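
/* A rough sketch of how a test entry consumes the prog array above, in
 * the style of the tail-call tests (the instruction index in fixup_prog1
 * is illustrative):
 *
 *	{
 *		"example: tail call via prog array",
 *		.insns = {
 *			BPF_MOV64_IMM(BPF_REG_3, 0),
 *			BPF_LD_MAP_FD(BPF_REG_2, 0),
 *			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 *				     BPF_FUNC_tail_call),
 *			BPF_MOV64_IMM(BPF_REG_0, 1),
 *			BPF_EXIT_INSN(),
 *		},
 *		.fixup_prog1 = { 1 },
 *		.result = ACCEPT,
 *		.retval = 42,
 *	},
 *
 * do_test_fixup() overwrites the imm of the BPF_LD_MAP_FD placeholder at
 * insn 1 with the fd returned by create_prog_array(); slot 0 of that
 * array holds dummy1, so a successful tail call returns 42.
 */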
13725
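/* For the map-in-map tests the outer map is a one-slot ARRAY_OF_MAPS;
 * the inner array fd passed to bpf_create_map_in_map() only acts as a
 * template describing the inner map, so it can be closed as soon as the
 * outer map has been created.
 */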
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013726static int create_map_in_map(void)
13727{
13728 int inner_map_fd, outer_map_fd;
13729
13730 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
13731 sizeof(int), 1, 0);
13732 if (inner_map_fd < 0) {
13733 printf("Failed to create array '%s'!\n", strerror(errno));
13734 return inner_map_fd;
13735 }
13736
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070013737 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013738 sizeof(int), inner_map_fd, 1, 0);
13739 if (outer_map_fd < 0)
13740 printf("Failed to create array of maps '%s'!\n",
13741 strerror(errno));
13742
13743 close(inner_map_fd);
13744
13745 return outer_map_fd;
13746}
13747
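/* Cgroup storage maps are keyed by struct bpf_cgroup_storage_key and
 * carry a TEST_DATA_LEN-byte value; the same helper covers both the
 * shared and the per-cpu flavour.
 */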
Roman Gushchina3c60542018-09-28 14:45:53 +000013748static int create_cgroup_storage(bool percpu)
Roman Gushchind4c9f572018-08-02 14:27:28 -070013749{
Roman Gushchina3c60542018-09-28 14:45:53 +000013750 enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
13751 BPF_MAP_TYPE_CGROUP_STORAGE;
Roman Gushchind4c9f572018-08-02 14:27:28 -070013752 int fd;
13753
Roman Gushchina3c60542018-09-28 14:45:53 +000013754 fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
Roman Gushchind4c9f572018-08-02 14:27:28 -070013755 TEST_DATA_LEN, 0, 0);
13756 if (fd < 0)
Roman Gushchina3c60542018-09-28 14:45:53 +000013757 printf("Failed to create cgroup storage '%s'!\n",
13758 strerror(errno));
Roman Gushchind4c9f572018-08-02 14:27:28 -070013759
13760 return fd;
13761}
13762
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013763static char bpf_vlog[UINT_MAX >> 8];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013764
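/* do_test_fixup() runs before the program is loaded: it invokes the
 * test's optional fill_helper callback and then patches every map-fd
 * placeholder instruction listed in the fixup_* arrays with a freshly
 * created map (or prog array) fd.
 */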
Joe Stringer0c586072018-10-02 13:35:37 -070013765static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
13766 struct bpf_insn *prog, int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013767{
Prashant Bhole908142e2018-10-09 10:04:53 +090013768 int *fixup_map_hash_8b = test->fixup_map_hash_8b;
13769 int *fixup_map_hash_48b = test->fixup_map_hash_48b;
13770 int *fixup_map_hash_16b = test->fixup_map_hash_16b;
13771 int *fixup_map_array_48b = test->fixup_map_array_48b;
Prashant Bhole7c85c442018-10-09 10:04:54 +090013772 int *fixup_map_sockmap = test->fixup_map_sockmap;
13773 int *fixup_map_sockhash = test->fixup_map_sockhash;
13774 int *fixup_map_xskmap = test->fixup_map_xskmap;
13775 int *fixup_map_stacktrace = test->fixup_map_stacktrace;
Daniel Borkmann06be0862018-06-02 23:06:31 +020013776 int *fixup_prog1 = test->fixup_prog1;
13777 int *fixup_prog2 = test->fixup_prog2;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013778 int *fixup_map_in_map = test->fixup_map_in_map;
Roman Gushchind4c9f572018-08-02 14:27:28 -070013779 int *fixup_cgroup_storage = test->fixup_cgroup_storage;
Roman Gushchina3c60542018-09-28 14:45:53 +000013780 int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013781
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013782 if (test->fill_helper)
13783 test->fill_helper(test);
13784
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013785	/* Allocating hash tables with a single element is fine here,
13786	 * since we only exercise the verifier and never do a runtime
13787	 * lookup, so the only thing that really matters is the value size.
13788	 */
Prashant Bhole908142e2018-10-09 10:04:53 +090013789 if (*fixup_map_hash_8b) {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013790 map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13791 sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013792 do {
Prashant Bhole908142e2018-10-09 10:04:53 +090013793 prog[*fixup_map_hash_8b].imm = map_fds[0];
13794 fixup_map_hash_8b++;
13795 } while (*fixup_map_hash_8b);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013796 }
13797
Prashant Bhole908142e2018-10-09 10:04:53 +090013798 if (*fixup_map_hash_48b) {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013799 map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13800 sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013801 do {
Prashant Bhole908142e2018-10-09 10:04:53 +090013802 prog[*fixup_map_hash_48b].imm = map_fds[1];
13803 fixup_map_hash_48b++;
13804 } while (*fixup_map_hash_48b);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013805 }
13806
Prashant Bhole908142e2018-10-09 10:04:53 +090013807 if (*fixup_map_hash_16b) {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013808 map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13809 sizeof(struct other_val), 1);
Paul Chaignon5f90dd62018-04-24 15:08:19 +020013810 do {
Prashant Bhole908142e2018-10-09 10:04:53 +090013811 prog[*fixup_map_hash_16b].imm = map_fds[2];
13812 fixup_map_hash_16b++;
13813 } while (*fixup_map_hash_16b);
Paul Chaignon5f90dd62018-04-24 15:08:19 +020013814 }
13815
Prashant Bhole908142e2018-10-09 10:04:53 +090013816 if (*fixup_map_array_48b) {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013817 map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
13818 sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013819 do {
Prashant Bhole908142e2018-10-09 10:04:53 +090013820 prog[*fixup_map_array_48b].imm = map_fds[3];
13821 fixup_map_array_48b++;
13822 } while (*fixup_map_array_48b);
Daniel Borkmann06be0862018-06-02 23:06:31 +020013823 }
13824
13825 if (*fixup_prog1) {
Joe Stringer0c586072018-10-02 13:35:37 -070013826 map_fds[4] = create_prog_array(prog_type, 4, 0);
Daniel Borkmann06be0862018-06-02 23:06:31 +020013827 do {
13828 prog[*fixup_prog1].imm = map_fds[4];
13829 fixup_prog1++;
13830 } while (*fixup_prog1);
13831 }
13832
13833 if (*fixup_prog2) {
Joe Stringer0c586072018-10-02 13:35:37 -070013834 map_fds[5] = create_prog_array(prog_type, 8, 7);
Daniel Borkmann06be0862018-06-02 23:06:31 +020013835 do {
13836 prog[*fixup_prog2].imm = map_fds[5];
13837 fixup_prog2++;
13838 } while (*fixup_prog2);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013839 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013840
13841 if (*fixup_map_in_map) {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013842 map_fds[6] = create_map_in_map();
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013843 do {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013844 prog[*fixup_map_in_map].imm = map_fds[6];
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013845 fixup_map_in_map++;
13846 } while (*fixup_map_in_map);
13847 }
Roman Gushchind4c9f572018-08-02 14:27:28 -070013848
13849 if (*fixup_cgroup_storage) {
Roman Gushchina3c60542018-09-28 14:45:53 +000013850 map_fds[7] = create_cgroup_storage(false);
Roman Gushchind4c9f572018-08-02 14:27:28 -070013851 do {
13852 prog[*fixup_cgroup_storage].imm = map_fds[7];
13853 fixup_cgroup_storage++;
13854 } while (*fixup_cgroup_storage);
13855 }
Roman Gushchina3c60542018-09-28 14:45:53 +000013856
13857 if (*fixup_percpu_cgroup_storage) {
13858 map_fds[8] = create_cgroup_storage(true);
13859 do {
13860 prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
13861 fixup_percpu_cgroup_storage++;
13862 } while (*fixup_percpu_cgroup_storage);
13863 }
Prashant Bhole7c85c442018-10-09 10:04:54 +090013864 if (*fixup_map_sockmap) {
13865 map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
13866 sizeof(int), 1);
13867 do {
13868 prog[*fixup_map_sockmap].imm = map_fds[9];
13869 fixup_map_sockmap++;
13870 } while (*fixup_map_sockmap);
13871 }
13872 if (*fixup_map_sockhash) {
13873 map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
13874 sizeof(int), 1);
13875 do {
13876 prog[*fixup_map_sockhash].imm = map_fds[10];
13877 fixup_map_sockhash++;
13878 } while (*fixup_map_sockhash);
13879 }
13880 if (*fixup_map_xskmap) {
13881 map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
13882 sizeof(int), 1);
13883 do {
13884 prog[*fixup_map_xskmap].imm = map_fds[11];
13885 fixup_map_xskmap++;
13886 } while (*fixup_map_xskmap);
13887 }
13888 if (*fixup_map_stacktrace) {
13889		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(__u32),
13890					 sizeof(__u64), 1);
13891 do {
13892 prog[*fixup_map_stacktrace].imm = map_fds[12];
13893 fixup_map_stacktrace++;
13894		} while (*fixup_map_stacktrace);
13895 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013896}
13897
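/* Load one test program and compare the outcome with the expectations
 * recorded in the test: the verdict (ACCEPT/REJECT) and, for rejections,
 * a substring that must appear in the verifier log.  If the program
 * loads, it is also run once through bpf_prog_test_run() and the return
 * value is checked against test->retval.
 */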
13898static void do_test_single(struct bpf_test *test, bool unpriv,
13899 int *passes, int *errors)
13900{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020013901 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013902 int prog_len, prog_type = test->prog_type;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013903 struct bpf_insn *prog = test->insns;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013904 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013905 const char *expected_err;
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080013906 uint32_t retval;
13907 int i, err;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013908
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013909 for (i = 0; i < MAX_NR_MAPS; i++)
13910 map_fds[i] = -1;
13911
Joe Stringer0c586072018-10-02 13:35:37 -070013912 if (!prog_type)
13913 prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
13914 do_test_fixup(test, prog_type, prog, map_fds);
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013915 prog_len = probe_filter_length(prog);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013916
Joe Stringer0c586072018-10-02 13:35:37 -070013917 fd_prog = bpf_verify_program(prog_type, prog, prog_len,
13918 test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmannd6554902017-07-21 00:00:22 +020013919 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013920
13921 expected_ret = unpriv && test->result_unpriv != UNDEF ?
13922 test->result_unpriv : test->result;
13923 expected_err = unpriv && test->errstr_unpriv ?
13924 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020013925
13926 reject_from_alignment = fd_prog < 0 &&
13927 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
13928 strstr(bpf_vlog, "Unknown alignment.");
13929#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13930 if (reject_from_alignment) {
13931 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
13932 strerror(errno));
13933 goto fail_log;
13934 }
13935#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013936 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020013937 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013938 printf("FAIL\nFailed to load prog '%s'!\n",
13939 strerror(errno));
13940 goto fail_log;
13941 }
13942 } else {
13943 if (fd_prog >= 0) {
13944 printf("FAIL\nUnexpected success to load!\n");
13945 goto fail_log;
13946 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020013947 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Joe Stringer95f87a92018-02-14 13:50:34 -080013948 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
13949 expected_err, bpf_vlog);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013950 goto fail_log;
13951 }
13952 }
13953
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080013954 if (fd_prog >= 0) {
Daniel Borkmann6e6fddc2018-07-11 15:30:14 +020013955 __u8 tmp[TEST_DATA_LEN << 2];
13956 __u32 size_tmp = sizeof(tmp);
13957
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013958 err = bpf_prog_test_run(fd_prog, 1, test->data,
Daniel Borkmann6e6fddc2018-07-11 15:30:14 +020013959 sizeof(test->data), tmp, &size_tmp,
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013960 &retval, NULL);
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080013961 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
13962 printf("Unexpected bpf_prog_test_run error\n");
13963 goto fail_log;
13964 }
13965 if (!err && retval != test->retval &&
13966 test->retval != POINTER_VALUE) {
13967 printf("FAIL retval %d != %d\n", retval, test->retval);
13968 goto fail_log;
13969 }
13970 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013971 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020013972 printf("OK%s\n", reject_from_alignment ?
13973 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013974close_fds:
13975 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013976 for (i = 0; i < MAX_NR_MAPS; i++)
13977 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013978 sched_yield();
13979 return;
13980fail_log:
13981 (*errors)++;
13982 printf("%s", bpf_vlog);
13983 goto close_fds;
13984}
13985
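/* The unprivileged pass does not switch users: set_admin(false) merely
 * drops CAP_SYS_ADMIN from the effective capability set around a test so
 * that the kernel applies its unprivileged verifier restrictions, and
 * set_admin(true) restores it afterwards.
 */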
Mickaël Salaünd02d8982017-02-10 00:21:37 +010013986static bool is_admin(void)
13987{
13988 cap_t caps;
13989 cap_flag_value_t sysadmin = CAP_CLEAR;
13990 const cap_value_t cap_val = CAP_SYS_ADMIN;
13991
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080013992#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +010013993 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
13994 perror("cap_get_flag");
13995 return false;
13996 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080013997#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +010013998 caps = cap_get_proc();
13999 if (!caps) {
14000 perror("cap_get_proc");
14001 return false;
14002 }
14003 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
14004 perror("cap_get_flag");
14005 if (cap_free(caps))
14006 perror("cap_free");
14007 return (sysadmin == CAP_SET);
14008}
14009
14010static int set_admin(bool admin)
14011{
14012 cap_t caps;
14013 const cap_value_t cap_val = CAP_SYS_ADMIN;
14014 int ret = -1;
14015
14016 caps = cap_get_proc();
14017 if (!caps) {
14018 perror("cap_get_proc");
14019 return -1;
14020 }
14021 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
14022 admin ? CAP_SET : CAP_CLEAR)) {
14023 perror("cap_set_flag");
14024 goto out;
14025 }
14026 if (cap_set_proc(caps)) {
14027 perror("cap_set_proc");
14028 goto out;
14029 }
14030 ret = 0;
14031out:
14032 if (cap_free(caps))
14033 perror("cap_free");
14034 return ret;
14035}
14036
Joe Stringer0a6748742018-02-14 13:50:36 -080014037static void get_unpriv_disabled(void)
14038{
14039 char buf[2];
14040 FILE *fd;
14041
14042 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
Jesper Dangaard Brouerdeea8122018-05-17 19:39:31 +020014043 if (!fd) {
14044 perror("fopen /proc/sys/"UNPRIV_SYSCTL);
14045 unpriv_disabled = true;
14046 return;
14047 }
Joe Stringer0a6748742018-02-14 13:50:36 -080014048 if (fgets(buf, 2, fd) == buf && atoi(buf))
14049 unpriv_disabled = true;
14050 fclose(fd);
14051}
14052
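/* Output lines are tagged "#N/u" for the unprivileged run (only done for
 * tests without an explicit prog_type) and "#N/p" for the privileged
 * run; runs that cannot happen in the current configuration are counted
 * as SKIP.
 */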
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020014053static int do_test(bool unpriv, unsigned int from, unsigned int to)
14054{
Joe Stringerd0a0e492018-02-14 13:50:35 -080014055 int i, passes = 0, errors = 0, skips = 0;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020014056
14057 for (i = from; i < to; i++) {
14058 struct bpf_test *test = &tests[i];
14059
14060		/* Only tests without an explicit prog_type can be loaded by
14061		 * non-root; the unprivileged pass runs (or is skipped) for those.
14062		 */
Joe Stringer0a6748742018-02-14 13:50:36 -080014063 if (!test->prog_type && unpriv_disabled) {
14064 printf("#%d/u %s SKIP\n", i, test->descr);
14065 skips++;
14066 } else if (!test->prog_type) {
Mickaël Salaünd02d8982017-02-10 00:21:37 +010014067 if (!unpriv)
14068 set_admin(false);
14069 printf("#%d/u %s ", i, test->descr);
14070 do_test_single(test, true, &passes, &errors);
14071 if (!unpriv)
14072 set_admin(true);
14073 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020014074
Joe Stringerd0a0e492018-02-14 13:50:35 -080014075 if (unpriv) {
14076 printf("#%d/p %s SKIP\n", i, test->descr);
14077 skips++;
14078 } else {
Mickaël Salaünd02d8982017-02-10 00:21:37 +010014079 printf("#%d/p %s ", i, test->descr);
14080 do_test_single(test, false, &passes, &errors);
14081 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020014082 }
14083
Joe Stringerd0a0e492018-02-14 13:50:35 -080014084 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
14085 skips, errors);
Jesper Dangaard Brouerefe5f9c2017-06-13 15:17:19 +020014086 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020014087}
14088
14089int main(int argc, char **argv)
14090{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020014091 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +010014092 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070014093
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020014094 if (argc == 3) {
14095 unsigned int l = atoi(argv[argc - 2]);
14096 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070014097
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020014098 if (l < to && u < to) {
14099 from = l;
14100 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070014101 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020014102 } else if (argc == 2) {
14103 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -070014104
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020014105 if (t < to) {
14106 from = t;
14107 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070014108 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070014109 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070014110
Joe Stringer0a6748742018-02-14 13:50:36 -080014111 get_unpriv_disabled();
14112 if (unpriv && unpriv_disabled) {
14113 printf("Cannot run as unprivileged user with sysctl %s.\n",
14114 UNPRIV_SYSCTL);
14115 return EXIT_FAILURE;
14116 }
14117
Daniel Borkmanna82d8cd2018-05-14 23:22:34 +020014118 bpf_semi_rand_init();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020014119 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070014120}
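
/* Typical invocations, assuming the binary was built by the selftests
 * Makefile (tools/testing/selftests/bpf):
 *
 *	./test_verifier		# run every test
 *	./test_verifier 42	# run only test #42
 *	./test_verifier 10 20	# run tests #10 through #20
 *
 * The numbers correspond to the indices printed in the "#N/u" / "#N/p"
 * result lines.
 */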