/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* eBPF instruction mini library */
#ifndef __BPF_INSN_H
#define __BPF_INSN_H

struct bpf_insn;

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)			\
	((struct bpf_insn) {				\
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)			\
	((struct bpf_insn) {				\
		.code = BPF_ALU | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = 0 })
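
/*
 * Example (illustrative): BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2)
 * encodes the 64-bit "r1 += r2"; BPF_ALU32_REG performs the same op on
 * the lower 32 bits of the registers.
 */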

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)			\
	((struct bpf_insn) {				\
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)			\
	((struct bpf_insn) {				\
		.code = BPF_ALU | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = IMM })
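
/*
 * Example (illustrative): BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4) encodes
 * "r2 += -4", e.g. to point r2 at a stack slot after copying the frame
 * pointer from BPF_REG_10.
 */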

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)				\
	((struct bpf_insn) {				\
		.code = BPF_ALU64 | BPF_MOV | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC)				\
	((struct bpf_insn) {				\
		.code = BPF_ALU | BPF_MOV | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = 0 })
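
/*
 * Example (illustrative): BPF_MOV64_REG(BPF_REG_6, BPF_REG_1) saves the
 * context pointer (passed in r1) into the callee-saved r6, a common
 * first instruction of a hand-assembled program.
 */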

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)				\
	((struct bpf_insn) {				\
		.code = BPF_ALU64 | BPF_MOV | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM)				\
	((struct bpf_insn) {				\
		.code = BPF_ALU | BPF_MOV | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = IMM })
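
/*
 * Example (illustrative): BPF_MOV64_IMM(BPF_REG_0, 0) sets the program's
 * return value to 0 before BPF_EXIT_INSN().
 */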

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)				\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)			\
	((struct bpf_insn) {				\
		.code = BPF_LD | BPF_DW | BPF_IMM,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = (__u32) (IMM) }),		\
	((struct bpf_insn) {				\
		.code = 0, /* zero is reserved opcode */ \
		.dst_reg = 0,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = ((__u64) (IMM)) >> 32 })
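
/*
 * Note (illustrative): BPF_LD_IMM64() expands to two struct bpf_insn
 * initializers, so it occupies two slots in an instruction array; the
 * low 32 bits of IMM sit in the first insn and the high 32 bits in the
 * second, e.g. BPF_LD_IMM64(BPF_REG_1, 0x1122334455667788ULL).
 */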

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD	1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)			\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
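
/*
 * Example (illustrative): with map_fd a process-local file descriptor
 * returned by the BPF_MAP_CREATE syscall command,
 * BPF_LD_MAP_FD(BPF_REG_1, map_fd) loads it so the verifier can rewrite
 * the fd into the map pointer at program load time.
 */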

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)				\
	((struct bpf_insn) {				\
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
		.dst_reg = 0,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = IMM })
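
/*
 * Example (illustrative): BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct
 * iphdr, protocol)) loads the IP protocol byte into R0, as in classic
 * socket-filter style programs.
 */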

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)		\
	((struct bpf_insn) {				\
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = 0 })
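
/*
 * Example (illustrative): BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4)
 * loads the 32-bit word at fp-4 into r0.
 */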

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)		\
	((struct bpf_insn) {				\
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = 0 })
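
/*
 * Example (illustrative): BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8)
 * spills r1 to the 64-bit stack slot at fp-8.
 */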

/*
 * Atomic operations:
 *
 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
 *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)		\
	((struct bpf_insn) {				\
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = OP })

/* Legacy alias */
#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
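
/*
 * Example (illustrative): BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0,
 * BPF_REG_1, 0) atomically adds r1 to the u64 at (r0 + 0); OR-ing
 * BPF_FETCH into the op additionally returns the old value in src_reg.
 */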

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)			\
	((struct bpf_insn) {				\
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = OFF,				\
		.imm = IMM })
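
/*
 * Example (illustrative): BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0) zeroes
 * the 32-bit stack slot at fp-4, e.g. to initialize a map key.
 */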

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)			\
	((struct bpf_insn) {				\
		.code = BPF_JMP | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = 0 })
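
/*
 * Example (illustrative): BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_2, 3)
 * skips the next three instructions when r1 >= r2 (unsigned compare).
 */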

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF)		\
	((struct bpf_insn) {				\
		.code = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)			\
	((struct bpf_insn) {				\
		.code = BPF_JMP | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = OFF,				\
		.imm = IMM })
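
/*
 * Example (illustrative): BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2) skips
 * two instructions when r0 == 0, e.g. to branch around the NULL case
 * after a map lookup helper call.
 */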

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)		\
	((struct bpf_insn) {				\
		.code = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = OFF,				\
		.imm = IMM })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)		\
	((struct bpf_insn) {				\
		.code = CODE,				\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = IMM })
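
/*
 * Example (illustrative): a helper call is emitted as a raw insn,
 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem).
 */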

/* Program exit */

#define BPF_EXIT_INSN()					\
	((struct bpf_insn) {				\
		.code = BPF_JMP | BPF_EXIT,		\
		.dst_reg = 0,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = 0 })
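
/*
 * Example (illustrative): the smallest useful program returns 0,
 *
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_EXIT_INSN()
 */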

#endif