// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/set_memory.h>

#include "bpf_jit.h"

#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
	[TMP_REG_3] = A64_R(12),
	/* tail_call_cnt */
	[TCALL_CNT] = A64_R(26),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};

struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int epilogue_offset;
	int *offset;
	int exentry_idx;
	__le32 *image;
	u32 stack_size;
};

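/*
 * Append one A64 instruction to the program image. During the first
 * (sizing) pass ctx->image is NULL, so only ctx->idx advances; the
 * instruction words are actually written on the second pass.
 */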
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

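/*
 * Load a 32-bit immediate in at most two instructions: MOVN (optionally
 * refined by MOVK) when the sign bit of the upper half is set, otherwise
 * MOVZ plus an optional MOVK. For example, 0x12345678 becomes
 * MOVZ reg, #0x5678; MOVK reg, #0x1234, lsl #16.
 */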
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			if (lo != 0xffff)
				emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}

static int i64_i16_blocks(const u64 val, bool inverse)
{
	return (((val >> 0) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000));
}

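/*
 * Load a 64-bit immediate. i64_i16_blocks() counts the 16-bit chunks that
 * differ from all-zeros (or all-ones), so the emitter can pick whichever
 * of a MOVZ- or MOVN-based sequence needs fewer MOVK instructions.
 */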
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 nrm_tmp = val, rev_tmp = ~val;
	bool inverse;
	int shift;

	if (!(nrm_tmp >> 32))
		return emit_a64_mov_i(0, reg, (u32)val, ctx);

	inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false);
	shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) :
				(fls64(nrm_tmp) - 1)), 16), 0);
	if (inverse)
		emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx);
	else
		emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
	shift -= 16;
	while (shift >= 0) {
		if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000))
			emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
		shift -= 16;
	}
}

/*
 * Kernel addresses in the vmalloc space use at most 48 bits, and the
 * remaining bits are guaranteed to be 0x1. So we can compose the address
 * with a fixed length movn/movk/movk sequence.
 */
static inline void emit_addr_mov_i64(const int reg, const u64 val,
				     struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx);
	while (shift < 32) {
		tmp >>= 16;
		shift += 16;
		emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
	}
}

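/*
 * Branch offsets are measured in units of 32-bit instructions, computed
 * from the per-BPF-instruction offsets recorded during the first pass.
 */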
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from] - 1;

	return to - from;
}

static void jit_fill_hole(void *area, unsigned int size)
{
	__le32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

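/*
 * A64 ADD/SUB (immediate) takes a 12-bit unsigned immediate, optionally
 * shifted left by 12 bits. So 0xfff and 0xfff000 are encodable directly,
 * while 0x1001 is not.
 */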
static bool is_addsub_imm(u32 imm)
{
	/* Either imm12 or shifted imm12. */
	return !(imm & ~0xfff) || !(imm & ~0xfff000);
}

/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

/* Tail call offset to jump into */
#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
#define PROLOGUE_OFFSET 8
#else
#define PROLOGUE_OFFSET 7
#endif
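/*
 * PROLOGUE_OFFSET counts the instructions emitted before the tail-call
 * entry point: the FP/LR push, the FP setup, three pushes of callee-saved
 * register pairs, the BPF frame-pointer mov and the tail_call_cnt
 * initialisation make seven, plus one BTI C landing pad when BTI is
 * enabled. build_prologue() verifies the count at JIT time.
 */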
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
{
	const struct bpf_prog *prog = ctx->prog;
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
	int cur_offset;

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 * BPF fp register => -64:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - prog->aux->stack_depth)
	 *                        |RSVD | padding
	 * current A64_SP =>      +-----+ <= (BPF_FP - ctx->stack_size)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 *
	 */

	/* BTI landing pad */
	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		emit(A64_BTI_C, ctx);

	/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	emit(A64_PUSH(fp, tcc, A64_SP), ctx);

	/* Set up BPF prog stack base register */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	if (!ebpf_from_cbpf) {
		/* Initialize tail_call_cnt */
		emit(A64_MOVZ(1, tcc, 0, 0), ctx);

		cur_offset = ctx->idx - idx0;
		if (cur_offset != PROLOGUE_OFFSET) {
			pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
				    cur_offset, PROLOGUE_OFFSET);
			return -1;
		}

		/* BTI landing pad for the tail call, done with a BR */
		if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
			emit(A64_BTI_J, ctx);
	}

	ctx->stack_size = STACK_ALIGN(prog->aux->stack_depth);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	return 0;
}

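/*
 * out_offset is learned on the first build_body() pass and must come out
 * identical on every later pass, because all three bounds checks below
 * branch forward to the same "out" label using offsets derived from it.
 */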
static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 r2 = bpf2a64[BPF_REG_2];
	const u8 r3 = bpf2a64[BPF_REG_3];

	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 prg = bpf2a64[TMP_REG_2];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
	size_t off;

	/* if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR32(tmp, r2, tmp), ctx);
	emit(A64_MOV(0, r3, r3), ctx);
	emit(A64_CMP(0, r3, tmp), ctx);
	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *     goto out;
	 * tail_call_cnt++;
	 */
	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit(A64_CMP(1, tcc, tmp), ctx);
	emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

	/* prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_ADD(1, tmp, r2, tmp), ctx);
	emit(A64_LSL(1, prg, r3, 3), ctx);
	emit(A64_LDR64(prg, tmp, prg), ctx);
	emit(A64_CBZ(1, prg, jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_offset); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR64(tmp, prg, tmp), ctx);
	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	emit(A64_BR(tmp), ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

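/*
 * Exception-table fixups are packed into a single 32-bit word: the low
 * 27 bits hold the distance to the fixup address, and the top 5 bits hold
 * the A64 register to clear before resuming after a faulting load.
 */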
#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)

int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
			      struct pt_regs *regs)
{
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);

	regs->regs[dst_reg] = 0;
	regs->pc = (unsigned long)&ex->fixup - offset;
	return 1;
}

/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	off_t offset;
	unsigned long pc;
	struct exception_table_entry *ex;

	if (!ctx->image)
		/* First pass */
		return 0;

	if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
		return 0;

	if (!ctx->prog->aux->extable ||
	    WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->exentry_idx];
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
		    FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ctx->exentry_idx++;
	return 0;
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
		      bool extra_pass)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const u8 tmp3 = bpf2a64[TMP_REG_3];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
			  BPF_CLASS(code) == BPF_JMP;
	const bool isdw = BPF_SIZE(code) == BPF_DW;
	u8 jmp_cond, reg;
	s32 jmp_offset;
	u32 a64_insn;
	int ret;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
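/* 19 and 26 bits match the signed immediate widths of A64 conditional
 * and unconditional branch instructions, respectively.
 */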

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
			break;
		}
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_ADD_I(is64, dst, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_ADD(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_SUB_I(is64, dst, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		a64_insn = A64_AND_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_AND(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		a64_insn = A64_ORR_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_ORR(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		a64_insn = A64_EOR_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_EOR(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JLT:
			jmp_cond = A64_COND_CC;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JLE:
			jmp_cond = A64_COND_LS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSLT:
			jmp_cond = A64_COND_LT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		case BPF_JSLE:
			jmp_cond = A64_COND_LE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		emit(A64_TST(is64, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_CMP_I(is64, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_CMN_I(is64, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_CMP(is64, dst, tmp), ctx);
		}
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		a64_insn = A64_TST_I(is64, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_TST(is64, dst, tmp), ctx);
		}
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		bool func_addr_fixed;
		u64 func_addr;

		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;
		emit_addr_mov_i64(tmp, func_addr, ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		 * simply fall through to epilogue.
		 */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}

		ret = add_exception_handler(insn, ctx, dst);
		if (ret)
			return ret;
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;

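	/*
	 * XADD is emitted as a single STADD when the CPU implements LSE
	 * atomics; otherwise it falls back to an LDXR/STXR loop that
	 * retries until the exclusive store succeeds.
	 */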
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		if (!off) {
			reg = dst;
		} else {
			emit_a64_mov_i(1, tmp, off, ctx);
			emit(A64_ADD(1, tmp, tmp, dst), ctx);
			reg = tmp;
		}
		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
			emit(A64_STADD(isdw, reg, src), ctx);
		} else {
			emit(A64_LDXR(isdw, tmp2, reg), ctx);
			emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
			emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
			jmp_offset = -3;
			check_imm19(jmp_offset);
			emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
		}
		break;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

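/*
 * Walk the whole program once per JIT pass. On the first pass ctx->image
 * is NULL and ctx->offset[] is filled in, so later passes can resolve
 * branch targets to fixed instruction offsets.
 */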
static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		if (ret)
			return ret;
	}

	return 0;
}

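/*
 * Any AARCH64_BREAK_FAULT left in the image means an instruction failed
 * to encode (the A64_* helpers return it for invalid operands, and
 * jit_fill_hole() pre-fills the image with it), so reject the program
 * rather than run a breakpoint.
 */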
static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries))
		return -1;

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

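/*
 * State carried between bpf_int_jit_compile() invocations when a program
 * is JITed in stages (prog->is_func plus a later extra pass that patches
 * in final function addresses).
 */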
struct arm64_jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

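/*
 * Main entry point: a fake pass with a NULL image computes offsets and
 * the image size, the second pass emits code, and a final pass validates
 * the result before the image is made read-only.
 */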
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	int image_size, prog_size, extable_size;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	struct arm64_jit_data *jit_data;
	bool was_classic = bpf_prog_was_classic(prog);
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct jit_ctx ctx;
	u8 *image_ptr;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		prog_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}
	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 1. Initial fake pass to compute ctx->idx and fill in ctx->offset. */
	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_off;
	}

	if (build_prologue(&ctx, was_classic)) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	extable_size = prog->aux->num_exentries *
		       sizeof(struct exception_table_entry);

	/* Now we know the actual image size. */
	prog_size = sizeof(u32) * ctx.idx;
	image_size = prog_size + extable_size;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */

	ctx.image = (__le32 *)image_ptr;
	if (extable_size)
		prog->aux->extable = (void *)image_ptr + prog_size;
skip_init_ctx:
	ctx.idx = 0;
	ctx.exentry_idx = 0;

	build_prologue(&ctx, was_classic);

	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	if (!prog->is_func || extra_pass) {
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			bpf_jit_binary_free(header);
			prog->bpf_func = NULL;
			prog->jited = 0;
			goto out_off;
		}
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
	prog->jited_len = prog_size;

	if (!prog->is_func || extra_pass) {
		bpf_prog_fill_jited_linfo(prog, ctx.offset);
out_off:
		kfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

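/*
 * JIT images are allocated from the dedicated BPF_JIT_REGION window of the
 * vmalloc space rather than the generic vmalloc area; the bounded region
 * size is what the extable fixup offset encoding above relies on.
 */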
void *bpf_jit_alloc_exec(unsigned long size)
{
	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
				    BPF_JIT_REGION_END, GFP_KERNEL,
				    PAGE_KERNEL, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

void bpf_jit_free_exec(void *addr)
{
	return vfree(addr);
}