// SPDX-License-Identifier: GPL-2.0
/* BPF JIT compiler for RV64G
 *
 * Copyright(c) 2019 Björn Töpel <bjorn.topel@gmail.com>
 *
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include <asm/cacheflush.h>

enum {
	RV_REG_ZERO = 0, /* The constant value 0 */
	RV_REG_RA = 1, /* Return address */
	RV_REG_SP = 2, /* Stack pointer */
	RV_REG_GP = 3, /* Global pointer */
	RV_REG_TP = 4, /* Thread pointer */
	RV_REG_T0 = 5, /* Temporaries */
	RV_REG_T1 = 6,
	RV_REG_T2 = 7,
	RV_REG_FP = 8,
	RV_REG_S1 = 9, /* Saved registers */
	RV_REG_A0 = 10, /* Function argument/return values */
	RV_REG_A1 = 11, /* Function arguments */
	RV_REG_A2 = 12,
	RV_REG_A3 = 13,
	RV_REG_A4 = 14,
	RV_REG_A5 = 15,
	RV_REG_A6 = 16,
	RV_REG_A7 = 17,
	RV_REG_S2 = 18, /* Saved registers */
	RV_REG_S3 = 19,
	RV_REG_S4 = 20,
	RV_REG_S5 = 21,
	RV_REG_S6 = 22,
	RV_REG_S7 = 23,
	RV_REG_S8 = 24,
	RV_REG_S9 = 25,
	RV_REG_S10 = 26,
	RV_REG_S11 = 27,
	RV_REG_T3 = 28, /* Temporaries */
	RV_REG_T4 = 29,
	RV_REG_T5 = 30,
	RV_REG_T6 = 31,
};

#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if the program makes calls */
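
/* The tail-call counter (TCC) lives in a6. If the program also makes
 * BPF helper calls, rv_tail_call_reg() hands out the callee-saved s6
 * instead, and build_prologue() copies the counter there, so the value
 * survives across calls.
 */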

static const int regmap[] = {
	[BPF_REG_0] = RV_REG_A5,
	[BPF_REG_1] = RV_REG_A0,
	[BPF_REG_2] = RV_REG_A1,
	[BPF_REG_3] = RV_REG_A2,
	[BPF_REG_4] = RV_REG_A3,
	[BPF_REG_5] = RV_REG_A4,
	[BPF_REG_6] = RV_REG_S1,
	[BPF_REG_7] = RV_REG_S2,
	[BPF_REG_8] = RV_REG_S3,
	[BPF_REG_9] = RV_REG_S4,
	[BPF_REG_FP] = RV_REG_S5,
	[BPF_REG_AX] = RV_REG_T0,
};

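/* ctx->flags tracks which callee-saved registers the program actually
 * uses, so the prologue/epilogue only spill and restore those. The
 * RV_CTX_F_SEEN_* values reuse the register numbers, which lets
 * bpf_to_rv_reg() and seen_reg() share a single bitmap.
 */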
enum {
	RV_CTX_F_SEEN_TAIL_CALL = 0,
	RV_CTX_F_SEEN_CALL = RV_REG_RA,
	RV_CTX_F_SEEN_S1 = RV_REG_S1,
	RV_CTX_F_SEEN_S2 = RV_REG_S2,
	RV_CTX_F_SEEN_S3 = RV_REG_S3,
	RV_CTX_F_SEEN_S4 = RV_REG_S4,
	RV_CTX_F_SEEN_S5 = RV_REG_S5,
	RV_CTX_F_SEEN_S6 = RV_REG_S6,
};

struct rv_jit_context {
	struct bpf_prog *prog;
	u32 *insns; /* RV insns */
	int ninsns;
	int epilogue_offset;
	int *offset; /* BPF to RV */
	unsigned long flags;
	int stack_size;
};

struct rv_jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct rv_jit_context ctx;
};

static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx)
{
	u8 reg = regmap[bpf_reg];

	switch (reg) {
	case RV_CTX_F_SEEN_S1:
	case RV_CTX_F_SEEN_S2:
	case RV_CTX_F_SEEN_S3:
	case RV_CTX_F_SEEN_S4:
	case RV_CTX_F_SEEN_S5:
	case RV_CTX_F_SEEN_S6:
		__set_bit(reg, &ctx->flags);
	}
	return reg;
}

static bool seen_reg(int reg, struct rv_jit_context *ctx)
{
	switch (reg) {
	case RV_CTX_F_SEEN_CALL:
	case RV_CTX_F_SEEN_S1:
	case RV_CTX_F_SEEN_S2:
	case RV_CTX_F_SEEN_S3:
	case RV_CTX_F_SEEN_S4:
	case RV_CTX_F_SEEN_S5:
	case RV_CTX_F_SEEN_S6:
		return test_bit(reg, &ctx->flags);
	}
	return false;
}

static void mark_fp(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_S5, &ctx->flags);
}

static void mark_call(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}

static bool seen_call(struct rv_jit_context *ctx)
{
	return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}

static void mark_tail_call(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}

static bool seen_tail_call(struct rv_jit_context *ctx)
{
	return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}

static u8 rv_tail_call_reg(struct rv_jit_context *ctx)
{
	mark_tail_call(ctx);

	if (seen_call(ctx)) {
		__set_bit(RV_CTX_F_SEEN_S6, &ctx->flags);
		return RV_REG_S6;
	}
	return RV_REG_A6;
}

static void emit(const u32 insn, struct rv_jit_context *ctx)
{
	if (ctx->insns)
		ctx->insns[ctx->ninsns] = insn;

	ctx->ninsns++;
}

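/* Emitters for the base RV64 instruction formats. The field layout
 * follows the RISC-V unprivileged ISA spec: funct7[31:25], rs2[24:20],
 * rs1[19:15], funct3[14:12], rd[11:7], opcode[6:0] for R-type, with the
 * I/S/B/U/J variants repacking the immediate bits accordingly. Note that
 * the B-type (rv_sb_insn) and J-type (rv_uj_insn) helpers take the
 * branch offset already shifted right by one (imm12_1/imm20_1 are bits
 * 12:1 and 20:1 of the byte offset), which is why callers pass
 * rvoff >> 1.
 */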
static u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd, u8 opcode)
{
	return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
		(rd << 7) | opcode;
}

static u32 rv_i_insn(u16 imm11_0, u8 rs1, u8 funct3, u8 rd, u8 opcode)
{
	return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) |
		opcode;
}

static u32 rv_s_insn(u16 imm11_0, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
{
	u8 imm11_5 = imm11_0 >> 5, imm4_0 = imm11_0 & 0x1f;

	return (imm11_5 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
		(imm4_0 << 7) | opcode;
}

static u32 rv_sb_insn(u16 imm12_1, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
{
	u8 imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4);
	u8 imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10);

	return (imm12 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
		(imm4_1 << 7) | opcode;
}

static u32 rv_u_insn(u32 imm31_12, u8 rd, u8 opcode)
{
	return (imm31_12 << 12) | (rd << 7) | opcode;
}

static u32 rv_uj_insn(u32 imm20_1, u8 rd, u8 opcode)
{
	u32 imm;

	imm = (imm20_1 & 0x80000) | ((imm20_1 & 0x3ff) << 9) |
	      ((imm20_1 & 0x400) >> 2) | ((imm20_1 & 0x7f800) >> 11);

	return (imm << 12) | (rd << 7) | opcode;
}

static u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,
		       u8 funct3, u8 rd, u8 opcode)
{
	u8 funct7 = (funct5 << 2) | (aq << 1) | rl;

	return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
}

static u32 rv_addiw(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 0, rd, 0x1b);
}

static u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 0, rd, 0x13);
}

static u32 rv_addw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 0, rd, 0x3b);
}

static u32 rv_add(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 0, rd, 0x33);
}

static u32 rv_subw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x3b);
}

static u32 rv_sub(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x33);
}

static u32 rv_and(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 7, rd, 0x33);
}

static u32 rv_or(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 6, rd, 0x33);
}

static u32 rv_xor(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 4, rd, 0x33);
}

static u32 rv_mulw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b);
}

static u32 rv_mul(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 0, rd, 0x33);
}

static u32 rv_divuw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b);
}

static u32 rv_divu(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 5, rd, 0x33);
}

static u32 rv_remuw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b);
}

static u32 rv_remu(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 7, rd, 0x33);
}

static u32 rv_sllw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 1, rd, 0x3b);
}

static u32 rv_sll(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 1, rd, 0x33);
}

static u32 rv_srlw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 5, rd, 0x3b);
}

static u32 rv_srl(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 5, rd, 0x33);
}

static u32 rv_sraw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x3b);
}

static u32 rv_sra(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x33);
}

static u32 rv_lui(u8 rd, u32 imm31_12)
{
	return rv_u_insn(imm31_12, rd, 0x37);
}

static u32 rv_slli(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 1, rd, 0x13);
}

static u32 rv_andi(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 7, rd, 0x13);
}

static u32 rv_ori(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 6, rd, 0x13);
}

static u32 rv_xori(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 4, rd, 0x13);
}

static u32 rv_slliw(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 1, rd, 0x1b);
}

static u32 rv_srliw(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 5, rd, 0x1b);
}

static u32 rv_srli(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 5, rd, 0x13);
}

static u32 rv_sraiw(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x1b);
}

static u32 rv_srai(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x13);
}

static u32 rv_jal(u8 rd, u32 imm20_1)
{
	return rv_uj_insn(imm20_1, rd, 0x6f);
}

static u32 rv_jalr(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 0, rd, 0x67);
}

static u32 rv_beq(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_sb_insn(imm12_1, rs2, rs1, 0, 0x63);
}

static u32 rv_bltu(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_sb_insn(imm12_1, rs2, rs1, 6, 0x63);
}

static u32 rv_bgeu(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_sb_insn(imm12_1, rs2, rs1, 7, 0x63);
}

static u32 rv_bne(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_sb_insn(imm12_1, rs2, rs1, 1, 0x63);
}

static u32 rv_blt(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_sb_insn(imm12_1, rs2, rs1, 4, 0x63);
}

static u32 rv_bge(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_sb_insn(imm12_1, rs2, rs1, 5, 0x63);
}

static u32 rv_sb(u8 rs1, u16 imm11_0, u8 rs2)
{
	return rv_s_insn(imm11_0, rs2, rs1, 0, 0x23);
}

static u32 rv_sh(u8 rs1, u16 imm11_0, u8 rs2)
{
	return rv_s_insn(imm11_0, rs2, rs1, 1, 0x23);
}

static u32 rv_sw(u8 rs1, u16 imm11_0, u8 rs2)
{
	return rv_s_insn(imm11_0, rs2, rs1, 2, 0x23);
}

static u32 rv_sd(u8 rs1, u16 imm11_0, u8 rs2)
{
	return rv_s_insn(imm11_0, rs2, rs1, 3, 0x23);
}

static u32 rv_lbu(u8 rd, u16 imm11_0, u8 rs1)
{
	return rv_i_insn(imm11_0, rs1, 4, rd, 0x03);
}

static u32 rv_lhu(u8 rd, u16 imm11_0, u8 rs1)
{
	return rv_i_insn(imm11_0, rs1, 5, rd, 0x03);
}

static u32 rv_lwu(u8 rd, u16 imm11_0, u8 rs1)
{
	return rv_i_insn(imm11_0, rs1, 6, rd, 0x03);
}

static u32 rv_ld(u8 rd, u16 imm11_0, u8 rs1)
{
	return rv_i_insn(imm11_0, rs1, 3, rd, 0x03);
}

static u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
{
	return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
}

static u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
{
	return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
}

static u32 rv_auipc(u8 rd, u32 imm31_12)
{
	return rv_u_insn(imm31_12, rd, 0x17);
}

static bool is_12b_int(s64 val)
{
	return -(1 << 11) <= val && val < (1 << 11);
}

static bool is_13b_int(s64 val)
{
	return -(1 << 12) <= val && val < (1 << 12);
}

static bool is_21b_int(s64 val)
{
	return -(1L << 20) <= val && val < (1L << 20);
}

static bool is_32b_int(s64 val)
{
	return -(1L << 31) <= val && val < (1L << 31);
}

static int is_12b_check(int off, int insn)
{
	if (!is_12b_int(off)) {
		pr_err("bpf-jit: insn=%d 12b < offset=%d not supported yet!\n",
		       insn, (int)off);
		return -1;
	}
	return 0;
}

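/* Example of the sign-extension compensation in emit_imm() below: for
 * val = 0xfff, lower = 0xfff sign-extends to -1 in the addiw, so upper
 * becomes (0xfff + 0x800) >> 12 = 1 and the emitted sequence is
 * "lui rd, 1; addiw rd, rd, -1", i.e. 0x1000 - 1 = 0xfff.
 */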
static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
{
	/* Note that the immediate from the add is sign-extended,
	 * which means that we need to compensate this by adding 2^12,
	 * when the 12th bit is set. A simpler way of doing this, and
	 * getting rid of the check, is to just add 2^11 before the
	 * shift. The "Loading a 32-Bit constant" example from the
	 * "Computer Organization and Design, RISC-V edition" book by
	 * Patterson/Hennessy highlights this fact.
	 *
	 * This also means that we need to process LSB to MSB.
	 */
	s64 upper = (val + (1 << 11)) >> 12, lower = val & 0xfff;
	int shift;

	if (is_32b_int(val)) {
		if (upper)
			emit(rv_lui(rd, upper), ctx);

		if (!upper) {
			emit(rv_addi(rd, RV_REG_ZERO, lower), ctx);
			return;
		}

		emit(rv_addiw(rd, rd, lower), ctx);
		return;
	}

	shift = __ffs(upper);
	upper >>= shift;
	shift += 12;

	emit_imm(rd, upper, ctx);

	emit(rv_slli(rd, rd, shift), ctx);
	if (lower)
		emit(rv_addi(rd, rd, lower), ctx);
}

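/* ctx->offset[i] holds the number of RV instructions emitted up to and
 * including BPF instruction i, so rv_offset() below converts a BPF
 * branch offset (counted in BPF instructions, relative to the following
 * instruction) into a byte offset between the corresponding RV
 * instructions.
 */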
static int rv_offset(int insn, int off, struct rv_jit_context *ctx)
{
	int from, to;

	off++; /* BPF branch is from PC+1, RV is from PC */
	from = (insn > 0) ? ctx->offset[insn - 1] : 0;
	to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
	return (to - from) << 2;
}

static int epilogue_offset(struct rv_jit_context *ctx)
{
	int to = ctx->epilogue_offset, from = ctx->ninsns;

	return (to - from) << 2;
}

static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
{
	int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;

	if (seen_reg(RV_REG_RA, ctx)) {
		emit(rv_ld(RV_REG_RA, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	emit(rv_ld(RV_REG_FP, store_offset, RV_REG_SP), ctx);
	store_offset -= 8;
	if (seen_reg(RV_REG_S1, ctx)) {
		emit(rv_ld(RV_REG_S1, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S2, ctx)) {
		emit(rv_ld(RV_REG_S2, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S3, ctx)) {
		emit(rv_ld(RV_REG_S3, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S4, ctx)) {
		emit(rv_ld(RV_REG_S4, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S5, ctx)) {
		emit(rv_ld(RV_REG_S5, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S6, ctx)) {
		emit(rv_ld(RV_REG_S6, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}

	emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);
	/* Set return value. */
	if (!is_tail_call)
		emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx);
	emit(rv_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
		     is_tail_call ? 4 : 0), /* skip TCC init */
	     ctx);
}

/* return -1 or inverted cond */
static int invert_bpf_cond(u8 cond)
{
	switch (cond) {
	case BPF_JEQ:
		return BPF_JNE;
	case BPF_JGT:
		return BPF_JLE;
	case BPF_JLT:
		return BPF_JGE;
	case BPF_JGE:
		return BPF_JLT;
	case BPF_JLE:
		return BPF_JGT;
	case BPF_JNE:
		return BPF_JEQ;
	case BPF_JSGT:
		return BPF_JSLE;
	case BPF_JSLT:
		return BPF_JSGE;
	case BPF_JSGE:
		return BPF_JSLT;
	case BPF_JSLE:
		return BPF_JSGT;
	}
	return -1;
}

static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff,
		     struct rv_jit_context *ctx)
{
	switch (cond) {
	case BPF_JEQ:
		emit(rv_beq(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JGT:
		emit(rv_bltu(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JLT:
		emit(rv_bltu(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JGE:
		emit(rv_bgeu(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JLE:
		emit(rv_bgeu(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JNE:
		emit(rv_bne(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSGT:
		emit(rv_blt(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JSLT:
		emit(rv_blt(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSGE:
		emit(rv_bge(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSLE:
		emit(rv_bge(rs, rd, rvoff >> 1), ctx);
	}
}

static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff,
			struct rv_jit_context *ctx)
{
	s64 upper, lower;

	if (is_13b_int(rvoff)) {
		emit_bcc(cond, rd, rs, rvoff, ctx);
		return;
	}

	/* Adjust for jal */
	rvoff -= 4;

	/* Transform, e.g.:
	 *   bne rd,rs,foo
	 * to
	 *   beq rd,rs,<.L1>
	 *   (auipc foo)
	 *   jal(r) foo
	 * .L1
	 */
	cond = invert_bpf_cond(cond);
	if (is_21b_int(rvoff)) {
		emit_bcc(cond, rd, rs, 8, ctx);
		emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx);
		return;
	}

	/* 32b. No need for an additional rvoff adjustment, since we
	 * get that from the auipc at PC', where PC = PC' + 4.
	 */
	upper = (rvoff + (1 << 11)) >> 12;
	lower = rvoff & 0xfff;

	emit_bcc(cond, rd, rs, 12, ctx);
	emit(rv_auipc(RV_REG_T1, upper), ctx);
	emit(rv_jalr(RV_REG_ZERO, RV_REG_T1, lower), ctx);
}

static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
{
	emit(rv_slli(reg, reg, 32), ctx);
	emit(rv_srli(reg, reg, 32), ctx);
}

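/* The tail call below checks the index bound, the tail-call counter and
 * the target program pointer, then jumps to the target's bpf_func + 4:
 * the skipped instruction is the TCC initialization that
 * build_prologue() emits first, so the decremented counter carried in
 * RV_REG_TCC stays in effect across the chain of tail calls.
 */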
static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
{
	int tc_ninsn, off, start_insn = ctx->ninsns;
	u8 tcc = rv_tail_call_reg(ctx);

	/* a0: &ctx
	 * a1: &array
	 * a2: index
	 *
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
		   ctx->offset[0];
	emit_zext_32(RV_REG_A2, ctx);

	off = offsetof(struct bpf_array, map.max_entries);
	if (is_12b_check(off, insn))
		return -1;
	emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx);
	off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
	emit_branch(BPF_JGE, RV_REG_A2, RV_REG_T1, off, ctx);

	/* if (--TCC < 0)
	 *	goto out;
	 */
	emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
	off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
	emit_branch(BPF_JSLT, RV_REG_T1, RV_REG_ZERO, off, ctx);

	/* prog = array->ptrs[index];
	 * if (!prog)
	 *	goto out;
	 */
	emit(rv_slli(RV_REG_T2, RV_REG_A2, 3), ctx);
	emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_A1), ctx);
	off = offsetof(struct bpf_array, ptrs);
	if (is_12b_check(off, insn))
		return -1;
	emit(rv_ld(RV_REG_T2, off, RV_REG_T2), ctx);
	off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
	emit_branch(BPF_JEQ, RV_REG_T2, RV_REG_ZERO, off, ctx);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	if (is_12b_check(off, insn))
		return -1;
	emit(rv_ld(RV_REG_T3, off, RV_REG_T2), ctx);
	emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx);
	__build_epilogue(true, ctx);
	return 0;
}

static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
		      struct rv_jit_context *ctx)
{
	u8 code = insn->code;

	switch (code) {
	case BPF_JMP | BPF_JA:
	case BPF_JMP | BPF_CALL:
	case BPF_JMP | BPF_EXIT:
	case BPF_JMP | BPF_TAIL_CALL:
		break;
	default:
		*rd = bpf_to_rv_reg(insn->dst_reg, ctx);
	}

	if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) ||
	    code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) ||
	    code & BPF_LDX || code & BPF_STX)
		*rs = bpf_to_rv_reg(insn->src_reg, ctx);
}

static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
	emit(rv_addi(RV_REG_T2, *rd, 0), ctx);
	emit_zext_32(RV_REG_T2, ctx);
	emit(rv_addi(RV_REG_T1, *rs, 0), ctx);
	emit_zext_32(RV_REG_T1, ctx);
	*rd = RV_REG_T2;
	*rs = RV_REG_T1;
}

static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
	emit(rv_addiw(RV_REG_T2, *rd, 0), ctx);
	emit(rv_addiw(RV_REG_T1, *rs, 0), ctx);
	*rd = RV_REG_T2;
	*rs = RV_REG_T1;
}

static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
{
	emit(rv_addi(RV_REG_T2, *rd, 0), ctx);
	emit_zext_32(RV_REG_T2, ctx);
	emit_zext_32(RV_REG_T1, ctx);
	*rd = RV_REG_T2;
}

static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
{
	emit(rv_addiw(RV_REG_T2, *rd, 0), ctx);
	*rd = RV_REG_T2;
}

static void emit_jump_and_link(u8 rd, int rvoff, struct rv_jit_context *ctx)
{
	s64 upper, lower;

	if (is_21b_int(rvoff)) {
		emit(rv_jal(rd, rvoff >> 1), ctx);
		return;
	}

	upper = (rvoff + (1 << 11)) >> 12;
	lower = rvoff & 0xfff;
	emit(rv_auipc(RV_REG_T1, upper), ctx);
	emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
}

static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
	       cond == BPF_JSGE || cond == BPF_JSLE;
}

static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
		     bool extra_pass)
{
	bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
		    BPF_CLASS(insn->code) == BPF_JMP;
	int s, e, rvoff, i = insn - ctx->prog->insnsi;
	struct bpf_prog_aux *aux = ctx->prog->aux;
	u8 rd = -1, rs = -1, code = insn->code;
	s16 off = insn->off;
	s32 imm = insn->imm;

	init_regs(&rd, &rs, insn, ctx);

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		if (imm == 1) {
			/* Special mov32 for zext */
			emit_zext_32(rd, ctx);
			break;
		}
		emit(is64 ? rv_addi(rd, rs, 0) : rv_addiw(rd, rs, 0), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(rv_and(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(rv_or(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(rv_xor(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
		if (!is64)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) :
		     rv_subw(rd, RV_REG_ZERO, rd), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	{
		int shift = 64 - imm;

		emit(rv_slli(rd, rd, shift), ctx);
		emit(rv_srli(rd, rd, shift), ctx);
		break;
	}
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx);

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);
		if (imm == 16)
			goto out_be;

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);
		if (imm == 32)
			goto out_be;

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);
out_be:
		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);

		emit(rv_addi(rd, RV_REG_T2, 0), ctx);
		break;

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_imm(rd, imm, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_12b_int(imm)) {
			emit(is64 ? rv_addi(rd, rd, imm) :
			     rv_addiw(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit(is64 ? rv_add(rd, rd, RV_REG_T1) :
			     rv_addw(rd, rd, RV_REG_T1), ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_12b_int(-imm)) {
			emit(is64 ? rv_addi(rd, rd, -imm) :
			     rv_addiw(rd, rd, -imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit(is64 ? rv_sub(rd, rd, RV_REG_T1) :
			     rv_subw(rd, rd, RV_REG_T1), ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_andi(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit(rv_and(rd, rd, RV_REG_T1), ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_ori(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit(rv_or(rd, rd, RV_REG_T1), ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_xori(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit(rv_xor(rd, rd, RV_REG_T1), ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_mul(rd, rd, RV_REG_T1) :
		     rv_mulw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
		     rv_divuw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
		     rv_remuw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
		if (!is64)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
		if (!is64)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
		if (!is64)
			emit_zext_32(rd, ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		rvoff = rv_offset(i, off, ctx);
		emit_jump_and_link(RV_REG_ZERO, rvoff, ctx);
		break;

	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		rvoff = rv_offset(i, off, ctx);
		if (!is64) {
			s = ctx->ninsns;
			if (is_signed_bpf_cond(BPF_OP(code)))
				emit_sext_32_rd_rs(&rd, &rs, ctx);
			else
				emit_zext_32_rd_rs(&rd, &rs, ctx);
			e = ctx->ninsns;

			/* Adjust for extra insns */
			rvoff -= (e - s) << 2;
		}

		if (BPF_OP(code) == BPF_JSET) {
			/* Adjust for and */
			rvoff -= 4;
			emit(rv_and(RV_REG_T1, rd, rs), ctx);
			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
				    ctx);
		} else {
			emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
		}
		break;

	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		rvoff = rv_offset(i, off, ctx);
		s = ctx->ninsns;
		emit_imm(RV_REG_T1, imm, ctx);
		if (!is64) {
			if (is_signed_bpf_cond(BPF_OP(code)))
				emit_sext_32_rd(&rd, ctx);
			else
				emit_zext_32_rd_t1(&rd, ctx);
		}
		e = ctx->ninsns;

		/* Adjust for extra insns */
		rvoff -= (e - s) << 2;

		if (BPF_OP(code) == BPF_JSET) {
			/* Adjust for and */
			rvoff -= 4;
			emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
				    ctx);
		} else {
			emit_branch(BPF_OP(code), rd, RV_REG_T1, rvoff, ctx);
		}
		break;

	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		bool fixed;
		int i, ret;
		u64 addr;

		mark_call(ctx);
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
					    &fixed);
		if (ret < 0)
			return ret;
		if (fixed) {
			emit_imm(RV_REG_T1, addr, ctx);
		} else {
			i = ctx->ninsns;
			emit_imm(RV_REG_T1, addr, ctx);
			for (i = ctx->ninsns - i; i < 8; i++) {
				/* nop */
				emit(rv_addi(RV_REG_ZERO, RV_REG_ZERO, 0),
				     ctx);
			}
		}
		emit(rv_jalr(RV_REG_RA, RV_REG_T1, 0), ctx);
		rd = bpf_to_rv_reg(BPF_REG_0, ctx);
		emit(rv_addi(rd, RV_REG_A0, 0), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(i, ctx))
			return -1;
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
		if (i == ctx->prog->len - 1)
			break;

		rvoff = epilogue_offset(ctx);
		emit_jump_and_link(RV_REG_ZERO, rvoff, ctx);
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_imm(rd, imm64, ctx);
		return 1;
	}

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
		if (is_12b_int(off)) {
			emit(rv_lbu(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
		emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_LDX | BPF_MEM | BPF_H:
		if (is_12b_int(off)) {
			emit(rv_lhu(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
		emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_LDX | BPF_MEM | BPF_W:
		if (is_12b_int(off)) {
			emit(rv_lwu(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
		emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_LDX | BPF_MEM | BPF_DW:
		if (is_12b_int(off)) {
			emit(rv_ld(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
		emit(rv_ld(rd, 0, RV_REG_T1), ctx);
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_B:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sb(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
		emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;

	case BPF_ST | BPF_MEM | BPF_H:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sh(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
		emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;
	case BPF_ST | BPF_MEM | BPF_W:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sw(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
		emit(rv_sw(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;
	case BPF_ST | BPF_MEM | BPF_DW:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sd(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
		emit(rv_sd(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_B:
		if (is_12b_int(off)) {
			emit(rv_sb(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
		emit(rv_sb(RV_REG_T1, 0, rs), ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_H:
		if (is_12b_int(off)) {
			emit(rv_sh(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
		emit(rv_sh(RV_REG_T1, 0, rs), ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_W:
		if (is_12b_int(off)) {
			emit(rv_sw(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
		emit(rv_sw(RV_REG_T1, 0, rs), ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_DW:
		if (is_12b_int(off)) {
			emit(rv_sd(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
		emit(rv_sd(RV_REG_T1, 0, rs), ctx);
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		if (off) {
			if (is_12b_int(off)) {
				emit(rv_addi(RV_REG_T1, rd, off), ctx);
			} else {
				emit_imm(RV_REG_T1, off, ctx);
				emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
			}

			rd = RV_REG_T1;
		}

		emit(BPF_SIZE(code) == BPF_W ?
		     rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	default:
		pr_err("bpf-jit: unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

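/* Rough layout of the stack frame built by the prologue below,
 * higher addresses first:
 *
 *	ra (only if the program makes calls)
 *	fp
 *	s1..s6 (only the ones actually used)
 *	<padding up to a 16-byte multiple>
 *	BPF stack (stack_depth, rounded up to 16)  <- BPF frame pointer (s5)
 *						   <- sp
 *
 * fp is set to the top of the frame, and s5 to the top of the BPF
 * stack area.
 */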
static void build_prologue(struct rv_jit_context *ctx)
{
	int stack_adjust = 0, store_offset, bpf_stack_adjust;

	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
	if (bpf_stack_adjust)
		mark_fp(ctx);

	if (seen_reg(RV_REG_RA, ctx))
		stack_adjust += 8;
	stack_adjust += 8; /* RV_REG_FP */
	if (seen_reg(RV_REG_S1, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S2, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S3, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S4, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S5, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S6, ctx))
		stack_adjust += 8;

	stack_adjust = round_up(stack_adjust, 16);
	stack_adjust += bpf_stack_adjust;

	store_offset = stack_adjust - 8;

	/* First instruction is always setting the tail-call-counter
	 * (TCC) register. This instruction is skipped for tail calls.
	 */
	emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);

	emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);

	if (seen_reg(RV_REG_RA, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_RA), ctx);
		store_offset -= 8;
	}
	emit(rv_sd(RV_REG_SP, store_offset, RV_REG_FP), ctx);
	store_offset -= 8;
	if (seen_reg(RV_REG_S1, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S1), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S2, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S2), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S3, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S3), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S4, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S4), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S5, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S5), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S6, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S6), ctx);
		store_offset -= 8;
	}

	emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);

	if (bpf_stack_adjust)
		emit(rv_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust), ctx);

	/* Program contains calls and tail calls, so RV_REG_TCC needs
	 * to be saved across calls.
	 */
	if (seen_tail_call(ctx) && seen_call(ctx))
		emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx);

	ctx->stack_size = stack_adjust;
}

static void build_epilogue(struct rv_jit_context *ctx)
{
	__build_epilogue(false, ctx);
}

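/* On sizing passes (ctx->insns == NULL), build_body() only records, for
 * each BPF instruction, how many RV instructions have been emitted so
 * far (ctx->offset[]); on the final pass the same walk writes the
 * actual image. A return value of 1 from emit_insn() means the
 * instruction consumed two BPF slots (ld_imm64, or a load paired with a
 * zero-extension pseudo instruction).
 */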
static int build_body(struct rv_jit_context *ctx, bool extra_pass)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = emit_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
			if (ctx->insns == NULL)
				ctx->offset[i] = ctx->ninsns;
			continue;
		}
		if (ctx->insns == NULL)
			ctx->offset[i] = ctx->ninsns;
		if (ret)
			return ret;
	}
	return 0;
}

static void bpf_fill_ill_insns(void *area, unsigned int size)
{
	memset(area, 0, size);
}

static void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

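/* Tell the verifier to insert explicit zero-extension pseudo
 * instructions for 32-bit ops; the aux->verifier_zext checks in
 * emit_insn() then skip the JIT's own zero-extension where it would be
 * redundant.
 */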
bool bpf_jit_needs_zext(void)
{
	return true;
}

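/* The JIT uses multiple sizing passes: ctx->offset[] is seeded with a
 * 32-instruction worst case per BPF instruction, and build_body() is
 * re-run (up to 16 times) until the emitted size stops changing, so
 * that branch offsets are computed against stable instruction offsets.
 * Only then is the image allocated and the real emission pass done.
 */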
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	bool tmp_blinded = false, extra_pass = false;
	struct bpf_prog *tmp, *orig_prog = prog;
	int pass = 0, prev_ninsns = 0, i;
	struct rv_jit_data *jit_data;
	struct rv_jit_context *ctx;
	unsigned int image_size;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}

	ctx = &jit_data->ctx;

	if (ctx->offset) {
		extra_pass = true;
		image_size = sizeof(u32) * ctx->ninsns;
		goto skip_init_ctx;
	}

	ctx->prog = prog;
	ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (!ctx->offset) {
		prog = orig_prog;
		goto out_offset;
	}
	for (i = 0; i < prog->len; i++) {
		prev_ninsns += 32;
		ctx->offset[i] = prev_ninsns;
	}

	for (i = 0; i < 16; i++) {
		pass++;
		ctx->ninsns = 0;
		if (build_body(ctx, extra_pass)) {
			prog = orig_prog;
			goto out_offset;
		}
		build_prologue(ctx);
		ctx->epilogue_offset = ctx->ninsns;
		build_epilogue(ctx);
		if (ctx->ninsns == prev_ninsns)
			break;
		prev_ninsns = ctx->ninsns;
	}

	/* Allocate image, now that we know the size. */
	image_size = sizeof(u32) * ctx->ninsns;
	jit_data->header = bpf_jit_binary_alloc(image_size, &jit_data->image,
						sizeof(u32),
						bpf_fill_ill_insns);
	if (!jit_data->header) {
		prog = orig_prog;
		goto out_offset;
	}

	/* Second, real pass, that actually emits the image. */
	ctx->insns = (u32 *)jit_data->image;
skip_init_ctx:
	pass++;
	ctx->ninsns = 0;

	build_prologue(ctx);
	if (build_body(ctx, extra_pass)) {
		bpf_jit_binary_free(jit_data->header);
		prog = orig_prog;
		goto out_offset;
	}
	build_epilogue(ctx);

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, pass, ctx->insns);

	prog->bpf_func = (void *)ctx->insns;
	prog->jited = 1;
	prog->jited_len = image_size;

	bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);

	if (!prog->is_func || extra_pass) {
out_offset:
		kfree(ctx->offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}