// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit64.h"

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}
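
/*
 * A worked example of the layout above, assuming BPF_PPC_STACK_SAVE is
 * 5 * 8 = 40 bytes (one doubleword per saved NVR): the NV GPR save area
 * occupies -40(r1)..-1(r1), tail_call_cnt sits at -48(r1), and the 16-byte
 * local_tmp_var area spans -64(r1)..-49(r1) -- hence the
 * -(BPF_PPC_STACK_SAVE + 24) returned above when we have no stack frame.
 */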

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}
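
/*
 * For example, r31 (BPF_REG_FP) lives 8 bytes below the top of our frame:
 * with a stack frame the offset is (BPF_PPC_STACKFRAME + ctx->stack_size) - 8,
 * and without one it is -8(r1), the topmost slot of the NV GPR save area in
 * the redzone diagram above.
 */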

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}
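
/*
 * bpf_jit_realloc_regs() above is deliberately empty on ppc64: it only
 * satisfies the interface shared with the 32-bit JIT, which uses this hook
 * to remap BPF registers onto volatile GPRs when that saves stack traffic.
 */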

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8
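
	/*
	 * The pair of instructions above (li + std, or the two nops) is
	 * exactly BPF_TAILCALL_PROLOGUE_SIZE bytes; tail calls branch to
	 * bpf_func + BPF_TAILCALL_PROLOGUE_SIZE so that a tail-called
	 * program does not reset the tail_call_cnt it inherits.
	 */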

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we only need to save/restore
		 * LR if we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			EMIT(PPC_RAW_MTLR(0));
		}
	}
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));

	EMIT(PPC_RAW_BLR());
}

static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
				       u64 func)
{
#ifdef PPC64_ELF_ABI_v1
	/* func points to the function descriptor */
	PPC_LI64(b2p[TMP_REG_2], func);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
	/* ... and move it to CTR */
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourselves.
	 */
	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
	/* We can clobber r12 */
	PPC_FUNC_ADDR(12, func);
	EMIT(PPC_RAW_MTCTR(12));
#endif
	EMIT(PPC_RAW_BCTRL());
}
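
/*
 * Note: with the ELFv1 ABI, a function's "address" is the address of its
 * descriptor: three doublewords holding the entry point, the TOC pointer
 * and an environment pointer. That is why the calls above and below load
 * the real entry point from offset 0 and the TOC from offset 8.
 */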

void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		EMIT(PPC_RAW_NOP());

#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourselves.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	EMIT(PPC_RAW_MTCTR(12));
	EMIT(PPC_RAW_BCTRL());
}

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
	EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
#else
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
#endif
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}

/*
 * We always spill into the redzone, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);
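
/*
 * The sequence above mirrors the kernel's STF barrier fallback: the two
 * scratch GPRs are parked in the redzone local_tmp_var slots (-64 and -56
 * match bpf_jit_stack_local() when there is no stack frame), sync orders
 * the spills, and the ori/branch chain forms the barrier itself.
 */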

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
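			/*
			 * SUB with an immediate is emitted as addi with the
			 * negated immediate, so the range check below is
			 * shifted by one relative to ADD: imm == 32768 is
			 * fine (-32768 fits the signed 16-bit field), while
			 * imm == -32768 must take the PPC_LI32() path.
			 */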
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2],
							dst_reg,
							b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				} else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2],
							dst_reg,
							b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				} else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			}
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert LSB to reg */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
				EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(12));
				EMIT(PPC_RAW_BCTRL());
				EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;
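
		/*
		 * Which barrier is emitted was sampled once from
		 * stf_barrier_type_get() on entry to bpf_jit_build_body(),
		 * so a program keeps the sequence that was current when it
		 * was JITed: an eieio with an extra hint bit set in its
		 * encoding, sync plus a load from the PACA (r13) plus
		 * ori 31,31,0, or a call through the bpf_stf_barrier()
		 * fallback defined earlier.
		 */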

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_BPF_STL(src_reg, dst_reg, off);
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
			if (imm != BPF_ADD) {
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}

			/* *(u32 *)(dst + off) += src */

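			/*
			 * Emit a load-reserve/store-conditional loop: lwarx
			 * reserves the word, and if the paired stwcx. fails
			 * (CR0[EQ] clear because the reservation was lost),
			 * we branch back to tmp_idx and retry.
			 */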
			/* Get EA into TMP_REG_1 */
			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
			/* add value from src_reg into this */
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
			/* store result back */
			EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (imm != BPF_ADD) {
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}
			/* *(u64 *)(dst + off) += src */

			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
			tmp_idx = ctx->idx * 4;
			EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
			EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_BPF_LL(dst_reg, src_reg, off);
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
			/* move return value from r3 to BPF_REG_0 */
			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

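		/*
		 * All conditional jumps funnel here with true_cond set to
		 * the condition to branch on; all that differs is the
		 * compare, chosen below by operand form (reg/imm),
		 * signedness and width (JMP vs JMP32).
		 */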
cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg,
							     src_reg));
				} else {
					int tmp_reg = b2p[TMP_REG_1];

					EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
								31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg,
								   b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg,
								   b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg,
								  b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPD(dst_reg,
								  b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
				else {
					int tmp_reg = b2p[TMP_REG_1];

					PPC_LI32(tmp_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg,
								     tmp_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp_reg, dst_reg,
								 tmp_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg,
									0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					   code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}