/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit64.h"

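/*
 * Fill the JIT image with trap instructions so that a stray branch into
 * an unpopulated part of the allocation faults immediately instead of
 * executing leftover bytes.
 */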
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size/4);
}

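/*
 * Order the image writes ahead of the flush, then make the icache
 * coherent so no CPU executes stale bytes from the JIT buffer.
 */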
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

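/*
 * ctx->seen tracks which PPC registers the program touches: GPR r(n) is
 * recorded in bit (31 - n), so r31 maps to the least significant bit.
 */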
static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= (1 << (31 - b2p[i]));
}

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 6*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 8
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 16);
}

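/* tail_call_cnt sits just above local_tmp_var (see the layout above) */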
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 8;
}

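/*
 * Non-volatile GPRs are saved at the top of our stack frame, with the
 * slot for r(reg) sitting 8 * (32 - reg) bytes below the frame's top.
 */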
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}

static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		PPC_LI(b2p[TMP_REG_1], 0);
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		PPC_NOP();
		PPC_NOP();
	}

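/* Size (in bytes) of the two tail_call_cnt setup instructions above */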
#define BPF_TAILCALL_PROLOGUE_SIZE	8

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, BPF_REG_FP))
		PPC_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size);
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
		}
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	PPC_MR(3, b2p[BPF_REG_0]);

	PPC_BLR();
}

static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/*
	 * For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		PPC_NOP();

#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourselves.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	PPC_MTLR(12);
	PPC_BLRL();
}

static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3, r4 and r5:
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
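	/* index (BPF_REG_3) is 32-bit: clear its upper 32 bits before use */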
	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	PPC_CMPLDI(b2p[TMP_REG_1], 0);
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
	PPC_MTCTR(b2p[TMP_REG_1]);

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	PPC_BCTR();
	/* out: */
}

/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs, bool extra_pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		u64 imm64;
		u8 *func;
		u32 true_cond;
		u32 tmp_idx;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (BPF_OP(code) == BPF_SUB)
				imm = -imm;
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				}
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			else if (imm == 1)
				goto bpf_alu32_trunc;

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVD(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert LSB to reg */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;

emit_clear:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			PPC_RLDICL(dst_reg, dst_reg, 0, 48);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			PPC_RLDICL(dst_reg, dst_reg, 0, 32);
			break;
		case 64:
			/* nop */
			break;
		}
		break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STD(src_reg, dst_reg, off);
			break;

		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
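			/* same reservation-based retry loop as above, using ldarx/stdcx. */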
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_LD(dst_reg, src_reg, off);
			break;

		/*
		 * Doubleword load
		 * 16-byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			/* bpf function call */
			if (insn[i].src_reg == BPF_PSEUDO_CALL)
				if (!extra_pass)
					func = NULL;
				else if (fp->aux->func && off < fp->aux->func_cnt)
					/* use the subprog id from the off
					 * field to look up the callee address
					 */
					func = (u8 *) fp->aux->func[off]->bpf_func;
				else
					return -EINVAL;
			/* kernel helper call */
			else
				func = (u8 *) __bpf_call_base + imm;

			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

struct powerpc64_jit_data {
	struct bpf_binary_header *header;
	u32 *addrs;
	u8 *image;
	u32 proglen;
	struct codegen_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc64_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;

	if (!fp->jit_requested)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
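
	/*
	 * On the extra pass for a program with bpf-to-bpf calls, reuse the
	 * image and context saved earlier and just regenerate the code with
	 * the now-known callee addresses.
	 */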
	addrs = jit_data->addrs;
	if (addrs) {
		cgctx = jit_data->ctx;
		image = jit_data->image;
		bpf_hdr = jit_data->header;
		proglen = jit_data->proglen;
		alloclen = proglen + FUNCTION_DESCR_SIZE;
		extra_pass = true;
		goto skip_init_ctx;
	}

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
			bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out_addrs;
	}

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

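	/*
	 * addrs[] only holds final offsets after the first real pass, so a
	 * second pass is needed to emit correct forward branch targets.
	 */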
	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/*
		 * Note that we output the base address of code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;

	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
	if (!fp->is_func || extra_pass) {
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->image = image;
		jit_data->header = bpf_hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *bpf_hdr = (void *)addr;

	if (fp->jited)
		bpf_jit_binary_free(bpf_hdr);

	bpf_prog_unlock_free(fp);
}