/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit64.h"

static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size/4);
}

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= (1 << (31 - b2p[i]));
}

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[	stack pointer	] --------------
 *		[   nv gpr save area	] 6*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 8
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 16);
}

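/*
 * tail_call_cnt is kept 8 bytes above the local temp slot, so the same
 * offset computation works whether or not we have our own stack frame
 * (see the layout above).
 */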
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 8;
}

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}

static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		PPC_LI(b2p[TMP_REG_1], 0);
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		PPC_NOP();
		PPC_NOP();
	}

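	/*
	 * The two instructions above (either the LI/STL pair or the two
	 * NOPs) are exactly what a tail call skips over: see the use of
	 * BPF_TAILCALL_PROLOGUE_SIZE in bpf_jit_emit_tail_call().
	 */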
#define BPF_TAILCALL_PROLOGUE_SIZE	8

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

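		/*
		 * stdu stores the old r1 (the back-chain pointer) at the new
		 * stack top and updates r1 in the same instruction, so the
		 * frame is set up in one step.
		 */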
		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, BPF_REG_FP))
		PPC_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size);
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
		}
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	PPC_MR(3, b2p[BPF_REG_0]);

	PPC_BLR();
}

static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		PPC_NOP();

#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

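	/*
	 * The call goes through LR (mtlr + blrl) because the helper can be
	 * anywhere in the kernel address space, typically out of range of a
	 * direct relative branch from the JITed image.
	 */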
	PPC_MTLR(12);
	PPC_BLRL();
}

static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
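	/* index is a u32, so clear the upper 32 bits before the unsigned compare */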
	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	PPC_CMPLDI(b2p[TMP_REG_1], 0);
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
	PPC_MTCTR(b2p[TMP_REG_1]);

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	PPC_BCTR();
	/* out: */
}

/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		u64 imm64;
		u8 *func;
		u32 true_cond;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (BPF_OP(code) == BPF_SUB)
				imm = -imm;
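			/*
			 * addi takes a 16-bit signed immediate, so wider
			 * constants are built in a temp register first.
			 */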
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				}
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			else if (imm == 1)
				goto bpf_alu32_trunc;

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVD(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert LSB to reg */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;

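		/*
		 * BPF_FROM_BE/LE with the native byte order is just a
		 * truncation to the requested width, i.e. a zero-extension
		 * of the low 16/32 bits (nothing to do for 64).
		 */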
emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 48);
				break;
			case 32:
				/* zero-extend 32 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 32);
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STD(src_reg, dst_reg, off);
			break;

		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not word-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
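			/*
			 * lwarx/stwcx. form the atomic read-modify-write:
			 * stwcx. only succeeds (CR0[EQ] set) if the
			 * reservation taken by lwarx is still intact, i.e.
			 * nothing else stored to the word in between.
			 */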
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
			/* otherwise, let's try once more */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* exit if the store was not successful */
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not doubleword-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_LD(dst_reg, src_reg, off);
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;
			func = (u8 *) __bpf_call_base + imm;

			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;

	if (!fp->jit_requested)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	flen = fp->len;
	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out;
	}
Naveen N. Rao156d0e22016-06-22 21:55:07 +0530924
925 /*
926 * Pretend to build prologue, given the features we've seen. This will
927 * update ctgtx.idx as it pretends to output instructions, then we can
928 * calculate total size from idx.
929 */
930 bpf_jit_build_prologue(0, &cgctx);
931 bpf_jit_build_epilogue(0, &cgctx);
932
933 proglen = cgctx.idx * 4;
934 alloclen = proglen + FUNCTION_DESCR_SIZE;
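	/*
	 * FUNCTION_DESCR_SIZE reserves room for the ELF ABI v1 function
	 * descriptor that is filled in below (it is zero on ABI v2).
	 */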

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
			bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out;
	}

	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
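	/*
	 * Two passes over the real image: exit_addr and the branch targets
	 * in addrs[] only settle once a full pass has been emitted, so the
	 * second pass regenerates everything with final addresses.
	 */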
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/*
		 * Note that we dump code_base rather than image, since
		 * the opcodes start at code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;

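	/*
	 * The image was written through the data cache; flush it so the
	 * instruction fetch side sees the new opcodes before the program
	 * can be run.
	 */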
	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));

out:
	kfree(addrs);

	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *bpf_hdr = (void *)addr;

	if (fp->jited)
		bpf_jit_binary_free(bpf_hdr);

	bpf_prog_unlock_free(fp);
}