// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit.h"

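/*
 * Fill the allocated JIT image with trap instructions so that any stray
 * branch into not-yet-populated space traps instead of executing stale bytes.
 */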
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}

/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx, u32 *addrs)
{
	const struct bpf_insn *insn = fp->insnsi;
	bool func_addr_fixed;
	u64 func_addr;
	u32 tmp_idx;
	int i, j, ret;

	for (i = 0; i < fp->len; i++) {
		/*
		 * During the extra pass, only the branch target addresses for
		 * the subprog calls need to be fixed. All other instructions
		 * can be left untouched.
		 *
		 * The JITed image length does not change because we already
		 * ensure that the JITed instruction sequences for these calls
		 * are of fixed length by padding them with NOPs.
		 */
		if (insn[i].code == (BPF_JMP | BPF_CALL) &&
		    insn[i].src_reg == BPF_PSEUDO_CALL) {
			ret = bpf_jit_get_func_addr(fp, &insn[i], true,
						    &func_addr,
						    &func_addr_fixed);
			if (ret < 0)
				return ret;

			/*
			 * Save ctx->idx as this would currently point to the
			 * end of the JITed image and set it to the offset of
			 * the instruction sequence corresponding to the
			 * subprog call temporarily.
			 */
			tmp_idx = ctx->idx;
			ctx->idx = addrs[i] / 4;
			bpf_jit_emit_func_call_rel(image, ctx, func_addr);

			/*
			 * Restore ctx->idx here. This is safe as the length
			 * of the JITed sequence remains unchanged.
			 */
			ctx->idx = tmp_idx;
		} else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
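			/*
			 * Re-emit the ldimm64 with the (possibly updated)
			 * 64-bit immediate, then pad the remainder of the
			 * fixed-size slot with NOPs so that instruction
			 * offsets and the image length stay unchanged. On
			 * PPC32, the upper 32-bit half of the BPF register
			 * lives in the paired GPR at b2p[reg] - 1.
			 */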
			tmp_idx = ctx->idx;
			ctx->idx = addrs[i] / 4;
#ifdef CONFIG_PPC32
			PPC_LI32(ctx->b2p[insn[i].dst_reg] - 1, (u32)insn[i + 1].imm);
			PPC_LI32(ctx->b2p[insn[i].dst_reg], (u32)insn[i].imm);
			for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
				EMIT(PPC_RAW_NOP());
#else
			func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
			PPC_LI64(b2p[insn[i].dst_reg], func_addr);
			/* overwrite rest with nops */
			for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
				EMIT(PPC_RAW_NOP());
#endif
			ctx->idx = tmp_idx;
			i++;
		}
	}

	return 0;
}

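/*
 * JIT state stashed in fp->aux->jit_data between the initial compilation
 * and the extra pass that fixes up subprog call and ldimm64 addresses.
 */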
struct powerpc64_jit_data {
	struct bpf_binary_header *header;
	u32 *addrs;
	u8 *image;
	u32 proglen;
	struct codegen_context ctx;
};

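/*
 * The verifier inserts explicit zero-extension for 32-bit subregister
 * writes on our behalf, so the JIT does not have to emit it itself.
 */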
bool bpf_jit_needs_zext(void)
{
	return true;
}

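/* Main JIT entry point: translate an eBPF program into powerpc machine code */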
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc64_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;
	u32 extable_len;
	u32 fixup_len;

	if (!fp->jit_requested)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
	addrs = jit_data->addrs;
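	/*
	 * A populated addrs[] means we are back for the extra pass: reuse the
	 * image, context and offsets saved by the initial compilation.
	 */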
	if (addrs) {
		cgctx = jit_data->ctx;
		image = jit_data->image;
		bpf_hdr = jit_data->header;
		proglen = jit_data->proglen;
		extra_pass = true;
		goto skip_init_ctx;
	}

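	/* addrs[] records each BPF instruction's byte offset in the JITed image */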
	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));
	memcpy(cgctx.b2p, b2p, sizeof(cgctx.b2p));

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * If we have seen a tail call, we need a second pass.
	 * This is because bpf_jit_emit_common_epilogue() is called
	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
	 */
	if (cgctx.seen & SEEN_TAILCALL) {
		cgctx.idx = 0;
		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
			fp = org_fp;
			goto out_addrs;
		}
	}

	bpf_jit_realloc_regs(&cgctx);
	/*
	 * Pretend to build the prologue, given the features we've seen. This
	 * will update cgctx.idx as it pretends to output instructions, then
	 * we can calculate the total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

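	/*
	 * Image layout: [optional function descriptor][JITed program]
	 * [BPF_PROBE_MEM fixup stubs][exception table]
	 */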
	fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
	extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out_addrs;
	}

	if (extable_len)
		fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	if (extra_pass) {
		/*
		 * Do not touch the prologue and epilogue as they will remain
		 * unchanged. Only fix the branch target addresses for subprog
		 * calls and the ldimm64 instructions in the body.
		 *
		 * This does not change the offsets and lengths of the subprog
		 * call instruction sequences, and hence the size of the JITed
		 * image stays the same as well.
		 */
		bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs);

		/* There is no need to perform the usual passes. */
		goto skip_codegen_passes;
	}

	/* Code generation passes 1-2 */
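	/*
	 * addrs[] is refreshed on each pass, so branch targets emitted in
	 * pass 2 use the instruction offsets that settled during pass 1.
	 */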
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass)) {
			bpf_jit_binary_free(bpf_hdr);
			fp = org_fp;
			goto out_addrs;
		}
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

skip_codegen_passes:
	if (bpf_jit_enable > 1)
		/*
		 * Note that we output the address of code_base rather than
		 * image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = proglen + FUNCTION_DESCR_SIZE;

	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
	if (!fp->is_func || extra_pass) {
		bpf_jit_binary_lock_ro(bpf_hdr);
		bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->image = image;
		jit_data->header = bpf_hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/*
 * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
 * this function, as this only applies to BPF_PROBE_MEM, for now.
 */
int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
			  int insn_idx, int jmp_off, int dst_reg)
{
	off_t offset;
	unsigned long pc;
	struct exception_table_entry *ex;
	u32 *fixup;

	/* Populate extable entries only in the last pass */
	if (pass != 2)
		return 0;

	if (!fp->aux->extable ||
	    WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
		return -EINVAL;

	pc = (unsigned long)&image[insn_idx];

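	/*
	 * The fixup stubs sit immediately below the exception table in the
	 * image; index to the stub reserved for this entry.
	 */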
	fixup = (void *)fp->aux->extable -
		(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
		(ctx->exentry_idx * BPF_FIXUP_LEN * 4);

	fixup[0] = PPC_RAW_LI(dst_reg, 0);
	if (IS_ENABLED(CONFIG_PPC32))
		fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */

	fixup[BPF_FIXUP_LEN - 1] =
		PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);

	ex = &fp->aux->extable[ctx->exentry_idx];

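	/*
	 * Both extable fields hold offsets relative to themselves. The code
	 * and fixup stubs live below the table, so the offsets must be
	 * negative and fit in an int.
	 */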
	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->insn = offset;

	offset = (long)fixup - (long)&ex->fixup;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->fixup = offset;

	ctx->exentry_idx++;
	return 0;
}