// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit.h"

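/*
 * Fill a newly allocated JIT buffer with trap (breakpoint) instructions so
 * that any stray execution of unpopulated space faults immediately.
 */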
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}

/* Fix the branch target addresses for subprog calls */
static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
				       struct codegen_context *ctx, u32 *addrs)
{
	const struct bpf_insn *insn = fp->insnsi;
	bool func_addr_fixed;
	u64 func_addr;
	u32 tmp_idx;
	int i, ret;

	for (i = 0; i < fp->len; i++) {
		/*
		 * During the extra pass, only the branch target addresses for
		 * the subprog calls need to be fixed. All other instructions
		 * can be left untouched.
		 *
		 * The JITed image length does not change because we already
		 * ensure that the JITed instruction sequences for these calls
		 * are of fixed length by padding them with NOPs.
		 */
		if (insn[i].code == (BPF_JMP | BPF_CALL) &&
		    insn[i].src_reg == BPF_PSEUDO_CALL) {
			ret = bpf_jit_get_func_addr(fp, &insn[i], true,
						    &func_addr,
						    &func_addr_fixed);
			if (ret < 0)
				return ret;

			/*
			 * Save ctx->idx as this would currently point to the
			 * end of the JITed image and set it to the offset of
			 * the instruction sequence corresponding to the
			 * subprog call temporarily.
			 */
			tmp_idx = ctx->idx;
			ctx->idx = addrs[i] / 4;
			bpf_jit_emit_func_call_rel(image, ctx, func_addr);

			/*
			 * Restore ctx->idx here. This is safe as the length
			 * of the JITed sequence remains unchanged.
			 */
			ctx->idx = tmp_idx;
		}
	}

	return 0;
}

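/*
 * JIT state preserved in fp->aux->jit_data between the initial compilation
 * and the extra pass that fixes up subprog call targets.
 */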
struct powerpc64_jit_data {
	struct bpf_binary_header *header;
	u32 *addrs;
	u8 *image;
	u32 proglen;
	struct codegen_context ctx;
};

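/*
 * Ask the verifier to insert explicit zero-extension instructions for 32-bit
 * sub-register writes; the JIT does not zero-extend them implicitly.
 */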
bool bpf_jit_needs_zext(void)
{
	return true;
}

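/* Main entry point: JIT-compile an eBPF program into native powerpc code. */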
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc64_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;

	if (!fp->jit_requested)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

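	/*
	 * For programs with subprog calls, state from the initial compilation
	 * is stashed in fp->aux->jit_data and reused by the extra pass below;
	 * allocate it on first use.
	 */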
	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
	addrs = jit_data->addrs;
	if (addrs) {
		cgctx = jit_data->ctx;
		image = jit_data->image;
		bpf_hdr = jit_data->header;
		proglen = jit_data->proglen;
		alloclen = proglen + FUNCTION_DESCR_SIZE;
		extra_pass = true;
		goto skip_init_ctx;
	}

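	/*
	 * addrs[i] records the byte offset of the JITed code for BPF
	 * instruction i; the extra flen+1'th entry marks the end of the
	 * image. These offsets are used for branch targets and line info.
	 */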
	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));
	memcpy(cgctx.b2p, b2p, sizeof(cgctx.b2p));

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * If we have seen a tail call, we need a second pass.
	 * This is because bpf_jit_emit_common_epilogue() is called
	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
	 */
	if (cgctx.seen & SEEN_TAILCALL) {
		cgctx.idx = 0;
		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
			fp = org_fp;
			goto out_addrs;
		}
	}

	bpf_jit_realloc_regs(&cgctx);
	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out_addrs;
	}

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	if (extra_pass) {
		/*
		 * Do not touch the prologue and epilogue as they will remain
		 * unchanged. Only fix the branch target address for subprog
		 * calls in the body.
		 *
		 * This does not change the offsets and lengths of the subprog
		 * call instruction sequences and hence, the size of the JITed
		 * image as well.
		 */
		bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);

		/* There is no need to perform the usual passes. */
		goto skip_codegen_passes;
	}

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
			bpf_jit_binary_free(bpf_hdr);
			fp = org_fp;
			goto out_addrs;
		}
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

skip_codegen_passes:
	if (bpf_jit_enable > 1)
		/*
		 * Note that we output code_base rather than image, since
		 * the opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;

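	/*
	 * Make the freshly written instructions visible to instruction fetch,
	 * then mark the JIT buffer read-only before handing it out.
	 */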
	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
	bpf_jit_binary_lock_ro(bpf_hdr);
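	/*
	 * For programs without subprogs, or once the extra pass has run, the
	 * JIT is complete: record the jited line info and free temporaries.
	 * Otherwise, stash the state needed by the upcoming extra pass.
	 */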
	if (!fp->is_func || extra_pass) {
		bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->image = image;
		jit_data->header = bpf_hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}