// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

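/*
 * Allocate memory for a module. The allocation is first attempted within
 * [module_alloc_base, module_alloc_base + MODULES_VSIZE); if that window is
 * exhausted and CONFIG_ARM64_MODULE_PLTS is enabled (and KASAN is not), it
 * is retried in a 2 GiB window, relying on PLTs for out-of-range branches.
 */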
void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	if (IS_ENABLED(CONFIG_KASAN))
		/* don't exceed the static module region - see below */
		module_alloc_end = MODULES_END;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + SZ_2G, GFP_KERNEL,
				PAGE_KERNEL, 0, NUMA_NO_NODE,
				__builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

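/*
 * Compute the value of a relocation. In AArch64 ELF terms, @val is S + A
 * (symbol value plus addend) and @place is P (the address being patched):
 * ABS yields S + A, PREL yields S + A - P, and PAGE yields
 * Page(S + A) - Page(P), where Page() clears the low 12 bits.
 */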
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

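/*
 * Patch the 16-bit immediate field of a MOVZ/MOVK/MOVN instruction with
 * bits [lsb + 15:lsb] of the relocated value. For MOVNZ relocations, the
 * opcode is rewritten to MOVZ or MOVN depending on the sign of the value,
 * and the immediate is inverted in the MOVN case.
 */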
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

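/*
 * Patch a variable-width immediate field: extract @len bits of the relocated
 * value starting at bit @lsb, insert them into the field identified by
 * @imm_type, and return -ERANGE if the discarded upper bits are not a pure
 * sign extension of the encoded value.
 */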
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

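/*
 * Apply an R_AARCH64_ADR_PREL_PG_HI21 relocation. When the ADRP instruction
 * sits at an offset affected by Cortex-A53 erratum 843419 (see
 * is_forbidden_offset_for_adrp()), avoid it by patching the ADRP into an ADR
 * if the target is in range, or by branching to a veneer otherwise.
 */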
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}

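/*
 * Apply all RELA relocations in section @relsec to the module being loaded.
 * Each relocation type is dispatched to the data, MOVW or immediate helpers
 * above; JUMP26/CALL26 branches that end up out of range fall back to a PLT
 * entry when CONFIG_ARM64_MODULE_PLTS is enabled.
 */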
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
				    const Elf_Shdr *sechdrs,
				    const char *name)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(name, secstrs + s->sh_name) == 0)
			return s;
	}

	return NULL;
}

static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}

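/*
 * Initialise the trampoline PLT entries in the module's
 * .text.ftrace_trampoline section so that ftrace callsites in the module
 * can reach the ftrace entry code even when it is out of direct branch
 * range.
 */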
static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		__init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}

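/*
 * Final architecture-specific fixups: apply any alternative instruction
 * sequences recorded in the module's .altinstructions section, then set up
 * the ftrace trampoline PLTs.
 */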
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);

	return module_init_ftrace_plt(hdr, sechdrs, me);
}