Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * s390 code for kexec_file_load system call |
| 4 | * |
| 5 | * Copyright IBM Corp. 2018 |
| 6 | * |
| 7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> |
| 8 | */ |
| 9 | |
| 10 | #include <linux/elf.h> |
Philipp Rudo | e23a802 | 2019-02-26 10:50:39 +0100 | [diff] [blame] | 11 | #include <linux/errno.h> |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 12 | #include <linux/kexec.h> |
Philipp Rudo | e23a802 | 2019-02-26 10:50:39 +0100 | [diff] [blame] | 13 | #include <linux/module.h> |
| 14 | #include <linux/verification.h> |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 15 | #include <asm/boot_data.h> |
Philipp Rudo | e23a802 | 2019-02-26 10:50:39 +0100 | [diff] [blame] | 16 | #include <asm/ipl.h> |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 17 | #include <asm/setup.h> |
| 18 | |
/*
 * Image parsers tried in order by kexec_file_load(); the ELF loader is
 * preferred over the raw s390 image loader.  The list is NULL-terminated.
 */
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&s390_kexec_elf_ops,
	&s390_kexec_image_ops,
	NULL,
};
| 24 | |
Jiri Bohac | 99d5cadf | 2019-08-19 17:17:44 -0700 | [diff] [blame^] | 25 | #ifdef CONFIG_KEXEC_SIG |
/*
 * Module signature information block.
 *
 * The constituents of the signature section are, in order:
 *
 *	- Signer's name
 *	- Key identifier
 *	- Signature data
 *	- Information block
 *
 * NOTE(review): this mirrors the trailer appended by scripts/sign-file
 * (module signing); the layout must stay in sync with that format.
 */
struct module_signature {
	u8 algo;	/* Public-key crypto algorithm [0] */
	u8 hash;	/* Digest algorithm [0] */
	u8 id_type;	/* Key identifier type [PKEY_ID_PKCS7] */
	u8 signer_len;	/* Length of signer's name [0] */
	u8 key_id_len;	/* Length of key identifier [0] */
	u8 __pad[3];	/* Must be zero */
	__be32 sig_len;	/* Length of signature data (big endian) */
};

/* id_type value for PKCS#7 signatures (matches enum pkey_id_type). */
#define PKEY_ID_PKCS7 2
| 47 | |
/*
 * Verify the PKCS#7 signature appended to a kernel image.
 *
 * Seen from the end, the image layout is:
 *
 *	[ kernel | signature data | struct module_signature | MODULE_SIG_STRING ]
 *
 * Returns 0 when verification succeeds or is skipped (machine was not
 * secure IPLed), -EKEYREJECTED/-EBADMSG for a missing or malformed
 * signature trailer, or the error from verify_pkcs7_signature().
 */
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature *ms;
	unsigned long sig_len;

	/* Skip signature verification when not secure IPLed. */
	if (!ipl_secure_flag)
		return 0;

	if (marker_len > kernel_len)
		return -EKEYREJECTED;

	/* The image must end with the well-known signature marker. */
	if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
		   marker_len))
		return -EKEYREJECTED;
	kernel_len -= marker_len;

	/* The information block sits directly in front of the marker. */
	ms = (void *)kernel + kernel_len - sizeof(*ms);
	kernel_len -= sizeof(*ms);

	/* The signature data must fit in front of the information block. */
	sig_len = be32_to_cpu(ms->sig_len);
	if (sig_len >= kernel_len)
		return -EKEYREJECTED;
	kernel_len -= sig_len;

	if (ms->id_type != PKEY_ID_PKCS7)
		return -EKEYREJECTED;

	/* For PKCS#7 every field except sig_len must be zero. */
	if (ms->algo != 0 ||
	    ms->hash != 0 ||
	    ms->signer_len != 0 ||
	    ms->key_id_len != 0 ||
	    ms->__pad[0] != 0 ||
	    ms->__pad[1] != 0 ||
	    ms->__pad[2] != 0) {
		return -EBADMSG;
	}

	/* kernel_len is now the length of the payload actually signed. */
	return verify_pkcs7_signature(kernel, kernel_len,
				      kernel + kernel_len, sig_len,
				      VERIFY_USE_PLATFORM_KEYRING,
				      VERIFYING_MODULE_SIGNATURE,
				      NULL, NULL);
}
Jiri Bohac | 99d5cadf | 2019-08-19 17:17:44 -0700 | [diff] [blame^] | 93 | #endif /* CONFIG_KEXEC_SIG */ |
Philipp Rudo | e23a802 | 2019-02-26 10:50:39 +0100 | [diff] [blame] | 94 | |
Philipp Rudo | 653beba | 2019-03-07 15:56:34 +0100 | [diff] [blame] | 95 | static int kexec_file_update_purgatory(struct kimage *image, |
| 96 | struct s390_load_data *data) |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 97 | { |
| 98 | u64 entry, type; |
| 99 | int ret; |
| 100 | |
Philipp Rudo | ee337f5 | 2017-09-05 11:55:23 +0200 | [diff] [blame] | 101 | if (image->type == KEXEC_TYPE_CRASH) { |
| 102 | entry = STARTUP_KDUMP_OFFSET; |
| 103 | type = KEXEC_TYPE_CRASH; |
| 104 | } else { |
| 105 | entry = STARTUP_NORMAL_OFFSET; |
| 106 | type = KEXEC_TYPE_DEFAULT; |
| 107 | } |
| 108 | |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 109 | ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry, |
| 110 | sizeof(entry), false); |
Philipp Rudo | ee337f5 | 2017-09-05 11:55:23 +0200 | [diff] [blame] | 111 | if (ret) |
| 112 | return ret; |
| 113 | |
| 114 | ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type, |
| 115 | sizeof(type), false); |
| 116 | if (ret) |
| 117 | return ret; |
| 118 | |
| 119 | if (image->type == KEXEC_TYPE_CRASH) { |
| 120 | u64 crash_size; |
| 121 | |
| 122 | ret = kexec_purgatory_get_set_symbol(image, "crash_start", |
| 123 | &crashk_res.start, |
| 124 | sizeof(crashk_res.start), |
| 125 | false); |
| 126 | if (ret) |
| 127 | return ret; |
| 128 | |
| 129 | crash_size = crashk_res.end - crashk_res.start + 1; |
| 130 | ret = kexec_purgatory_get_set_symbol(image, "crash_size", |
| 131 | &crash_size, |
| 132 | sizeof(crash_size), |
| 133 | false); |
| 134 | } |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 135 | return ret; |
| 136 | } |
| 137 | |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 138 | static int kexec_file_add_purgatory(struct kimage *image, |
| 139 | struct s390_load_data *data) |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 140 | { |
| 141 | struct kexec_buf buf; |
| 142 | int ret; |
| 143 | |
| 144 | buf.image = image; |
| 145 | |
| 146 | data->memsz = ALIGN(data->memsz, PAGE_SIZE); |
| 147 | buf.mem = data->memsz; |
Philipp Rudo | ee337f5 | 2017-09-05 11:55:23 +0200 | [diff] [blame] | 148 | if (image->type == KEXEC_TYPE_CRASH) |
| 149 | buf.mem += crashk_res.start; |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 150 | |
| 151 | ret = kexec_load_purgatory(image, &buf); |
| 152 | if (ret) |
| 153 | return ret; |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 154 | data->memsz += buf.memsz; |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 155 | |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 156 | return kexec_file_update_purgatory(image, data); |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 157 | } |
| 158 | |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 159 | static int kexec_file_add_initrd(struct kimage *image, |
| 160 | struct s390_load_data *data) |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 161 | { |
| 162 | struct kexec_buf buf; |
| 163 | int ret; |
| 164 | |
| 165 | buf.image = image; |
| 166 | |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 167 | buf.buffer = image->initrd_buf; |
| 168 | buf.bufsz = image->initrd_buf_len; |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 169 | |
| 170 | data->memsz = ALIGN(data->memsz, PAGE_SIZE); |
| 171 | buf.mem = data->memsz; |
Philipp Rudo | ee337f5 | 2017-09-05 11:55:23 +0200 | [diff] [blame] | 172 | if (image->type == KEXEC_TYPE_CRASH) |
| 173 | buf.mem += crashk_res.start; |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 174 | buf.memsz = buf.bufsz; |
| 175 | |
Philipp Rudo | d0d249d | 2019-03-06 17:36:26 +0100 | [diff] [blame] | 176 | data->parm->initrd_start = buf.mem; |
| 177 | data->parm->initrd_size = buf.memsz; |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 178 | data->memsz += buf.memsz; |
| 179 | |
| 180 | ret = kexec_add_buffer(&buf); |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 181 | if (ret) |
| 182 | return ret; |
| 183 | |
| 184 | return ipl_report_add_component(data->report, &buf, 0, 0); |
| 185 | } |
| 186 | |
| 187 | static int kexec_file_add_ipl_report(struct kimage *image, |
| 188 | struct s390_load_data *data) |
| 189 | { |
| 190 | __u32 *lc_ipl_parmblock_ptr; |
| 191 | unsigned int len, ncerts; |
| 192 | struct kexec_buf buf; |
| 193 | unsigned long addr; |
| 194 | void *ptr, *end; |
| 195 | |
| 196 | buf.image = image; |
| 197 | |
| 198 | data->memsz = ALIGN(data->memsz, PAGE_SIZE); |
| 199 | buf.mem = data->memsz; |
| 200 | if (image->type == KEXEC_TYPE_CRASH) |
| 201 | buf.mem += crashk_res.start; |
| 202 | |
| 203 | ptr = (void *)ipl_cert_list_addr; |
| 204 | end = ptr + ipl_cert_list_size; |
| 205 | ncerts = 0; |
| 206 | while (ptr < end) { |
| 207 | ncerts++; |
| 208 | len = *(unsigned int *)ptr; |
| 209 | ptr += sizeof(len); |
| 210 | ptr += len; |
| 211 | } |
| 212 | |
| 213 | addr = data->memsz + data->report->size; |
| 214 | addr += ncerts * sizeof(struct ipl_rb_certificate_entry); |
| 215 | ptr = (void *)ipl_cert_list_addr; |
| 216 | while (ptr < end) { |
| 217 | len = *(unsigned int *)ptr; |
| 218 | ptr += sizeof(len); |
| 219 | ipl_report_add_certificate(data->report, ptr, addr, len); |
| 220 | addr += len; |
| 221 | ptr += len; |
| 222 | } |
| 223 | |
| 224 | buf.buffer = ipl_report_finish(data->report); |
| 225 | buf.bufsz = data->report->size; |
| 226 | buf.memsz = buf.bufsz; |
| 227 | |
| 228 | data->memsz += buf.memsz; |
| 229 | |
| 230 | lc_ipl_parmblock_ptr = |
| 231 | data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr); |
| 232 | *lc_ipl_parmblock_ptr = (__u32)buf.mem; |
| 233 | |
| 234 | return kexec_add_buffer(&buf); |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 235 | } |
| 236 | |
/*
 * Common load path for both image formats: the format-specific
 * add_kernel() callback loads the kernel itself (and must set
 * data.parm, data.kernel_buf and data.kernel_mem); afterwards the
 * command line, crash info, initrd, purgatory and IPL report are added.
 *
 * Returns ERR_PTR(ret); note ret == 0 yields NULL, which the generic
 * kexec_file code treats as success with no arch-private payload.
 */
void *kexec_file_add_components(struct kimage *image,
				int (*add_kernel)(struct kimage *image,
						  struct s390_load_data *data))
{
	struct s390_load_data data = {0};
	int ret;

	data.report = ipl_report_init(&ipl_block);
	if (IS_ERR(data.report))
		return data.report;

	ret = add_kernel(image, &data);
	if (ret)
		goto out;

	/* Command line must fit the parameter area (NUL included). */
	if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(data.parm->command_line, image->cmdline_buf,
	       image->cmdline_buf_len);

	/* Tell the kdump kernel where the old kernel's memory lives. */
	if (image->type == KEXEC_TYPE_CRASH) {
		data.parm->oldmem_base = crashk_res.start;
		data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
	}

	if (image->initrd_buf) {
		ret = kexec_file_add_initrd(image, &data);
		if (ret)
			goto out;
	}

	ret = kexec_file_add_purgatory(image, &data);
	if (ret)
		goto out;

	/*
	 * Kernel loaded at offset 0: plant a restart PSW pointing at the
	 * real entry so the image can be started via PSW restart, and
	 * enter at 0.  (0x0008000080000000UL = EA+BA 64-bit PSW mask.)
	 */
	if (data.kernel_mem == 0) {
		unsigned long restart_psw =  0x0008000080000000UL;
		restart_psw += image->start;
		memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
		image->start = 0;
	}

	ret = kexec_file_add_ipl_report(image, &data);
out:
	ipl_report_free(data.report);
	return ERR_PTR(ret);
}
| 286 | |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 287 | int arch_kexec_apply_relocations_add(struct purgatory_info *pi, |
| 288 | Elf_Shdr *section, |
| 289 | const Elf_Shdr *relsec, |
| 290 | const Elf_Shdr *symtab) |
| 291 | { |
| 292 | Elf_Rela *relas; |
Gerald Schaefer | 805bc0b | 2019-02-03 21:35:45 +0100 | [diff] [blame] | 293 | int i, r_type; |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 294 | |
| 295 | relas = (void *)pi->ehdr + relsec->sh_offset; |
| 296 | |
| 297 | for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) { |
| 298 | const Elf_Sym *sym; /* symbol to relocate */ |
| 299 | unsigned long addr; /* final location after relocation */ |
| 300 | unsigned long val; /* relocated symbol value */ |
| 301 | void *loc; /* tmp location to modify */ |
| 302 | |
| 303 | sym = (void *)pi->ehdr + symtab->sh_offset; |
| 304 | sym += ELF64_R_SYM(relas[i].r_info); |
| 305 | |
| 306 | if (sym->st_shndx == SHN_UNDEF) |
| 307 | return -ENOEXEC; |
| 308 | |
| 309 | if (sym->st_shndx == SHN_COMMON) |
| 310 | return -ENOEXEC; |
| 311 | |
| 312 | if (sym->st_shndx >= pi->ehdr->e_shnum && |
| 313 | sym->st_shndx != SHN_ABS) |
| 314 | return -ENOEXEC; |
| 315 | |
| 316 | loc = pi->purgatory_buf; |
| 317 | loc += section->sh_offset; |
| 318 | loc += relas[i].r_offset; |
| 319 | |
| 320 | val = sym->st_value; |
| 321 | if (sym->st_shndx != SHN_ABS) |
| 322 | val += pi->sechdrs[sym->st_shndx].sh_addr; |
| 323 | val += relas[i].r_addend; |
| 324 | |
| 325 | addr = section->sh_addr + relas[i].r_offset; |
| 326 | |
Gerald Schaefer | 805bc0b | 2019-02-03 21:35:45 +0100 | [diff] [blame] | 327 | r_type = ELF64_R_TYPE(relas[i].r_info); |
| 328 | arch_kexec_do_relocs(r_type, loc, val, addr); |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 329 | } |
| 330 | return 0; |
| 331 | } |
| 332 | |
| 333 | int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, |
| 334 | unsigned long buf_len) |
| 335 | { |
| 336 | /* A kernel must be at least large enough to contain head.S. During |
| 337 | * load memory in head.S will be accessed, e.g. to register the next |
| 338 | * command line. If the next kernel were smaller the current kernel |
| 339 | * will panic at load. |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 340 | */ |
Gerald Schaefer | 087c4d7 | 2019-04-08 12:49:58 +0200 | [diff] [blame] | 341 | if (buf_len < HEAD_END) |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 342 | return -ENOEXEC; |
| 343 | |
| 344 | return kexec_image_probe_default(image, buf, buf_len); |
| 345 | } |