Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * s390 code for kexec_file_load system call |
| 4 | * |
| 5 | * Copyright IBM Corp. 2018 |
| 6 | * |
| 7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> |
| 8 | */ |
| 9 | |
| 10 | #include <linux/elf.h> |
Philipp Rudo | e23a802 | 2019-02-26 10:50:39 +0100 | [diff] [blame] | 11 | #include <linux/errno.h> |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 12 | #include <linux/kexec.h> |
Thiago Jung Bauermann | c8424e7 | 2019-07-04 15:57:34 -0300 | [diff] [blame] | 13 | #include <linux/module_signature.h> |
Philipp Rudo | e23a802 | 2019-02-26 10:50:39 +0100 | [diff] [blame] | 14 | #include <linux/verification.h> |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 15 | #include <asm/boot_data.h> |
Philipp Rudo | e23a802 | 2019-02-26 10:50:39 +0100 | [diff] [blame] | 16 | #include <asm/ipl.h> |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 17 | #include <asm/setup.h> |
| 18 | |
/*
 * Image-format probes tried in order by kexec_file_load(); the first
 * loader whose probe accepts the supplied kernel image is used.
 */
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&s390_kexec_elf_ops,
	&s390_kexec_image_ops,
	NULL,
};
| 24 | |
Jiri Bohac | 99d5cadf | 2019-08-19 17:17:44 -0700 | [diff] [blame] | 25 | #ifdef CONFIG_KEXEC_SIG |
Philipp Rudo | e23a802 | 2019-02-26 10:50:39 +0100 | [diff] [blame] | 26 | int s390_verify_sig(const char *kernel, unsigned long kernel_len) |
| 27 | { |
| 28 | const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1; |
| 29 | struct module_signature *ms; |
| 30 | unsigned long sig_len; |
| 31 | |
| 32 | /* Skip signature verification when not secure IPLed. */ |
| 33 | if (!ipl_secure_flag) |
| 34 | return 0; |
| 35 | |
| 36 | if (marker_len > kernel_len) |
| 37 | return -EKEYREJECTED; |
| 38 | |
| 39 | if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING, |
| 40 | marker_len)) |
| 41 | return -EKEYREJECTED; |
| 42 | kernel_len -= marker_len; |
| 43 | |
| 44 | ms = (void *)kernel + kernel_len - sizeof(*ms); |
| 45 | kernel_len -= sizeof(*ms); |
| 46 | |
| 47 | sig_len = be32_to_cpu(ms->sig_len); |
| 48 | if (sig_len >= kernel_len) |
| 49 | return -EKEYREJECTED; |
| 50 | kernel_len -= sig_len; |
| 51 | |
| 52 | if (ms->id_type != PKEY_ID_PKCS7) |
| 53 | return -EKEYREJECTED; |
| 54 | |
| 55 | if (ms->algo != 0 || |
| 56 | ms->hash != 0 || |
| 57 | ms->signer_len != 0 || |
| 58 | ms->key_id_len != 0 || |
| 59 | ms->__pad[0] != 0 || |
| 60 | ms->__pad[1] != 0 || |
| 61 | ms->__pad[2] != 0) { |
| 62 | return -EBADMSG; |
| 63 | } |
| 64 | |
| 65 | return verify_pkcs7_signature(kernel, kernel_len, |
| 66 | kernel + kernel_len, sig_len, |
| 67 | VERIFY_USE_PLATFORM_KEYRING, |
| 68 | VERIFYING_MODULE_SIGNATURE, |
| 69 | NULL, NULL); |
| 70 | } |
Jiri Bohac | 99d5cadf | 2019-08-19 17:17:44 -0700 | [diff] [blame] | 71 | #endif /* CONFIG_KEXEC_SIG */ |
Philipp Rudo | e23a802 | 2019-02-26 10:50:39 +0100 | [diff] [blame] | 72 | |
Philipp Rudo | 653beba | 2019-03-07 15:56:34 +0100 | [diff] [blame] | 73 | static int kexec_file_update_purgatory(struct kimage *image, |
| 74 | struct s390_load_data *data) |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 75 | { |
| 76 | u64 entry, type; |
| 77 | int ret; |
| 78 | |
Philipp Rudo | ee337f5 | 2017-09-05 11:55:23 +0200 | [diff] [blame] | 79 | if (image->type == KEXEC_TYPE_CRASH) { |
| 80 | entry = STARTUP_KDUMP_OFFSET; |
| 81 | type = KEXEC_TYPE_CRASH; |
| 82 | } else { |
| 83 | entry = STARTUP_NORMAL_OFFSET; |
| 84 | type = KEXEC_TYPE_DEFAULT; |
| 85 | } |
| 86 | |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 87 | ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry, |
| 88 | sizeof(entry), false); |
Philipp Rudo | ee337f5 | 2017-09-05 11:55:23 +0200 | [diff] [blame] | 89 | if (ret) |
| 90 | return ret; |
| 91 | |
| 92 | ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type, |
| 93 | sizeof(type), false); |
| 94 | if (ret) |
| 95 | return ret; |
| 96 | |
| 97 | if (image->type == KEXEC_TYPE_CRASH) { |
| 98 | u64 crash_size; |
| 99 | |
| 100 | ret = kexec_purgatory_get_set_symbol(image, "crash_start", |
| 101 | &crashk_res.start, |
| 102 | sizeof(crashk_res.start), |
| 103 | false); |
| 104 | if (ret) |
| 105 | return ret; |
| 106 | |
| 107 | crash_size = crashk_res.end - crashk_res.start + 1; |
| 108 | ret = kexec_purgatory_get_set_symbol(image, "crash_size", |
| 109 | &crash_size, |
| 110 | sizeof(crash_size), |
| 111 | false); |
| 112 | } |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 113 | return ret; |
| 114 | } |
| 115 | |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 116 | static int kexec_file_add_purgatory(struct kimage *image, |
| 117 | struct s390_load_data *data) |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 118 | { |
| 119 | struct kexec_buf buf; |
| 120 | int ret; |
| 121 | |
| 122 | buf.image = image; |
| 123 | |
| 124 | data->memsz = ALIGN(data->memsz, PAGE_SIZE); |
| 125 | buf.mem = data->memsz; |
Philipp Rudo | ee337f5 | 2017-09-05 11:55:23 +0200 | [diff] [blame] | 126 | if (image->type == KEXEC_TYPE_CRASH) |
| 127 | buf.mem += crashk_res.start; |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 128 | |
| 129 | ret = kexec_load_purgatory(image, &buf); |
| 130 | if (ret) |
| 131 | return ret; |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 132 | data->memsz += buf.memsz; |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 133 | |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 134 | return kexec_file_update_purgatory(image, data); |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 135 | } |
| 136 | |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 137 | static int kexec_file_add_initrd(struct kimage *image, |
| 138 | struct s390_load_data *data) |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 139 | { |
| 140 | struct kexec_buf buf; |
| 141 | int ret; |
| 142 | |
| 143 | buf.image = image; |
| 144 | |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 145 | buf.buffer = image->initrd_buf; |
| 146 | buf.bufsz = image->initrd_buf_len; |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 147 | |
| 148 | data->memsz = ALIGN(data->memsz, PAGE_SIZE); |
| 149 | buf.mem = data->memsz; |
Philipp Rudo | ee337f5 | 2017-09-05 11:55:23 +0200 | [diff] [blame] | 150 | if (image->type == KEXEC_TYPE_CRASH) |
| 151 | buf.mem += crashk_res.start; |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 152 | buf.memsz = buf.bufsz; |
| 153 | |
Philipp Rudo | 70b6905 | 2020-05-12 19:39:56 +0200 | [diff] [blame] | 154 | data->parm->initrd_start = data->memsz; |
Philipp Rudo | d0d249d | 2019-03-06 17:36:26 +0100 | [diff] [blame] | 155 | data->parm->initrd_size = buf.memsz; |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 156 | data->memsz += buf.memsz; |
| 157 | |
| 158 | ret = kexec_add_buffer(&buf); |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 159 | if (ret) |
| 160 | return ret; |
| 161 | |
| 162 | return ipl_report_add_component(data->report, &buf, 0, 0); |
| 163 | } |
| 164 | |
| 165 | static int kexec_file_add_ipl_report(struct kimage *image, |
| 166 | struct s390_load_data *data) |
| 167 | { |
| 168 | __u32 *lc_ipl_parmblock_ptr; |
| 169 | unsigned int len, ncerts; |
| 170 | struct kexec_buf buf; |
| 171 | unsigned long addr; |
| 172 | void *ptr, *end; |
| 173 | |
| 174 | buf.image = image; |
| 175 | |
| 176 | data->memsz = ALIGN(data->memsz, PAGE_SIZE); |
| 177 | buf.mem = data->memsz; |
| 178 | if (image->type == KEXEC_TYPE_CRASH) |
| 179 | buf.mem += crashk_res.start; |
| 180 | |
| 181 | ptr = (void *)ipl_cert_list_addr; |
| 182 | end = ptr + ipl_cert_list_size; |
| 183 | ncerts = 0; |
| 184 | while (ptr < end) { |
| 185 | ncerts++; |
| 186 | len = *(unsigned int *)ptr; |
| 187 | ptr += sizeof(len); |
| 188 | ptr += len; |
| 189 | } |
| 190 | |
| 191 | addr = data->memsz + data->report->size; |
| 192 | addr += ncerts * sizeof(struct ipl_rb_certificate_entry); |
| 193 | ptr = (void *)ipl_cert_list_addr; |
| 194 | while (ptr < end) { |
| 195 | len = *(unsigned int *)ptr; |
| 196 | ptr += sizeof(len); |
| 197 | ipl_report_add_certificate(data->report, ptr, addr, len); |
| 198 | addr += len; |
| 199 | ptr += len; |
| 200 | } |
| 201 | |
| 202 | buf.buffer = ipl_report_finish(data->report); |
| 203 | buf.bufsz = data->report->size; |
| 204 | buf.memsz = buf.bufsz; |
| 205 | |
| 206 | data->memsz += buf.memsz; |
| 207 | |
| 208 | lc_ipl_parmblock_ptr = |
| 209 | data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr); |
| 210 | *lc_ipl_parmblock_ptr = (__u32)buf.mem; |
| 211 | |
| 212 | return kexec_add_buffer(&buf); |
Philipp Rudo | e49bb0a | 2017-08-30 14:03:38 +0200 | [diff] [blame] | 213 | } |
| 214 | |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 215 | void *kexec_file_add_components(struct kimage *image, |
| 216 | int (*add_kernel)(struct kimage *image, |
| 217 | struct s390_load_data *data)) |
| 218 | { |
Sven Schnelle | 5ecb2da | 2021-09-23 21:22:52 +0200 | [diff] [blame] | 219 | unsigned long max_command_line_size = LEGACY_COMMAND_LINE_SIZE; |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 220 | struct s390_load_data data = {0}; |
Sven Schnelle | 5ecb2da | 2021-09-23 21:22:52 +0200 | [diff] [blame] | 221 | unsigned long minsize; |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 222 | int ret; |
| 223 | |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 224 | data.report = ipl_report_init(&ipl_block); |
| 225 | if (IS_ERR(data.report)) |
| 226 | return data.report; |
| 227 | |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 228 | ret = add_kernel(image, &data); |
| 229 | if (ret) |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 230 | goto out; |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 231 | |
Sven Schnelle | 5ecb2da | 2021-09-23 21:22:52 +0200 | [diff] [blame] | 232 | ret = -EINVAL; |
| 233 | minsize = PARMAREA + offsetof(struct parmarea, command_line); |
| 234 | if (image->kernel_buf_len < minsize) |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 235 | goto out; |
Sven Schnelle | 5ecb2da | 2021-09-23 21:22:52 +0200 | [diff] [blame] | 236 | |
| 237 | if (data.parm->max_command_line_size) |
| 238 | max_command_line_size = data.parm->max_command_line_size; |
| 239 | |
| 240 | if (minsize + max_command_line_size < minsize) |
| 241 | goto out; |
| 242 | |
| 243 | if (image->kernel_buf_len < minsize + max_command_line_size) |
| 244 | goto out; |
| 245 | |
| 246 | if (image->cmdline_buf_len >= max_command_line_size) |
| 247 | goto out; |
| 248 | |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 249 | memcpy(data.parm->command_line, image->cmdline_buf, |
| 250 | image->cmdline_buf_len); |
| 251 | |
| 252 | if (image->type == KEXEC_TYPE_CRASH) { |
| 253 | data.parm->oldmem_base = crashk_res.start; |
| 254 | data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1; |
| 255 | } |
| 256 | |
| 257 | if (image->initrd_buf) { |
| 258 | ret = kexec_file_add_initrd(image, &data); |
| 259 | if (ret) |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 260 | goto out; |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 261 | } |
| 262 | |
| 263 | ret = kexec_file_add_purgatory(image, &data); |
| 264 | if (ret) |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 265 | goto out; |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 266 | |
Philipp Rudo | 653beba | 2019-03-07 15:56:34 +0100 | [diff] [blame] | 267 | if (data.kernel_mem == 0) { |
| 268 | unsigned long restart_psw = 0x0008000080000000UL; |
| 269 | restart_psw += image->start; |
| 270 | memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw)); |
| 271 | image->start = 0; |
| 272 | } |
| 273 | |
Philipp Rudo | 99feaa7 | 2019-03-18 12:53:47 +0100 | [diff] [blame] | 274 | ret = kexec_file_add_ipl_report(image, &data); |
| 275 | out: |
| 276 | ipl_report_free(data.report); |
| 277 | return ERR_PTR(ret); |
Philipp Rudo | 8e49642 | 2019-03-07 12:48:03 +0100 | [diff] [blame] | 278 | } |
| 279 | |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 280 | int arch_kexec_apply_relocations_add(struct purgatory_info *pi, |
| 281 | Elf_Shdr *section, |
| 282 | const Elf_Shdr *relsec, |
| 283 | const Elf_Shdr *symtab) |
| 284 | { |
| 285 | Elf_Rela *relas; |
Gerald Schaefer | 805bc0b | 2019-02-03 21:35:45 +0100 | [diff] [blame] | 286 | int i, r_type; |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 287 | |
| 288 | relas = (void *)pi->ehdr + relsec->sh_offset; |
| 289 | |
| 290 | for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) { |
| 291 | const Elf_Sym *sym; /* symbol to relocate */ |
| 292 | unsigned long addr; /* final location after relocation */ |
| 293 | unsigned long val; /* relocated symbol value */ |
| 294 | void *loc; /* tmp location to modify */ |
| 295 | |
| 296 | sym = (void *)pi->ehdr + symtab->sh_offset; |
| 297 | sym += ELF64_R_SYM(relas[i].r_info); |
| 298 | |
| 299 | if (sym->st_shndx == SHN_UNDEF) |
| 300 | return -ENOEXEC; |
| 301 | |
| 302 | if (sym->st_shndx == SHN_COMMON) |
| 303 | return -ENOEXEC; |
| 304 | |
| 305 | if (sym->st_shndx >= pi->ehdr->e_shnum && |
| 306 | sym->st_shndx != SHN_ABS) |
| 307 | return -ENOEXEC; |
| 308 | |
| 309 | loc = pi->purgatory_buf; |
| 310 | loc += section->sh_offset; |
| 311 | loc += relas[i].r_offset; |
| 312 | |
| 313 | val = sym->st_value; |
| 314 | if (sym->st_shndx != SHN_ABS) |
| 315 | val += pi->sechdrs[sym->st_shndx].sh_addr; |
| 316 | val += relas[i].r_addend; |
| 317 | |
| 318 | addr = section->sh_addr + relas[i].r_offset; |
| 319 | |
Gerald Schaefer | 805bc0b | 2019-02-03 21:35:45 +0100 | [diff] [blame] | 320 | r_type = ELF64_R_TYPE(relas[i].r_info); |
| 321 | arch_kexec_do_relocs(r_type, loc, val, addr); |
Philipp Rudo | 7140688 | 2017-06-19 10:45:33 +0200 | [diff] [blame] | 322 | } |
| 323 | return 0; |
| 324 | } |