// SPDX-License-Identifier: GPL-2.0
/*
 * s390 code for kexec_file_load system call
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
 */

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/module_signature.h>
#include <linux/verification.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&s390_kexec_elf_ops,
	&s390_kexec_image_ops,
	NULL,
};

#ifdef CONFIG_KEXEC_SIG
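/*
 * Verify the appended PKCS#7 signature of the kernel image.  The image is
 * signed in the same way as a kernel module: payload, signature blob,
 * struct module_signature, MODULE_SIG_STRING marker.  Verification is only
 * enforced when the system was IPLed in secure mode.
 */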
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature *ms;
	unsigned long sig_len;

	/* Skip signature verification when not secure IPLed. */
	if (!ipl_secure_flag)
		return 0;

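	/*
	 * Strip the appended pieces off the end of the image: first the
	 * marker string, then struct module_signature, then the signature
	 * blob itself.
	 */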
	if (marker_len > kernel_len)
		return -EKEYREJECTED;

	if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
		   marker_len))
		return -EKEYREJECTED;
	kernel_len -= marker_len;

	ms = (void *)kernel + kernel_len - sizeof(*ms);
	kernel_len -= sizeof(*ms);

	sig_len = be32_to_cpu(ms->sig_len);
	if (sig_len >= kernel_len)
		return -EKEYREJECTED;
	kernel_len -= sig_len;

	if (ms->id_type != PKEY_ID_PKCS7)
		return -EKEYREJECTED;

	if (ms->algo != 0 ||
	    ms->hash != 0 ||
	    ms->signer_len != 0 ||
	    ms->key_id_len != 0 ||
	    ms->__pad[0] != 0 ||
	    ms->__pad[1] != 0 ||
	    ms->__pad[2] != 0) {
		return -EBADMSG;
	}

	return verify_pkcs7_signature(kernel, kernel_len,
				      kernel + kernel_len, sig_len,
				      VERIFY_USE_PLATFORM_KEYRING,
				      VERIFYING_MODULE_SIGNATURE,
				      NULL, NULL);
}
#endif /* CONFIG_KEXEC_SIG */

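/*
 * Patch the purgatory's global symbols with the entry point and image type
 * of the kernel that is to be started.
 */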
static int kexec_file_update_purgatory(struct kimage *image,
				       struct s390_load_data *data)
{
	u64 entry, type;
	int ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		entry = STARTUP_KDUMP_OFFSET;
		type = KEXEC_TYPE_CRASH;
	} else {
		entry = STARTUP_NORMAL_OFFSET;
		type = KEXEC_TYPE_DEFAULT;
	}

	ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
					     sizeof(entry), false);
	if (ret)
		return ret;

	ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
					     sizeof(type), false);
	if (ret)
		return ret;

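	/* For kdump, also pass the crash memory range to the purgatory. */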
	if (image->type == KEXEC_TYPE_CRASH) {
		u64 crash_size;

		ret = kexec_purgatory_get_set_symbol(image, "crash_start",
						     &crashk_res.start,
						     sizeof(crashk_res.start),
						     false);
		if (ret)
			return ret;

		crash_size = crashk_res.end - crashk_res.start + 1;
		ret = kexec_purgatory_get_set_symbol(image, "crash_size",
						     &crash_size,
						     sizeof(crash_size),
						     false);
	}
	return ret;
}

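/*
 * Load the purgatory page aligned behind the components added so far and
 * fill in its symbols.
 */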
static int kexec_file_add_purgatory(struct kimage *image,
				    struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ret = kexec_load_purgatory(image, &buf);
	if (ret)
		return ret;
	data->memsz += buf.memsz;

	return kexec_file_update_purgatory(image, data);
}

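/*
 * Load the initrd page aligned behind the components added so far, record
 * its location and size in the parm area and add it to the IPL report.
 */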
static int kexec_file_add_initrd(struct kimage *image,
				 struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	buf.buffer = image->initrd_buf;
	buf.bufsz = image->initrd_buf_len;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;
	buf.memsz = buf.bufsz;

	data->parm->initrd_start = data->memsz;
	data->parm->initrd_size = buf.memsz;
	data->memsz += buf.memsz;

	ret = kexec_add_buffer(&buf);
	if (ret)
		return ret;

	return ipl_report_add_component(data->report, &buf, 0, 0);
}

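/*
 * Build the IPL report from the IPL parameter block and the platform
 * certificates, load it behind the other components and store its address
 * at the ipl_parmblock_ptr offset of the kernel image's lowcore.
 */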
static int kexec_file_add_ipl_report(struct kimage *image,
				     struct s390_load_data *data)
{
	__u32 *lc_ipl_parmblock_ptr;
	unsigned int len, ncerts;
	struct kexec_buf buf;
	unsigned long addr;
	void *ptr, *end;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

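	/* Count the entries in the platform certificate list. */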
	ptr = (void *)ipl_cert_list_addr;
	end = ptr + ipl_cert_list_size;
	ncerts = 0;
	while (ptr < end) {
		ncerts++;
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ptr += len;
	}

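	/*
	 * Add the certificates to the report; addr is the address at which
	 * each certificate's payload will end up in the finished report.
	 */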
	addr = data->memsz + data->report->size;
	addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
	ptr = (void *)ipl_cert_list_addr;
	while (ptr < end) {
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ipl_report_add_certificate(data->report, ptr, addr, len);
		addr += len;
		ptr += len;
	}

	buf.buffer = ipl_report_finish(data->report);
	buf.bufsz = data->report->size;
	buf.memsz = buf.bufsz;

	data->memsz += buf.memsz;

	lc_ipl_parmblock_ptr =
		data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
	*lc_ipl_parmblock_ptr = (__u32)buf.mem;

	return kexec_add_buffer(&buf);
}

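/*
 * Load all components of the new kernel: the kernel image itself (via the
 * add_kernel callback), the command line, optionally the initrd, the
 * purgatory and the IPL report.  Returns NULL on success or an ERR_PTR()
 * on failure.
 */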
void *kexec_file_add_components(struct kimage *image,
				int (*add_kernel)(struct kimage *image,
						  struct s390_load_data *data))
{
	unsigned long max_command_line_size = LEGACY_COMMAND_LINE_SIZE;
	struct s390_load_data data = {0};
	unsigned long minsize;
	int ret;

	data.report = ipl_report_init(&ipl_block);
	if (IS_ERR(data.report))
		return data.report;

	ret = add_kernel(image, &data);
	if (ret)
		goto out;

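	/*
	 * Make sure the kernel image provides a parm area that is large
	 * enough to hold the command line.
	 */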
	ret = -EINVAL;
	minsize = PARMAREA + offsetof(struct parmarea, command_line);
	if (image->kernel_buf_len < minsize)
		goto out;

	if (data.parm->max_command_line_size)
		max_command_line_size = data.parm->max_command_line_size;

	if (minsize + max_command_line_size < minsize)
		goto out;

	if (image->kernel_buf_len < minsize + max_command_line_size)
		goto out;

	if (image->cmdline_buf_len >= max_command_line_size)
		goto out;

	memcpy(data.parm->command_line, image->cmdline_buf,
	       image->cmdline_buf_len);

	if (image->type == KEXEC_TYPE_CRASH) {
		data.parm->oldmem_base = crashk_res.start;
		data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
	}

	if (image->initrd_buf) {
		ret = kexec_file_add_initrd(image, &data);
		if (ret)
			goto out;
	}

	ret = kexec_file_add_purgatory(image, &data);
	if (ret)
		goto out;

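	/*
	 * The kernel is loaded to address 0: store a restart PSW pointing
	 * at the entry point at the very start of the image so the new
	 * kernel can be started via address 0.
	 */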
	if (data.kernel_mem == 0) {
		unsigned long restart_psw = 0x0008000080000000UL;
		restart_psw += image->start;
		memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
		image->start = 0;
	}

	ret = kexec_file_add_ipl_report(image, &data);
out:
	ipl_report_free(data.report);
	return ERR_PTR(ret);
}

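/*
 * Apply the RELA relocations of the purgatory.  Relocations against
 * undefined or common symbols are rejected; the actual fixup of each
 * location is done by arch_kexec_do_relocs().
 */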
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab)
{
	Elf_Rela *relas;
	int i, r_type;

	relas = (void *)pi->ehdr + relsec->sh_offset;

	for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
		const Elf_Sym *sym;	/* symbol to relocate */
		unsigned long addr;	/* final location after relocation */
		unsigned long val;	/* relocated symbol value */
		void *loc;		/* tmp location to modify */

		sym = (void *)pi->ehdr + symtab->sh_offset;
		sym += ELF64_R_SYM(relas[i].r_info);

		if (sym->st_shndx == SHN_UNDEF)
			return -ENOEXEC;

		if (sym->st_shndx == SHN_COMMON)
			return -ENOEXEC;

		if (sym->st_shndx >= pi->ehdr->e_shnum &&
		    sym->st_shndx != SHN_ABS)
			return -ENOEXEC;

		loc = pi->purgatory_buf;
		loc += section->sh_offset;
		loc += relas[i].r_offset;

		val = sym->st_value;
		if (sym->st_shndx != SHN_ABS)
			val += pi->sechdrs[sym->st_shndx].sh_addr;
		val += relas[i].r_addend;

		addr = section->sh_addr + relas[i].r_offset;

		r_type = ELF64_R_TYPE(relas[i].r_info);
		arch_kexec_do_relocs(r_type, loc, val, addr);
	}
	return 0;
}