// SPDX-License-Identifier: GPL-2.0
/*
 * s390 code for kexec_file_load system call
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
 */

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/verification.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&s390_kexec_elf_ops,
	&s390_kexec_image_ops,
	NULL,
};

#ifdef CONFIG_KEXEC_SIG
/*
 * Module signature information block.
 *
 * The constituents of the signature section are, in order:
 *
 * - Signer's name
 * - Key identifier
 * - Signature data
 * - Information block
 */
struct module_signature {
	u8	algo;		/* Public-key crypto algorithm [0] */
	u8	hash;		/* Digest algorithm [0] */
	u8	id_type;	/* Key identifier type [PKEY_ID_PKCS7] */
	u8	signer_len;	/* Length of signer's name [0] */
	u8	key_id_len;	/* Length of key identifier [0] */
	u8	__pad[3];
	__be32	sig_len;	/* Length of signature data */
};

#define PKEY_ID_PKCS7 2

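/*
 * Check the appended signature of a kernel image. The image is expected to
 * carry the same appended-signature layout that is used for signed kernel
 * modules: the PKCS#7 blob, followed by a struct module_signature, followed
 * by the MODULE_SIG_STRING marker at the very end of the image.
 *
 * Verification is only enforced when the system was IPLed in secure mode;
 * otherwise the image is accepted without a signature check.
 */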
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature *ms;
	unsigned long sig_len;

	/* Skip signature verification when not secure IPLed. */
	if (!ipl_secure_flag)
		return 0;

	if (marker_len > kernel_len)
		return -EKEYREJECTED;

	if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
		   marker_len))
		return -EKEYREJECTED;
	kernel_len -= marker_len;

	ms = (void *)kernel + kernel_len - sizeof(*ms);
	kernel_len -= sizeof(*ms);

	sig_len = be32_to_cpu(ms->sig_len);
	if (sig_len >= kernel_len)
		return -EKEYREJECTED;
	kernel_len -= sig_len;

	if (ms->id_type != PKEY_ID_PKCS7)
		return -EKEYREJECTED;

	if (ms->algo != 0 ||
	    ms->hash != 0 ||
	    ms->signer_len != 0 ||
	    ms->key_id_len != 0 ||
	    ms->__pad[0] != 0 ||
	    ms->__pad[1] != 0 ||
	    ms->__pad[2] != 0) {
		return -EBADMSG;
	}

	return verify_pkcs7_signature(kernel, kernel_len,
				      kernel + kernel_len, sig_len,
				      VERIFY_USE_PLATFORM_KEYRING,
				      VERIFYING_MODULE_SIGNATURE,
				      NULL, NULL);
}
#endif /* CONFIG_KEXEC_SIG */

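/*
 * Patch the purgatory symbols that describe the loaded kernel: the entry
 * point, the image type and, for crash kernels, the location and size of
 * the crash memory region.
 */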
static int kexec_file_update_purgatory(struct kimage *image,
				       struct s390_load_data *data)
{
	u64 entry, type;
	int ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		entry = STARTUP_KDUMP_OFFSET;
		type = KEXEC_TYPE_CRASH;
	} else {
		entry = STARTUP_NORMAL_OFFSET;
		type = KEXEC_TYPE_DEFAULT;
	}

	ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
					     sizeof(entry), false);
	if (ret)
		return ret;

	ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
					     sizeof(type), false);
	if (ret)
		return ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		u64 crash_size;

		ret = kexec_purgatory_get_set_symbol(image, "crash_start",
						     &crashk_res.start,
						     sizeof(crashk_res.start),
						     false);
		if (ret)
			return ret;

		crash_size = crashk_res.end - crashk_res.start + 1;
		ret = kexec_purgatory_get_set_symbol(image, "crash_size",
						     &crash_size,
						     sizeof(crash_size),
						     false);
	}
	return ret;
}

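/*
 * Load the purgatory page aligned behind the components added so far and
 * fill in the symbols it needs at run time.
 */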
static int kexec_file_add_purgatory(struct kimage *image,
				    struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ret = kexec_load_purgatory(image, &buf);
	if (ret)
		return ret;
	data->memsz += buf.memsz;

	return kexec_file_update_purgatory(image, data);
}

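/*
 * Append the initrd behind the components added so far, record its location
 * and size in the parameter area and add it as a component to the IPL
 * report.
 */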
static int kexec_file_add_initrd(struct kimage *image,
				 struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	buf.buffer = image->initrd_buf;
	buf.bufsz = image->initrd_buf_len;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;
	buf.memsz = buf.bufsz;

	data->parm->initrd_start = buf.mem;
	data->parm->initrd_size = buf.memsz;
	data->memsz += buf.memsz;

	ret = kexec_add_buffer(&buf);
	if (ret)
		return ret;

	return ipl_report_add_component(data->report, &buf, 0, 0);
}

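/*
 * Build the IPL report for the next kernel: add the certificates from the
 * IPL certificate list, place the finished report behind the other
 * components and write its address into the ipl_parmblock_ptr field in the
 * lowcore of the new kernel image.
 */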
static int kexec_file_add_ipl_report(struct kimage *image,
				     struct s390_load_data *data)
{
	__u32 *lc_ipl_parmblock_ptr;
	unsigned int len, ncerts;
	struct kexec_buf buf;
	unsigned long addr;
	void *ptr, *end;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ptr = (void *)ipl_cert_list_addr;
	end = ptr + ipl_cert_list_size;
	ncerts = 0;
	while (ptr < end) {
		ncerts++;
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ptr += len;
	}

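	/*
	 * The body of each certificate is placed behind the certificate
	 * entries at the end of the report; addr tracks where the next
	 * certificate body will end up.
	 */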
	addr = data->memsz + data->report->size;
	addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
	ptr = (void *)ipl_cert_list_addr;
	while (ptr < end) {
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ipl_report_add_certificate(data->report, ptr, addr, len);
		addr += len;
		ptr += len;
	}

	buf.buffer = ipl_report_finish(data->report);
	buf.bufsz = data->report->size;
	buf.memsz = buf.bufsz;

	data->memsz += buf.memsz;

	lc_ipl_parmblock_ptr =
		data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
	*lc_ipl_parmblock_ptr = (__u32)buf.mem;

	return kexec_add_buffer(&buf);
}

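/*
 * Common load path of the image and ELF loaders: add_kernel() places the
 * kernel itself, the command line is copied into its parameter area, and
 * initrd, purgatory and IPL report are appended behind it. Returns NULL on
 * success and an ERR_PTR() on failure.
 */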
void *kexec_file_add_components(struct kimage *image,
				int (*add_kernel)(struct kimage *image,
						  struct s390_load_data *data))
{
	struct s390_load_data data = {0};
	int ret;

	data.report = ipl_report_init(&ipl_block);
	if (IS_ERR(data.report))
		return data.report;

	ret = add_kernel(image, &data);
	if (ret)
		goto out;

	if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(data.parm->command_line, image->cmdline_buf,
	       image->cmdline_buf_len);

	if (image->type == KEXEC_TYPE_CRASH) {
		data.parm->oldmem_base = crashk_res.start;
		data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
	}

	if (image->initrd_buf) {
		ret = kexec_file_add_initrd(image, &data);
		if (ret)
			goto out;
	}

	ret = kexec_file_add_purgatory(image, &data);
	if (ret)
		goto out;

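	/*
	 * When the kernel is loaded to absolute address 0, install the
	 * restart PSW pointing to the real entry point at the very start of
	 * the image and enter the new kernel through address 0.
	 */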
	if (data.kernel_mem == 0) {
		unsigned long restart_psw = 0x0008000080000000UL;
		restart_psw += image->start;
		memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
		image->start = 0;
	}

	ret = kexec_file_add_ipl_report(image, &data);
out:
	ipl_report_free(data.report);
	return ERR_PTR(ret);
}

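/*
 * Apply the RELA relocations of the purgatory: resolve each symbol and let
 * arch_kexec_do_relocs() patch the relocation target.
 */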
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab)
{
	Elf_Rela *relas;
	int i, r_type;

	relas = (void *)pi->ehdr + relsec->sh_offset;

	for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
		const Elf_Sym *sym;	/* symbol to relocate */
		unsigned long addr;	/* final location after relocation */
		unsigned long val;	/* relocated symbol value */
		void *loc;		/* tmp location to modify */

		sym = (void *)pi->ehdr + symtab->sh_offset;
		sym += ELF64_R_SYM(relas[i].r_info);

		if (sym->st_shndx == SHN_UNDEF)
			return -ENOEXEC;

		if (sym->st_shndx == SHN_COMMON)
			return -ENOEXEC;

		if (sym->st_shndx >= pi->ehdr->e_shnum &&
		    sym->st_shndx != SHN_ABS)
			return -ENOEXEC;

		loc = pi->purgatory_buf;
		loc += section->sh_offset;
		loc += relas[i].r_offset;

		val = sym->st_value;
		if (sym->st_shndx != SHN_ABS)
			val += pi->sechdrs[sym->st_shndx].sh_addr;
		val += relas[i].r_addend;

		addr = section->sh_addr + relas[i].r_offset;

		r_type = ELF64_R_TYPE(relas[i].r_info);
		arch_kexec_do_relocs(r_type, loc, val, addr);
	}
	return 0;
}

int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	/* A kernel must be at least large enough to contain head.S. During
	 * load, memory in head.S will be accessed, e.g. to register the next
	 * command line. If the next kernel were smaller, the current kernel
	 * would panic at load.
	 */
	if (buf_len < HEAD_END)
		return -ENOEXEC;

	return kexec_image_probe_default(image, buf, buf_len);
}