// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec: kexec_file_load system call
 *
 * Copyright (C) 2014 Red Hat Inc.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/ima.h>
#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"

static int kexec_calculate_store_digests(struct kimage *image);

/*
 * Currently this is the only default function that is exported as some
 * architectures need it to do additional handling.
 * In the future, other default functions may be exported too if required.
 */
int kexec_image_probe_default(struct kimage *image, void *buf,
			      unsigned long buf_len)
{
	const struct kexec_file_ops * const *fops;
	int ret = -ENOEXEC;

	for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
		ret = (*fops)->probe(buf, buf_len);
		if (!ret) {
			image->fops = *fops;
			return ret;
		}
	}

	return ret;
}

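/*
 * For reference, each entry in kexec_file_loaders[] is a struct
 * kexec_file_ops providing the hooks used above and below. A minimal
 * sketch of a hypothetical loader (the names here are illustrative, not
 * taken from this file; see e.g. kexec_bzImage64_ops on x86 for a real one):
 *
 *	const struct kexec_file_ops my_image_ops = {
 *		.probe      = my_image_probe,      (-ENOEXEC if format unknown)
 *		.load       = my_image_load,       (build the segment list)
 *		.cleanup    = my_image_cleanup,    (free image_loader_data)
 *		.verify_sig = my_image_verify_sig, (used under CONFIG_KEXEC_SIG)
 *	};
 */
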
/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return kexec_image_probe_default(image, buf, buf_len);
}

static void *kexec_image_load_default(struct kimage *image)
{
	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);

	return image->fops->load(image, image->kernel_buf,
				 image->kernel_buf_len, image->initrd_buf,
				 image->initrd_buf_len, image->cmdline_buf,
				 image->cmdline_buf_len);
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return kexec_image_load_default(image);
}

int kexec_image_post_load_cleanup_default(struct kimage *image)
{
	if (!image->fops || !image->fops->cleanup)
		return 0;

	return image->fops->cleanup(image->image_loader_data);
}

int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	return kexec_image_post_load_cleanup_default(image);
}

#ifdef CONFIG_KEXEC_SIG
static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
					  unsigned long buf_len)
{
	if (!image->fops || !image->fops->verify_sig) {
		pr_debug("kernel loader does not support signature verification.\n");
		return -EKEYREJECTED;
	}

	return image->fops->verify_sig(buf, buf_len);
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return kexec_image_verify_sig_default(image, buf, buf_len);
}
#endif

/*
 * arch_kexec_apply_relocations_add - apply relocations of type RELA
 * @pi:		Purgatory to be relocated.
 * @section:	Section to which the relocations apply.
 * @relsec:	Section containing RELAs.
 * @symtab:	Corresponding symtab.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak
arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
				 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/*
 * arch_kexec_apply_relocations - apply relocations of type REL
 * @pi:		Purgatory to be relocated.
 * @section:	Section to which the relocations apply.
 * @relsec:	Section containing RELs.
 * @symtab:	Corresponding symtab.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak
arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
			     const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}

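/*
 * Architectures that carry a purgatory override the weak stubs above. A
 * rough, simplified sketch of the usual shape of a RELA handler (pseudocode
 * only; see e.g. arch/x86/kernel/machine_kexec_64.c for a real one):
 *
 *	Elf64_Rela *relas = (void *)pi->ehdr + relsec->sh_offset;
 *	for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
 *		sym      = symtab entry for ELF64_R_SYM(relas[i].r_info);
 *		value    = resolved sym address + relas[i].r_addend;
 *		location = pi->purgatory_buf + section->sh_offset
 *			   + relas[i].r_offset;
 *		patch *location according to ELF64_R_TYPE(relas[i].r_info);
 *	}
 */
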
/*
 * Free up memory used by kernel, initrd, and command line. These are
 * temporary memory allocations which are not needed any more after these
 * buffers have been loaded into separate segments and have been copied
 * elsewhere.
 */
void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

#ifdef CONFIG_IMA_KEXEC
	vfree(image->ima_buffer);
	image->ima_buffer = NULL;
#endif /* CONFIG_IMA_KEXEC */

	/* See if architecture has anything to cleanup post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * The above call should have called into the bootloader to free up
	 * any data stored in kimage->image_loader_data. It should be OK
	 * now to free it up.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}

#ifdef CONFIG_KEXEC_SIG
static int
kimage_validate_signature(struct kimage *image)
{
	int ret;

	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	if (ret) {
		if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) {
			pr_notice("Enforced kernel signature verification failed (%d).\n", ret);
			return ret;
		}

		/*
		 * If IMA is guaranteed to appraise a signature on the kexec
		 * image, permit it even if the kernel is otherwise locked
		 * down.
		 */
		if (!ima_appraise_signature(READING_KEXEC_IMAGE) &&
		    security_locked_down(LOCKDOWN_KEXEC))
			return -EPERM;

		pr_debug("kernel signature verification failed (%d).\n", ret);
	}

	return 0;
}
#endif

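/*
 * Net effect of the policy above: with CONFIG_KEXEC_SIG, a failed signature
 * check is fatal only when CONFIG_KEXEC_SIG_FORCE is set, or when the kernel
 * is locked down and IMA will not appraise the image; otherwise the failure
 * is only logged and the load proceeds.
 */
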
/*
 * In file mode, the list of segments is prepared by the kernel. Copy
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret;
	void *ldata;

	ret = kernel_read_file_from_fd(kernel_fd, 0, &image->kernel_buf,
				       INT_MAX, NULL, READING_KEXEC_IMAGE);
	if (ret < 0)
		return ret;
	image->kernel_buf_len = ret;

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);
	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_SIG
	ret = kimage_validate_signature(image);

	if (ret)
		goto out;
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = kernel_read_file_from_fd(initrd_fd, 0, &image->initrd_buf,
					       INT_MAX, NULL,
					       READING_KEXEC_INITRAMFS);
		if (ret < 0)
			goto out;
		image->initrd_buf_len = ret;
		ret = 0;
	}

	if (cmdline_len) {
		image->cmdline_buf = memdup_user(cmdline_ptr, cmdline_len);
		if (IS_ERR(image->cmdline_buf)) {
			ret = PTR_ERR(image->cmdline_buf);
			image->cmdline_buf = NULL;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* The command line should be a NUL-terminated string */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}

		ima_kexec_cmdline(kernel_fd, image->cmdline_buf,
				  image->cmdline_buf_len - 1);
	}

	/* IMA needs to pass the measurement list to the next kernel. */
	ima_add_kexec_buffer(image);

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);

	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}

static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}

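/*
 * For context, user space (e.g. kexec-tools) reaches the syscall below via
 * the raw syscall interface; a minimal illustrative call looks like:
 *
 *	syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
 *		strlen(cmdline) + 1, cmdline, flags);
 *
 * where cmdline_len counts the trailing NUL (enforced above) and flags is a
 * combination of KEXEC_FILE_* values such as KEXEC_FILE_ON_CRASH,
 * KEXEC_FILE_UNLOAD or KEXEC_FILE_NO_INITRAMFS.
 */
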
SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	}

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, the new kernel gets loaded into the reserved
	 * region. This is the same memory where the old crash kernel might
	 * be loaded. Free any current crash dump kernel before we corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(), so we must copy vmcoreinfo data after it.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	ret = machine_kexec_post_load(image);
	if (ret)
		goto out;

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after the image has been loaded.
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}

static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down start */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any existing
		 * segments
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;
		/*
		 * Make sure this does not conflict with any existing
		 * segments
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

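/*
 * Example of the alignment math above (illustrative numbers): with
 * buf_align = 0x1000, "temp_start & ~(buf_align - 1)" rounds 0x12345 down
 * to 0x12000, while ALIGN(0x12345, 0x1000) rounds it up to 0x13000. The
 * top-down search therefore walks candidate bases downwards and the
 * bottom-up search walks them upwards, both in PAGE_SIZE steps whenever a
 * candidate conflicts with an existing destination range.
 */
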
static int locate_mem_hole_callback(struct resource *res, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	u64 start = res->start, end = res->end;
	unsigned long sz = end - start + 1;

	/* Returning 0 will take us to the next memory range */

	/* Don't use memory that will be detected and handled by a driver. */
	if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)
		return 0;

	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top down within the RAM range. Otherwise use a
	 * bottom-up allocation.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}

#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
static int kexec_walk_memblock(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *))
{
	int ret = 0;
	u64 i;
	phys_addr_t mstart, mend;
	struct resource res = { };

	if (kbuf->image->type == KEXEC_TYPE_CRASH)
		return func(&crashk_res, kbuf);

	/*
	 * Using MEMBLOCK_NONE will properly skip MEMBLOCK_DRIVER_MANAGED. See
	 * IORESOURCE_SYSRAM_DRIVER_MANAGED handling in
	 * locate_mem_hole_callback().
	 */
	if (kbuf->top_down) {
		for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,
						&mstart, &mend, NULL) {
			/*
			 * In memblock, end points to the first byte after the
			 * range while in kexec, end points to the last byte
			 * in the range.
			 */
			res.start = mstart;
			res.end = mend - 1;
			ret = func(&res, kbuf);
			if (ret)
				break;
		}
	} else {
		for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
					&mstart, &mend, NULL) {
			/*
			 * In memblock, end points to the first byte after the
			 * range while in kexec, end points to the last byte
			 * in the range.
			 */
			res.start = mstart;
			res.end = mend - 1;
			ret = func(&res, kbuf);
			if (ret)
				break;
		}
	}

	return ret;
}
#else
static int kexec_walk_memblock(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *))
{
	return 0;
}
#endif

/**
 * kexec_walk_resources - call func(data) on free memory regions
 * @kbuf:	Context info for the search. Also passed to @func.
 * @func:	Function to call for each memory region.
 *
 * Return: The memory walk will stop when func returns a non-zero value
 * and that value will be returned. If all free regions are visited without
 * func returning non-zero, then zero will be returned.
 */
static int kexec_walk_resources(struct kexec_buf *kbuf,
				int (*func)(struct resource *, void *))
{
	if (kbuf->image->type == KEXEC_TYPE_CRASH)
		return walk_iomem_res_desc(crashk_res.desc,
					   IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					   crashk_res.start, crashk_res.end,
					   kbuf, func);
	else
		return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
}

/**
 * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
 * @kbuf:	Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	int ret;

	/* Arch knows where to place */
	if (kbuf->mem != KEXEC_BUF_MEM_UNKNOWN)
		return 0;

	if (!IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		ret = kexec_walk_resources(kbuf, locate_mem_hole_callback);
	else
		ret = kexec_walk_memblock(kbuf, locate_mem_hole_callback);

	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}

/**
 * arch_kexec_locate_mem_hole - Find free memory to place the segments.
 * @kbuf:	Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	return kexec_locate_mem_hole(kbuf);
}

/**
 * kexec_add_buffer - place a buffer in a kexec segment
 * @kbuf:	Buffer contents and memory parameters.
 *
 * This function assumes that kexec_mutex is held.
 * On successful return, @kbuf->mem will have the physical address of
 * the buffer in memory.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_add_buffer(struct kexec_buf *kbuf)
{
	struct kexec_segment *ksegment;
	int ret;

	/* Currently adding a segment this way is allowed only in file mode */
	if (!kbuf->image->file_mode)
		return -EINVAL;

	if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add a buffer after allocating
	 * control pages. All segments need to be placed first before
	 * any control pages are allocated, because the control page
	 * allocation logic goes through the list of segments to make
	 * sure there are no destination overlaps.
	 */
	if (!list_empty(&kbuf->image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	/* Ensure minimum alignment needed for segments. */
	kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	ret = arch_kexec_locate_mem_hole(kbuf);
	if (ret)
		return ret;

	/* Found a suitable memory range */
	ksegment = &kbuf->image->segment[kbuf->image->nr_segments];
	ksegment->kbuf = kbuf->buffer;
	ksegment->bufsz = kbuf->bufsz;
	ksegment->mem = kbuf->mem;
	ksegment->memsz = kbuf->memsz;
	kbuf->image->nr_segments++;
	return 0;
}

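/*
 * Typical use of kexec_add_buffer() by an architecture loader (illustrative
 * sketch only; the field values are assumptions, not requirements of this
 * API):
 *
 *	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
 *				  .buf_max = ULONG_MAX, .top_down = true,
 *				  .mem = KEXEC_BUF_MEM_UNKNOWN };
 *
 *	kbuf.buffer = kernel_data;
 *	kbuf.bufsz = kernel_len;
 *	kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
 *	kbuf.buf_align = PAGE_SIZE;
 *	ret = kexec_add_buffer(&kbuf);
 *
 * On success, kbuf.mem holds the chosen physical load address.
 */
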
/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY))
		return 0;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions) {
		ret = -ENOMEM;
		goto out_free_desc;
	}

	desc->tfm = tfm;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip purgatory as it will be modified once we put digest
		 * info in purgatory.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume the rest of the buffer is filled with zeroes and
		 * update the digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
						     sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
						     digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
out:
	return ret;
}

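/*
 * The two symbols written above are consumed inside the purgatory binary:
 * between the crash/reboot and the jump to the next kernel, purgatory
 * recomputes SHA-256 over each sha_regions[] entry and compares the result
 * with purgatory_sha256_digest, refusing to boot a modified image. (Summary
 * of the purgatory side only; see arch/*/purgatory/ for the actual code.)
 */
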
#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
/*
 * kexec_purgatory_setup_kbuf - prepare buffer to load purgatory.
 * @pi:		Purgatory to be loaded.
 * @kbuf:	Buffer to setup.
 *
 * Allocates the memory needed for the buffer. Caller is responsible for
 * freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_kbuf(struct purgatory_info *pi,
				      struct kexec_buf *kbuf)
{
	const Elf_Shdr *sechdrs;
	unsigned long bss_align;
	unsigned long bss_sz;
	unsigned long align;
	int i, ret;

	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
	kbuf->buf_align = bss_align = 1;
	kbuf->bufsz = bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			if (kbuf->buf_align < align)
				kbuf->buf_align = align;
			kbuf->bufsz = ALIGN(kbuf->bufsz, align);
			kbuf->bufsz += sechdrs[i].sh_size;
		} else {
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}
	kbuf->bufsz = ALIGN(kbuf->bufsz, bss_align);
	kbuf->memsz = kbuf->bufsz + bss_sz;
	if (kbuf->buf_align < bss_align)
		kbuf->buf_align = bss_align;

	kbuf->buffer = vzalloc(kbuf->bufsz);
	if (!kbuf->buffer)
		return -ENOMEM;
	pi->purgatory_buf = kbuf->buffer;

	ret = kexec_add_buffer(kbuf);
	if (ret)
		goto out;

	return 0;
out:
	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}

/*
 * kexec_purgatory_setup_sechdrs - prepares the pi->sechdrs buffer.
 * @pi:		Purgatory to be loaded.
 * @kbuf:	Buffer prepared to store purgatory.
 *
 * Allocates the memory needed for the buffer. Caller is responsible for
 * freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
					 struct kexec_buf *kbuf)
{
	unsigned long bss_addr;
	unsigned long offset;
	Elf_Shdr *sechdrs;
	int i;

	/*
	 * The section headers in kexec_purgatory are read-only. In order to
	 * have them modifiable make a temporary copy.
	 */
	sechdrs = vzalloc(array_size(sizeof(Elf_Shdr), pi->ehdr->e_shnum));
	if (!sechdrs)
		return -ENOMEM;
	memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
	       pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	pi->sechdrs = sechdrs;

	offset = 0;
	bss_addr = kbuf->mem + kbuf->bufsz;
	kbuf->image->start = pi->ehdr->e_entry;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		unsigned long align;
		void *src, *dst;

		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type == SHT_NOBITS) {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
			continue;
		}

		offset = ALIGN(offset, align);
		if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
		    pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
		    pi->ehdr->e_entry < (sechdrs[i].sh_addr
					 + sechdrs[i].sh_size)) {
			kbuf->image->start -= sechdrs[i].sh_addr;
			kbuf->image->start += kbuf->mem + offset;
		}

		src = (void *)pi->ehdr + sechdrs[i].sh_offset;
		dst = pi->purgatory_buf + offset;
		memcpy(dst, src, sechdrs[i].sh_size);

		sechdrs[i].sh_addr = kbuf->mem + offset;
		sechdrs[i].sh_offset = offset;
		offset += sechdrs[i].sh_size;
	}

	return 0;
}

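/*
 * Note on the bookkeeping above: after kexec_purgatory_setup_sechdrs(), each
 * SHF_ALLOC section's sh_addr holds its final load address in the target
 * memory (kbuf->mem + offset, or an address in the trailing bss), while
 * sh_offset holds its current offset inside pi->purgatory_buf. The
 * relocation and symbol get/set code below relies on exactly this split.
 */
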
static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Shdr *sechdrs;

	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		const Elf_Shdr *relsec;
		const Elf_Shdr *symtab;
		Elf_Shdr *section;

		relsec = sechdrs + i;

		if (relsec->sh_type != SHT_RELA &&
		    relsec->sh_type != SHT_REL)
			continue;

		/*
		 * For sections of type SHT_RELA/SHT_REL,
		 * ->sh_link contains the section header index of the
		 * associated symbol table. And ->sh_info contains the
		 * section header index of the section to which the
		 * relocations apply.
		 */
		if (relsec->sh_info >= pi->ehdr->e_shnum ||
		    relsec->sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = pi->sechdrs + relsec->sh_info;
		symtab = sechdrs + relsec->sh_link;

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains the section header index of the
		 * associated string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * The respective architecture needs to provide support for
		 * applying relocations of type SHT_RELA/SHT_REL.
		 */
		if (relsec->sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi, section,
							       relsec, symtab);
		else if (relsec->sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi, section,
							   relsec, symtab);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * kexec_load_purgatory - Load and relocate the purgatory object.
 * @image:	Image to add the purgatory to.
 * @kbuf:	Memory parameters to use.
 *
 * Allocates the memory needed for image->purgatory_info.sechdrs and
 * image->purgatory_info.purgatory_buf/kbuf->buffer. Caller is responsible
 * for freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	pi->ehdr = (const Elf_Ehdr *)kexec_purgatory;

	ret = kexec_purgatory_setup_kbuf(pi, kbuf);
	if (ret)
		return ret;

	ret = kexec_purgatory_setup_sechdrs(pi, kbuf);
	if (ret)
		goto out_free_kbuf;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	return 0;
out:
	vfree(pi->sechdrs);
	pi->sechdrs = NULL;
out_free_kbuf:
	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}

/*
 * kexec_purgatory_find_symbol - find a symbol in the purgatory
 * @pi:		Purgatory to search in.
 * @name:	Name of the symbol.
 *
 * Return: pointer to symbol in read-only symtab on success, NULL on error.
 */
static const Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
						  const char *name)
{
	const Elf_Shdr *sechdrs;
	const Elf_Ehdr *ehdr;
	const Elf_Sym *syms;
	const char *strtab;
	int i, k;

	if (!pi->ehdr)
		return NULL;

	ehdr = pi->ehdr;
	sechdrs = (void *)ehdr + ehdr->e_shoff;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (void *)ehdr + sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (void *)ehdr + sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
			if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
				continue;

			if (strcmp(strtab + syms[k].st_name, name) != 0)
				continue;

			if (syms[k].st_shndx == SHN_UNDEF ||
			    syms[k].st_shndx >= ehdr->e_shnum) {
				pr_debug("Symbol: %s has bad section index %d.\n",
					 name, syms[k].st_shndx);
				return NULL;
			}

			/* Found the symbol we are looking for */
			return &syms[k];
		}
	}

	return NULL;
}

void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Returns the address where the symbol will finally be loaded after
	 * kimage_load_segment().
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}

/*
 * Get or set the value of a symbol. If "get_value" is true, the symbol
 * value is returned in buf; otherwise the symbol value is set based on the
 * value in buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Sym *sym;
	Elf_Shdr *sec;
	char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return -EINVAL;

	if (sym->st_size != size) {
		pr_err("symbol %s size mismatch: expected %lu actual %u\n",
		       name, (unsigned long)sym->st_size, size);
		return -EINVAL;
	}

	sec = pi->sechdrs + sym->st_shndx;

	if (sec->sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (char *)pi->purgatory_buf + sec->sh_offset + sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);
	else
		memcpy((void *)sym_buf, buf, size);

	return 0;
}
#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */

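/*
 * Illustrative use of the symbol accessor above (the symbol name and type
 * here are hypothetical; the real symbols this file writes are
 * "purgatory_sha_regions" and "purgatory_sha256_digest"):
 *
 *	unsigned long val = 0x1000;
 *	ret = kexec_purgatory_get_set_symbol(image, "my_param", &val,
 *					     sizeof(val), false);
 *
 * The purgatory object must define a matching global of exactly sizeof(val)
 * bytes outside of .bss, or the call fails with -EINVAL.
 */
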
int crash_exclude_mem_range(struct crash_mem *mem,
			    unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end, p_start, p_end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;
		p_start = mstart;
		p_end = mend;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			p_start = start;
		if (mend > end)
			p_end = end;

		/* Found completely overlapping range */
		if (p_start == start && p_end == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}

				/*
				 * Continue to check if there are other
				 * overlapping ranges from the current
				 * position because of shifting the above
				 * mem ranges.
				 */
				i--;
				mem->nr_ranges--;
				continue;
			}
			mem->nr_ranges--;
			return 0;
		}

		if (p_start > start && p_end < end) {
			/* Split original range */
			mem->ranges[i].end = p_start - 1;
			temp_range.start = p_end + 1;
			temp_range.end = end;
		} else if (p_start != start)
			mem->ranges[i].end = p_start - 1;
		else
			mem->ranges[i].start = p_end + 1;
		break;
	}

	/* If a split happened, add the split to array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == mem->max_nr_ranges - 1)
		return -ENOMEM;

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}

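/*
 * Worked example for crash_exclude_mem_range() (illustrative values):
 * excluding [0x5000, 0x5fff] from a single range [0x0, 0xffff] hits the
 * "split" case, leaving [0x0, 0x4fff] in place and inserting
 * [0x6000, 0xffff] as a new entry, so nr_ranges grows by one. Excluding
 * [0x0, 0xffff] instead hits the "completely overlapping" case and removes
 * the range entirely.
 */
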
int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
				void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	unsigned long mstart, mend;

	/* extra phdr for vmcoreinfo ELF note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += mem->nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area (for example, ffffffff80000000 - ffffffffa0000000 on
	 * x86_64). I think this is required by tools like gdb. So the same
	 * physical memory will be mapped in two ELF headers. One will
	 * contain kernel text virtual addresses and the other will have
	 * __va(physical) addresses.
	 */

	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	ehdr = (Elf64_Ehdr *)buf;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present CPU */
	for_each_present_cpu(cpu) {
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;
	phdr++;

	/* Prepare PT_LOAD type program header for kernel text region */
	if (kernel_map) {
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (unsigned long) _text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		ehdr->e_phnum++;
		phdr++;
	}

	/* Go through all the ranges in mem->ranges[] and prepare phdr */
	for (i = 0; i < mem->nr_ranges; i++) {
		mstart = mem->ranges[i].start;
		mend = mem->ranges[i].end;

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
		phdr++;
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}