/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};
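
/*
 * Note: crash_setup_memmap_entries() below fills one of these in and
 * passes it to walk_iomem_res_desc() as the callback argument, so that
 * memmap_entry_callback() knows which boot_params and E820 type to use
 * for each resource it visits.
 */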

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. The callback function pointer is assigned
 * when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
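
/*
 * For reference (a sketch, not code from this file): the provider is
 * expected to publish its handler roughly like
 *
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss, my_vmclear_fn);
 *
 * and to clear the pointer and synchronize_rcu() before unloading, so
 * the rcu_dereference() above never calls a stale function.
 */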

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/*
	 * Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}
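
/*
 * This can run twice on the panic path: once from panic() itself and
 * once from native_machine_crash_shutdown() below. The static
 * cpus_stopped flag makes the second call a no-op so the other CPUs
 * are only shot down once.
 */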

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shoot down */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/*
	 * Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
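
/*
 * Ordering note (a summary of the code above, not new behavior): all of
 * this runs on the panicking CPU with interrupts disabled, so nothing
 * here may sleep or take locks that a stopped CPU might hold; the
 * register snapshot via crash_save_cpu() is done last, after the
 * APIC/IO-APIC teardown.
 */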
Vivek Goyaldd5f7262014-08-08 14:26:09 -0700181
Vivek Goyal74ca3172014-08-29 15:18:46 -0700182#ifdef CONFIG_KEXEC_FILE
Tom Lendacky1d2e7332017-10-20 09:30:51 -0500183static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
Vivek Goyaldd5f7262014-08-08 14:26:09 -0700184{
Lee, Chun-Yie3c41e32015-09-29 20:58:57 +0800185 unsigned int *nr_ranges = arg;
Vivek Goyaldd5f7262014-08-08 14:26:09 -0700186
187 (*nr_ranges)++;
188 return 0;
189}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of crash region and/or crashk_low_res may cause
	 * another range split. So add extra two slots here.
	 */
	nr_ranges += 2;
	cmem = vzalloc(sizeof(struct crash_mem) +
		       sizeof(struct crash_mem_range) * nr_ranges);
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

/*
 * Look for any unwanted ranges between mstart, mend and remove them. Removal
 * may split an existing range; the resulting sub-ranges are stored back in
 * the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}
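
/*
 * Worked example (values are illustrative only): if cmem holds a single
 * RAM range 0x100000-0x7fffffff and the crashkernel reservation is
 * 0x60000000-0x67ffffff, crash_exclude_mem_range() splits it into
 * 0x100000-0x5fffffff and 0x68000000-0x7fffffff. One extra slot is
 * consumed per split, which is why fill_up_crash_elf_data() reserves
 * two spare slots.
 */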

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_mem *cmem;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	int ret, i;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem,
				  prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* By default prepare 64bit headers */
	ret = crash_prepare_elf64_headers(cmem,
					  IS_ENABLED(CONFIG_X86_64), addr, sz);
	if (ret)
		goto out;

	/*
	 * If a range matches backup region, adjust offset to backup
	 * segment.
	 */
	ehdr = (Elf64_Ehdr *)*addr;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
		    phdr->p_paddr == image->arch.backup_src_start &&
		    phdr->p_memsz == image->arch.backup_src_sz) {
			phdr->p_offset = image->arch.backup_load_addr;
			break;
		}
out:
	vfree(cmem);
	return ret;
}
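
/*
 * The buffer produced here is the ELF core header (elfcorehdr) that the
 * crash kernel uses to export the old kernel's memory as /proc/vmcore;
 * its load address is handed to the second kernel via the elfcorehdr=
 * command line option (set up elsewhere in the kexec_file loader).
 */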

static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
	       sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}
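
/*
 * Note: the "table full" return value of 1 is not checked by the
 * callers in this file, so entries past E820_MAX_ENTRIES_ZEROPAGE are
 * silently dropped from the crash kernel's memory map.
 */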

static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = crash_exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	/*
	 * memmap_exclude_ranges() below writes cmem->ranges[0], and the
	 * bare struct has no storage behind its flexible array member,
	 * so allocate room for one range as well.
	 */
	cmem = vzalloc(sizeof(struct crash_mem) +
		       sizeof(struct crash_mem_range));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}
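
/*
 * Context note: this is called from the x86 kexec_file image loader
 * while the crash kernel's boot_params are assembled, so the E820 map
 * built above is what the second kernel sees as its memory map.
 */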

static int determine_backup_region(struct resource *res, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = res->start;
	image->arch.backup_src_sz = resource_size(res);

	/* Expecting only one range for backup region */
	return 1;
}
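
/*
 * Returning a non-zero value stops walk_system_ram_res(), so only the
 * first RAM range below 640K is recorded; the positive return is then
 * treated as success by the caller.
 */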

int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */

	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */