/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

/* This primarily represents number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16

struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};

/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * crash reserved region, etc.
	 */
	unsigned int max_nr_ranges;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. The callback function pointer is assigned
 * when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
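
/*
 * A VMX host module is expected to publish its handler with
 * rcu_assign_pointer() so that the crash path above can invoke it
 * safely. A minimal registration sketch (the handler name is
 * illustrative, not part of this file):
 *
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *			   vmclear_local_loaded_vmcss);
 *
 * and on module unload, before the handler code goes away:
 *
 *	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 *	synchronize_rcu();
 */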

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}

static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split range to the array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOMEM;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
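
/*
 * Worked example: excluding [0x2000, 0x2fff] from a single range
 * [0x1000, 0x5fff] truncates it to [0x1000, 0x1fff] and inserts the
 * remainder [0x3000, 0x5fff] one slot later, growing nr_ranges by one.
 * Excluding exactly [0x1000, 0x5fff] instead drops the range entirely
 * and shifts the rest of the array left.
 */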

/*
 * Look for any unwanted ranges between mstart and mend and remove them.
 * This might lead to splits; the split ranges are put in the
 * ced->mem.ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
		unsigned long long mstart, unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		/*
		 * If a range matches backup region, adjust offset to backup
		 * segment.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}

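/*
 * Layout of the header buffer built below: one Elf64_Ehdr, followed by
 * a PT_NOTE phdr per present cpu (crash notes), one PT_NOTE phdr for
 * vmcoreinfo, one PT_LOAD phdr covering the kernel text mapping and
 * one PT_LOAD phdr per system RAM range, with the total size padded to
 * ELF_CORE_HEADER_ALIGN.
 */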
static int prepare_elf64_headers(struct crash_elf_data *ced,
		void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area on x86_64 (ffffffff80000000 - ffffffffa0000000),
	 * which tools like gdb appear to require. So the same physical
	 * memory is mapped by two elf headers: one with kernel text
	 * virtual addresses and the other with __va(physical) addresses.
	 */

	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
	(ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif

	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
				  prepare_elf64_ram_headers_callback);
	if (ret < 0)
		return ret;

	*addr = buf;
	*sz = elf_sz;
	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
	       sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}
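
/*
 * Note: once the boot_params table already holds
 * E820_MAX_ENTRIES_ZEROPAGE entries, add_e820_entry() returns 1 and
 * the new entry is silently dropped; the callers below do not check
 * its return value.
 */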

static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only one range for backup region */
	return 1;
}
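
/*
 * walk_system_ram_res() stops iterating as soon as a callback returns
 * nonzero, so returning 1 above records only the first RAM range in
 * the backup window; the caller in crash_load_segments() treats any
 * non-negative return value as success.
 */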

int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for the backup area. The first
	 * 640K RAM region is the backup source.
	 */

	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */