// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2005, 2011
 *
 * Author(s): Rolf Adelsberger,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <linux/suspend.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/os_info.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>

typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;

#ifdef CONFIG_CRASH_DUMP

/*
 * PM notifier callback for kdump
 */
static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
			       void *ptr)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		if (kexec_crash_image)
			arch_kexec_protect_crashkres();
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

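/*
 * Register the PM notifier: make the crash kernel memory writable before
 * suspend/hibernation and write protect it again afterwards.
 */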
static int __init machine_kdump_pm_init(void)
{
	pm_notifier(machine_kdump_pm_cb, 0);
	return 0;
}
arch_initcall(machine_kdump_pm_init);

/*
 * Reset the system, copy boot CPU registers to absolute zero,
 * and jump to the kdump image
 */
static void __do_machine_kdump(void *image)
{
	int (*start_kdump)(int);
	unsigned long prefix;

	/* store_status() saved the prefix register to lowcore */
	prefix = (unsigned long) S390_lowcore.prefixreg_save_area;

	/* Now do the reset */
	s390_reset_system();

	/*
	 * Copy dump CPU store status info to absolute zero.
	 * This needs to be done *after* s390_reset_system() has set the
	 * prefix register of this CPU to zero.
	 */
	memcpy((void *) __LC_FPREGS_SAVE_AREA,
	       (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);

	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
	start_kdump = (void *)((struct kimage *) image)->start;
	start_kdump(1);

	/* Die if start_kdump returns */
	disabled_wait();
}

/*
 * Start kdump: create a LGR log entry, store status of all CPUs and
 * branch to __do_machine_kdump.
 */
static noinline void __machine_kdump(void *image)
{
	struct mcesa *mcesa;
	union ctlreg2 cr2_old, cr2_new;
	int this_cpu, cpu;

	lgr_info_log();
	/* Get status of the other CPUs */
	this_cpu = smp_find_processor_id(stap());
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		if (smp_store_status(cpu))
			continue;
	}
	/* Store status of the boot CPU */
	mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_VX)
		save_vx_regs((__vector128 *) mcesa->vector_save_area);
	if (MACHINE_HAS_GS) {
		__ctl_store(cr2_old.val, 2, 2);
		cr2_new = cr2_old;
		cr2_new.gse = 1;
		__ctl_load(cr2_new.val, 2, 2);
		save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
		__ctl_load(cr2_old.val, 2, 2);
	}
	/*
	 * To create a good backchain for this CPU in the dump, store_status()
	 * is passed the address of a function. The address is saved into
	 * the PSW save area of the boot CPU and the function is invoked as
	 * a tail call of store_status(). The backchain in the dump will look
	 * like this:
	 * restart_int_handler -> __machine_kexec -> __do_machine_kdump
	 * The call to store_status() will not return.
	 */
	store_status(__do_machine_kdump, image);
}

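/*
 * Branch to the loaded kdump image (purgatory) with parameter "0" so that
 * only the checksum test is done. DAT is switched off around the call.
 */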
static unsigned long do_start_kdump(unsigned long addr)
{
	struct kimage *image = (struct kimage *) addr;
	int (*start_kdump)(int) = (void *)image->start;
	int rc;

	__arch_local_irq_stnsm(0xfb); /* disable DAT */
	rc = start_kdump(0);
	__arch_local_irq_stosm(0x04); /* enable DAT */
	return rc;
}

#endif /* CONFIG_CRASH_DUMP */

/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
 */
static bool kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
	int rc;

	preempt_disable();
	rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
	preempt_enable();
	return rc == 0;
#else
	return false;
#endif
}

#ifdef CONFIG_CRASH_DUMP

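/*
 * Free the pages in the [begin, end) range of the crash kernel memory and
 * record the part of the crash kernel area that is still reserved in the
 * os_info block.
 */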
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr, size;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
	size = begin - crashk_res.start;
	if (size)
		os_info_crashkernel_add(crashk_res.start, size);
	else
		os_info_crashkernel_add(0, 0);
}

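/*
 * Map the crash kernel memory read-only (protect != 0) or read-write
 * (protect == 0). Nothing to do if no crash kernel memory is reserved.
 */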
static void crash_protect_pages(int protect)
{
	unsigned long size;

	if (!crashk_res.end)
		return;
	size = resource_size(&crashk_res);
	if (protect)
		set_memory_ro(crashk_res.start, size >> PAGE_SHIFT);
	else
		set_memory_rw(crashk_res.start, size >> PAGE_SHIFT);
}

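/* Write protect the crash kernel memory */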
void arch_kexec_protect_crashkres(void)
{
	crash_protect_pages(1);
}

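/* Make the crash kernel memory writable again */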
void arch_kexec_unprotect_crashkres(void)
{
	crash_protect_pages(0);
}

#endif

/*
 * Give back memory to hypervisor before new kdump is loaded
 */
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crashk_res.start),
			     PFN_DOWN(crashk_res.end - crashk_res.start + 1));
	return 0;
#else
	return -EINVAL;
#endif
}

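/*
 * Prepare a newly loaded kexec image: kdump images only require the
 * hypervisor notification above, all other images get the relocate_kernel
 * code copied into their control page.
 */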
int machine_kexec_prepare(struct kimage *image)
{
	void *reboot_code_buffer;

	if (image->type == KEXEC_TYPE_CRASH)
		return machine_kexec_prepare_kdump();

	/* We don't support anything but the default image type for now. */
	if (image->type != KEXEC_TYPE_DEFAULT)
		return -EINVAL;

	/* Get the destination where the assembler code should be copied to. */
	reboot_code_buffer = (void *) page_to_phys(image->control_code_page);

	/* Then copy it */
	memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
	return 0;
}

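/* Nothing architecture specific to clean up here */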
void machine_kexec_cleanup(struct kimage *image)
{
}

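/*
 * Export s390 specific symbols and values (lowcore pointer array, SDMA/EDMA
 * boundaries, KASLR offset) in the vmcoreinfo note for crash dump tools.
 */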
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(lowcore_ptr);
	VMCOREINFO_SYMBOL(high_memory);
	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
	mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
	vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
	vmcoreinfo_append_str("EDMA=%lx\n", __edma);
	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}

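/* Nothing to do for a normal shutdown */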
void machine_shutdown(void)
{
}

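/* Called on the crash path: preserve the re-IPL block in the os_info area */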
void machine_crash_shutdown(struct pt_regs *regs)
{
	set_os_info_reipl_block();
}

/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
	relocate_kernel_t data_mover;
	struct kimage *image = data;

	s390_reset_system();
	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

	__arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
	/* Call the moving routine */
	(*data_mover)(&image->head, image->start);

	/* Die if kexec returns */
	disabled_wait();
}

/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	__arch_local_irq_stosm(0x04); /* enable DAT */
	pfault_fini();
	tracing_off();
	debug_locks_off();
#ifdef CONFIG_CRASH_DUMP
	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH)
		__machine_kdump(data);
#endif
	__do_machine_kexec(data);
}

/*
 * Do either kdump or normal kexec. In case of kdump we first ask
 * purgatory whether the kdump checksums are valid.
 */
void machine_kexec(struct kimage *image)
{
	if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
		return;
	tracer_disable();
	smp_send_stop();
	smp_call_ipl_cpu(__machine_kexec, image);
}