// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/page-flags.h>
#include <linux/set_memory.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/trans_pgd.h>

/**
 * kexec_image_info - For debugging output.
 */
#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
static void _kexec_image_info(const char *func, int line,
	const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("%s:%d:\n", func, line);
	pr_debug("  kexec kimage info:\n");
	pr_debug("    type:        %d\n", kimage->type);
	pr_debug("    start:       %lx\n", kimage->start);
	pr_debug("    head:        %lx\n", kimage->head);
	pr_debug("    nr_segments: %lu\n", kimage->nr_segments);
	pr_debug("    dtb_mem: %pa\n", &kimage->arch.dtb_mem);
	pr_debug("    kern_reloc: %pa\n", &kimage->arch.kern_reloc);
	pr_debug("    el2_vectors: %pa\n", &kimage->arch.el2_vectors);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("      segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			kimage->segment[i].memsz,
			kimage->segment[i].memsz / PAGE_SIZE);
	}
}

void machine_kexec_cleanup(struct kimage *kimage)
{
	/* Empty routine needed to avoid build errors. */
}

/**
 * machine_kexec_prepare - Prepare for a kexec reboot.
 *
 * Called from the core kexec code when a kernel image is loaded.
 * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
 * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
 */
int machine_kexec_prepare(struct kimage *kimage)
{
	if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
		pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * kexec_segment_flush - Helper to flush the kimage segments to PoC.
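 *
 * The new kernel is entered with the MMU off and caches disabled, so each
 * segment must be cleaned to the Point of Coherency before the jump.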
 */
static void kexec_segment_flush(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("%s:\n", __func__);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			kimage->segment[i].memsz,
			kimage->segment[i].memsz / PAGE_SIZE);

		dcache_clean_inval_poc(
			(unsigned long)phys_to_virt(kimage->segment[i].mem),
			(unsigned long)phys_to_virt(kimage->segment[i].mem) +
				kimage->segment[i].memsz);
	}
}

/* Allocates pages for kexec page table */
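/*
 * Control pages are allocated by the kexec core outside the destination
 * ranges of the new image and stay allocated for the image's lifetime,
 * which makes them a safe home for page tables used during relocation.
 */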
static void *kexec_page_alloc(void *arg)
{
	struct kimage *kimage = (struct kimage *)arg;
	struct page *page = kimage_alloc_control_pages(kimage, 0);

	if (!page)
		return NULL;

	memset(page_address(page), 0, PAGE_SIZE);

	return page_address(page);
}

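/*
 * Prepare everything the relocation code needs ahead of time: a clean copy
 * of arm64_relocate_new_kernel, an identity mapping of that copy (TTBR0),
 * and a copy of the linear map (TTBR1), so that nothing has to be allocated
 * once machine_kexec() is past the point of no return.
 */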
int machine_kexec_post_load(struct kimage *kimage)
{
	int rc;
	pgd_t *trans_pgd;
	void *reloc_code = page_to_virt(kimage->control_code_page);
	long reloc_size;
	struct trans_pgd_info info = {
		.trans_alloc_page	= kexec_page_alloc,
		.trans_alloc_arg	= kimage,
	};

	/* If in place, relocation is not used, only flush next kernel */
	if (kimage->head & IND_DONE) {
		kexec_segment_flush(kimage);
		kexec_image_info(kimage);
		return 0;
	}

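	/*
	 * With nVHE, EL2 runs from its own vectors; keep a copy in memory
	 * allocated from the control pages so that it stays intact while the
	 * old kernel's memory is being overwritten.
	 */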
	kimage->arch.el2_vectors = 0;
	if (is_hyp_nvhe()) {
		rc = trans_pgd_copy_el2_vectors(&info,
						&kimage->arch.el2_vectors);
		if (rc)
			return rc;
	}

	/* Create a copy of the linear map */
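	/*
	 * The relocation code runs out of this copy with the MMU enabled, so
	 * copying pages does not depend on linear-map entries that may
	 * themselves be overwritten during relocation.
	 */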
	trans_pgd = kexec_page_alloc(kimage);
	if (!trans_pgd)
		return -ENOMEM;
	rc = trans_pgd_create_copy(&info, &trans_pgd, PAGE_OFFSET, PAGE_END);
	if (rc)
		return rc;
	kimage->arch.ttbr1 = __pa(trans_pgd);
	kimage->arch.zero_page = __pa(empty_zero_page);

	reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
	memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
	kimage->arch.kern_reloc = __pa(reloc_code);
	rc = trans_pgd_idmap_page(&info, &kimage->arch.ttbr0,
				  &kimage->arch.t0sz, reloc_code);
	if (rc)
		return rc;
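	/*
	 * The kimage list records physical addresses, while the relocation
	 * code accesses memory through the linear-map copy; this offset lets
	 * it convert between the two (see arm64_relocate_new_kernel).
	 */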
	kimage->arch.phys_offset = virt_to_phys(kimage) - (long)kimage;

	/* Flush the reloc_code in preparation for its execution. */
	dcache_clean_inval_poc((unsigned long)reloc_code,
			       (unsigned long)reloc_code + reloc_size);
	icache_inval_pou((uintptr_t)reloc_code,
			 (uintptr_t)reloc_code + reloc_size);
	kexec_image_info(kimage);

	return 0;
}

/**
 * machine_kexec - Do the kexec reboot.
 *
 * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
 */
void machine_kexec(struct kimage *kimage)
{
	bool in_kexec_crash = (kimage == kexec_crash_image);
	bool stuck_cpus = cpus_are_stuck_in_kernel();

	/*
	 * New cpus may have become stuck_in_kernel after we loaded the image.
	 */
	BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1)));
	WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
		"Some CPUs may be stale, kdump will be unreliable.\n");

	pr_info("Bye!\n");

	local_daif_mask();

	/*
	 * Both restart and kernel_reloc will shut down the MMU and disable
	 * data caches. However, restart will start the new kernel or
	 * purgatory directly, while kernel_reloc contains the body of
	 * arm64_relocate_new_kernel.
	 * In the kexec case, kimage->start points to purgatory, assuming that
	 * the kernel entry and dtb address are embedded in purgatory by
	 * userspace (kexec-tools).
	 * In the kexec_file case, the kernel starts directly without purgatory.
	 */
	if (kimage->head & IND_DONE) {
		typeof(cpu_soft_restart) *restart;

		cpu_install_idmap();
		restart = (void *)__pa_symbol(function_nocfi(cpu_soft_restart));
		restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem,
			0, 0);
	} else {
		void (*kernel_reloc)(struct kimage *kimage);

		if (is_hyp_nvhe())
			__hyp_set_vectors(kimage->arch.el2_vectors);
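		/*
		 * Install the identity map of the relocation code so it keeps
		 * executing at the same address across the page-table and,
		 * finally, MMU switches inside arm64_relocate_new_kernel.
		 */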
		cpu_install_ttbr0(kimage->arch.ttbr0, kimage->arch.t0sz);
		kernel_reloc = (void *)kimage->arch.kern_reloc;
		kernel_reloc(kimage);
	}

	BUG(); /* Should never get here. */
}

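/*
 * Quiesce interrupts before starting the crash kernel: deactivate or EOI
 * anything still in flight and mask every line, so the new kernel does not
 * inherit in-progress interrupts from the dying one.
 */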
static void machine_kexec_mask_interrupts(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;
		int ret;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		/*
		 * First try to remove the active state. If this
		 * fails, try to EOI the interrupt.
		 */
		ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);

		if (ret && irqd_irq_inprogress(&desc->irq_data) &&
		    chip->irq_eoi)
			chip->irq_eoi(&desc->irq_data);

		if (chip->irq_mask)
			chip->irq_mask(&desc->irq_data);

		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
			chip->irq_disable(&desc->irq_data);
	}
}

/**
 * machine_crash_shutdown - shut down non-crashing cpus and save registers
 */
void machine_crash_shutdown(struct pt_regs *regs)
{
	local_irq_disable();

	/* shut down non-crashing cpus */
	crash_smp_send_stop();

	/* for the crashing cpu */
	crash_save_cpu(regs, smp_processor_id());
	machine_kexec_mask_interrupts();

	pr_info("Starting crashdump kernel...\n");
}

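/*
 * While a crash kernel is loaded, its pages are made invalid in the linear
 * map (set_memory_valid(..., 0)), so that a stray write from the running
 * kernel faults instead of silently corrupting the crashdump image.
 */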
void arch_kexec_protect_crashkres(void)
{
	int i;

	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
}

void arch_kexec_unprotect_crashkres(void)
{
	int i;

	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
}

#ifdef CONFIG_HIBERNATION
/*
 * To preserve the crash dump kernel image, the relevant memory segments
 * should be mapped again around the hibernation.
 */
void crash_prepare_suspend(void)
{
	if (kexec_crash_image)
		arch_kexec_unprotect_crashkres();
}

void crash_post_resume(void)
{
	if (kexec_crash_image)
		arch_kexec_protect_crashkres();
}

/*
 * crash_is_nosave
 *
 * Return true only if a page is part of the memory reserved for the crash
 * dump kernel but does not hold any data of the loaded kernel image.
 *
 * Note that all the pages in crash dump kernel memory have been initially
 * marked as Reserved as memory was allocated via memblock_reserve().
 *
 * In hibernation, the pages which are Reserved and yet "nosave" are excluded
 * from the hibernation image. crash_is_nosave() does this check for the crash
 * dump kernel and will reduce the total size of the hibernation image.
 */

bool crash_is_nosave(unsigned long pfn)
{
	int i;
	phys_addr_t addr;

	if (!crashk_res.end)
		return false;

	/* in reserved memory? */
	addr = __pfn_to_phys(pfn);
	if ((addr < crashk_res.start) || (crashk_res.end < addr))
		return false;

	if (!kexec_crash_image)
		return true;

	/* not part of loaded kernel image? */
	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		if (addr >= kexec_crash_image->segment[i].mem &&
		    addr < (kexec_crash_image->segment[i].mem +
			    kexec_crash_image->segment[i].memsz))
			return false;

	return true;
}

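/*
 * Called when the crashkernel reservation is shrunk at runtime (via
 * /sys/kernel/kexec_crash_size); the freed range is handed back to the
 * page allocator one page at a time.
 */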
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	struct page *page;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		page = phys_to_page(addr);
		free_reserved_page(page);
	}
}
#endif /* CONFIG_HIBERNATION */