/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 */

#include <linux/kexec.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kexec.h>
#include <asm/page.h>
#include <asm/sysreg.h>

/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location. To ensure that the
 * arm64_relocate_new_kernel routine which does that copy is not overwritten,
 * all code and data needed by arm64_relocate_new_kernel must lie between the
 * arm64_relocate_new_kernel entry point and the .Lcopy_end marker below. The
 * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
 * control_code_page, a special page which has been set up to be preserved
 * during the copy operation.
 */
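/*
 * A rough sketch of the kimage entry encoding walked below (values taken
 * from include/linux/kexec.h; listed here only for readability):
 *
 *	IND_DESTINATION	(1 << 0)	entry holds the next destination page
 *	IND_INDIRECTION	(1 << 1)	entry points to the next indirection page
 *	IND_DONE	(1 << 2)	end of the entry list
 *	IND_SOURCE	(1 << 3)	entry holds a source page to be copied
 *
 * The low bits of each entry carry the flag, the upper bits the page address.
 */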
SYM_CODE_START(arm64_relocate_new_kernel)

	/* Setup the list loop variables. */
	mov	x18, x2				/* x18 = dtb address */
	mov	x17, x1				/* x17 = kimage_start */
	mov	x16, x0				/* x16 = kimage_head */
	raw_dcache_line_size x15, x0		/* x15 = dcache line size */
	mov	x14, xzr			/* x14 = entry ptr */
	mov	x13, xzr			/* x13 = copy dest */

	/* Clear the sctlr_el2 flags. */
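	/*
	 * If entered at EL2, turn the MMU and caches off at that exception
	 * level as well before pages are overwritten below (it is assumed
	 * that the EL1 MMU was already disabled before branching here).
	 */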
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
	mov_q	x1, SCTLR_ELx_FLAGS
	bic	x0, x0, x1
	pre_disable_mmu_workaround
	msr	sctlr_el2, x0
	isb
1:

	/* Check if the new image needs relocation. */
	tbnz	x16, IND_DONE_BIT, .Ldone
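	/*
	 * The relocation loop below is, roughly, the following C sketch
	 * (illustrative only, not the literal implementation):
	 *
	 *	while (!(entry & IND_DONE)) {
	 *		addr = entry & PAGE_MASK;
	 *		if (entry & IND_SOURCE) {
	 *			copy_page(dest, addr);
	 *			dest += PAGE_SIZE;
	 *		} else if (entry & IND_INDIRECTION) {
	 *			ptr = addr;
	 *		} else if (entry & IND_DESTINATION) {
	 *			dest = addr;
	 *		}
	 *		entry = *ptr++;
	 *	}
	 */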

.Lloop:
	and	x12, x16, PAGE_MASK		/* x12 = addr */

	/* Test the entry flags. */
.Ltest_source:
	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection

	/* Invalidate dest page to PoC. */
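	/*
	 * The copy below is assumed to run with the MMU and D-cache off, so
	 * writes go straight to memory; discard any stale cache lines for
	 * the destination page so they cannot shadow the freshly copied
	 * data once the new kernel re-enables its caches.
	 */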
	mov	x0, x13
	add	x20, x0, #PAGE_SIZE
	sub	x1, x15, #1
	bic	x0, x0, x1
2:	dc	ivac, x0
	add	x0, x0, x15
	cmp	x0, x20
	b.lo	2b
	dsb	sy

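	/*
	 * copy_page advances the dest/src registers it is given, so work on
	 * scratch copies (x20/x21) and keep x13/x12 intact for the loop.
	 */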
	mov	x20, x13
	mov	x21, x12
	copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7

	/* dest += PAGE_SIZE */
	add	x13, x13, PAGE_SIZE
	b	.Lnext

.Ltest_indirection:
	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination

	/* ptr = addr */
	mov	x14, x12
	b	.Lnext

.Ltest_destination:
	tbz	x16, IND_DESTINATION_BIT, .Lnext

	/* dest = addr */
	mov	x13, x12

.Lnext:
	/* entry = *ptr++ */
	ldr	x16, [x14], #8

	/* while (!(entry & DONE)) */
	tbz	x16, IND_DONE_BIT, .Lloop

.Ldone:
	/* wait for writes from copy_page to finish */
	dsb	nsh
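	/*
	 * Invalidate the I-cache so the relocated image is fetched from
	 * memory rather than from any stale cached instructions.
	 */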
	ic	iallu
	dsb	nsh
	isb

	/* Start new image. */
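	/*
	 * Per the arm64 boot protocol (Documentation/arm64/booting.rst),
	 * x0 carries the physical address of the device tree blob and
	 * x1-x3 must be zero.
	 */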
	mov	x0, x18
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr
	br	x17

SYM_CODE_END(arm64_relocate_new_kernel)

.align 3	/* To keep the 64-bit values below naturally aligned. */

.Lcopy_end:
.org	KEXEC_CONTROL_PAGE_SIZE
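/*
 * The .org directive above effectively acts as a build-time size check:
 * assembly fails if the code and data before .Lcopy_end ever outgrow
 * KEXEC_CONTROL_PAGE_SIZE, i.e. no longer fit in the single control page
 * that machine_kexec() copies.
 */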

/*
 * arm64_relocate_new_kernel_size - Number of bytes to copy to the
 * control_code_page.
 */
.globl arm64_relocate_new_kernel_size
arm64_relocate_new_kernel_size:
	.quad	.Lcopy_end - arm64_relocate_new_kernel