blob: 7e849797c9c38ddc81311787dcf5188ba9de7a02 [file] [log] [blame]
Thomas Gleixner50acfb22019-05-29 07:18:00 -07001/* SPDX-License-Identifier: GPL-2.0-only */
Palmer Dabbelt76d2a042017-07-10 18:00:26 -07002/*
3 * Copyright (C) 2012 Regents of the University of California
Palmer Dabbelt76d2a042017-07-10 18:00:26 -07004 */
5
Palmer Dabbelt76d2a042017-07-10 18:00:26 -07006#include <asm/asm-offsets.h>
7#include <asm/asm.h>
8#include <linux/init.h>
9#include <linux/linkage.h>
10#include <asm/thread_info.h>
11#include <asm/page.h>
12#include <asm/csr.h>
Christoph Hellwig9e806352019-10-28 13:10:40 +010013#include <asm/hwcap.h>
Atish Patra0f327f22019-06-06 16:08:00 -070014#include <asm/image.h>
Atish Patracb7d2dd2020-09-17 15:37:13 -070015#include "efi-header.S"
Palmer Dabbelt76d2a042017-07-10 18:00:26 -070016
Atish Patrae0119952020-03-17 18:11:39 -070017__HEAD
Palmer Dabbelt76d2a042017-07-10 18:00:26 -070018ENTRY(_start)
Atish Patra0f327f22019-06-06 16:08:00 -070019 /*
20 * Image header expected by Linux boot-loaders. The image header data
21 * structure is described in asm/image.h.
22 * Do not modify it without modifying the structure and all bootloaders
23 * that expects this header format!!
24 */
Atish Patracb7d2dd2020-09-17 15:37:13 -070025#ifdef CONFIG_EFI
26 /*
27 * This instruction decodes to "MZ" ASCII required by UEFI.
28 */
29 c.li s4,-13
30 j _start_kernel
31#else
Atish Patra0f327f22019-06-06 16:08:00 -070032 /* jump to start kernel */
33 j _start_kernel
34 /* reserved */
35 .word 0
Atish Patracb7d2dd2020-09-17 15:37:13 -070036#endif
Atish Patra0f327f22019-06-06 16:08:00 -070037 .balign 8
Sean Anderson79605f12020-10-22 16:30:12 -040038#ifdef CONFIG_RISCV_M_MODE
39 /* Image load offset (0MB) from start of RAM for M-mode */
40 .dword 0
41#else
Atish Patra0f327f22019-06-06 16:08:00 -070042#if __riscv_xlen == 64
43 /* Image load offset(2MB) from start of RAM */
44 .dword 0x200000
45#else
46 /* Image load offset(4MB) from start of RAM */
47 .dword 0x400000
48#endif
Sean Anderson79605f12020-10-22 16:30:12 -040049#endif
Atish Patra0f327f22019-06-06 16:08:00 -070050 /* Effective size of kernel image */
51 .dword _end - _start
52 .dword __HEAD_FLAGS
53 .word RISCV_HEADER_VERSION
54 .word 0
55 .dword 0
Paul Walmsley474efec2019-09-13 18:35:50 -070056 .ascii RISCV_IMAGE_MAGIC
Atish Patra0f327f22019-06-06 16:08:00 -070057 .balign 4
Paul Walmsley474efec2019-09-13 18:35:50 -070058 .ascii RISCV_IMAGE_MAGIC2
Atish Patracb7d2dd2020-09-17 15:37:13 -070059#ifdef CONFIG_EFI
60 .word pe_head_start - _start
61pe_head_start:
62
63 __EFI_PE_HEADER
64#else
Atish Patra0f327f22019-06-06 16:08:00 -070065 .word 0
Atish Patracb7d2dd2020-09-17 15:37:13 -070066#endif
Atish Patra0f327f22019-06-06 16:08:00 -070067
Atish Patrae0119952020-03-17 18:11:39 -070068.align 2
69#ifdef CONFIG_MMU
70relocate:
71 /* Relocate return address */
72 li a1, PAGE_OFFSET
73 la a2, _start
74 sub a1, a1, a2
75 add ra, ra, a1
76
77 /* Point stvec to virtual address of intruction after satp write */
78 la a2, 1f
79 add a2, a2, a1
80 csrw CSR_TVEC, a2
81
82 /* Compute satp for kernel page tables, but don't load it yet */
83 srl a2, a0, PAGE_SHIFT
84 li a1, SATP_MODE
85 or a2, a2, a1
86
87 /*
88 * Load trampoline page directory, which will cause us to trap to
89 * stvec if VA != PA, or simply fall through if VA == PA. We need a
90 * full fence here because setup_vm() just wrote these PTEs and we need
91 * to ensure the new translations are in use.
92 */
93 la a0, trampoline_pg_dir
94 srl a0, a0, PAGE_SHIFT
95 or a0, a0, a1
96 sfence.vma
97 csrw CSR_SATP, a0
98.align 2
991:
Qiu Wenbo76d44672020-08-13 11:38:04 +0800100 /* Set trap vector to spin forever to help debug */
101 la a0, .Lsecondary_park
Atish Patrae0119952020-03-17 18:11:39 -0700102 csrw CSR_TVEC, a0
103
104 /* Reload the global pointer */
105.option push
106.option norelax
107 la gp, __global_pointer$
108.option pop
109
110 /*
111 * Switch to kernel page tables. A full fence is necessary in order to
112 * avoid using the trampoline translations, which are only correct for
113 * the first superpage. Fetching the fence is guarnteed to work
114 * because that first superpage is translated the same way.
115 */
116 csrw CSR_SATP, a2
117 sfence.vma
118
119 ret
120#endif /* CONFIG_MMU */
121#ifdef CONFIG_SMP
Atish Patracfafe262020-03-17 18:11:43 -0700122 .global secondary_start_sbi
123secondary_start_sbi:
124 /* Mask all interrupts */
125 csrw CSR_IE, zero
126 csrw CSR_IP, zero
127
128 /* Load the global pointer */
129 .option push
130 .option norelax
131 la gp, __global_pointer$
132 .option pop
133
134 /*
135 * Disable FPU to detect illegal usage of
136 * floating point in kernel space
137 */
138 li t0, SR_FS
139 csrc CSR_STATUS, t0
140
Atish Patrae0119952020-03-17 18:11:39 -0700141 /* Set trap vector to spin forever to help debug */
142 la a3, .Lsecondary_park
143 csrw CSR_TVEC, a3
144
145 slli a3, a0, LGREG
Atish Patracfafe262020-03-17 18:11:43 -0700146 la a4, __cpu_up_stack_pointer
147 la a5, __cpu_up_task_pointer
148 add a4, a3, a4
149 add a5, a3, a5
150 REG_L sp, (a4)
151 REG_L tp, (a5)
152
Atish Patrae0119952020-03-17 18:11:39 -0700153 .global secondary_start_common
154secondary_start_common:
155
156#ifdef CONFIG_MMU
157 /* Enable virtual memory and relocate to virtual address */
158 la a0, swapper_pg_dir
159 call relocate
160#endif
Qiu Wenbo76d44672020-08-13 11:38:04 +0800161 call setup_trap_vector
Atish Patrae0119952020-03-17 18:11:39 -0700162 tail smp_callin
163#endif /* CONFIG_SMP */
164
Qiu Wenbo76d44672020-08-13 11:38:04 +0800165.align 2
166setup_trap_vector:
167 /* Set trap vector to exception handler */
168 la a0, handle_exception
169 csrw CSR_TVEC, a0
170
171 /*
172 * Set sup0 scratch register to 0, indicating to exception vector that
173 * we are presently executing in kernel.
174 */
175 csrw CSR_SCRATCH, zero
176 ret
177
Atish Patrae0119952020-03-17 18:11:39 -0700178.Lsecondary_park:
179 /* We lack SMP support or have too many harts, so park this hart */
180 wfi
181 j .Lsecondary_park
182
183END(_start)
184
	__INIT
ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Setup a PMP to permit access to all of memory. Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	/* NAPOT range with all address bits set covers all of memory */
	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer (norelax: gp is not valid yet) */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_SMP
	/* Park any hart whose id is out of the configured CPU range */
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:
#endif

	/* Pick one hart to run the main boot sequence: the first hart to
	 * atomically increment hart_lottery (reading back 0) wins */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start

	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:

	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1
	la a2, boot_cpu_hartid
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la sp, init_thread_union + THREAD_SIZE
	mv a0, s1			/* a0 = DTB physical address */
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	call relocate
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment */
	la tp, init_task
	sw zero, TASK_TI_CPU(tp)
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

.Lsecondary_start:
#ifdef CONFIG_SMP
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a3 = hartid * sizeof(long): byte offset into the per-hart arrays */
	slli a3, a0, LGREG
	la a1, __cpu_up_stack_pointer
	la a2, __cpu_up_task_pointer
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	/* Ensure loads of sp/tp order before any dependent accesses */
	fence

	tail secondary_start_common
#endif

END(_start_kernel)
Palmer Dabbelt76d2a042017-07-10 18:00:26 -0700312
Christoph Hellwig9e806352019-10-28 13:10:40 +0100313#ifdef CONFIG_RISCV_M_MODE
314ENTRY(reset_regs)
315 li sp, 0
316 li gp, 0
317 li tp, 0
318 li t0, 0
319 li t1, 0
320 li t2, 0
321 li s0, 0
322 li s1, 0
323 li a2, 0
324 li a3, 0
325 li a4, 0
326 li a5, 0
327 li a6, 0
328 li a7, 0
329 li s2, 0
330 li s3, 0
331 li s4, 0
332 li s5, 0
333 li s6, 0
334 li s7, 0
335 li s8, 0
336 li s9, 0
337 li s10, 0
338 li s11, 0
339 li t3, 0
340 li t4, 0
341 li t5, 0
342 li t6, 0
Greentime Hud411cf02019-12-19 14:44:59 +0800343 csrw CSR_SCRATCH, 0
Christoph Hellwig9e806352019-10-28 13:10:40 +0100344
345#ifdef CONFIG_FPU
346 csrr t0, CSR_MISA
347 andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
Guo Rendc6fcba2020-01-05 10:52:14 +0800348 beqz t0, .Lreset_regs_done
Christoph Hellwig9e806352019-10-28 13:10:40 +0100349
350 li t1, SR_FS
351 csrs CSR_STATUS, t1
352 fmv.s.x f0, zero
353 fmv.s.x f1, zero
354 fmv.s.x f2, zero
355 fmv.s.x f3, zero
356 fmv.s.x f4, zero
357 fmv.s.x f5, zero
358 fmv.s.x f6, zero
359 fmv.s.x f7, zero
360 fmv.s.x f8, zero
361 fmv.s.x f9, zero
362 fmv.s.x f10, zero
363 fmv.s.x f11, zero
364 fmv.s.x f12, zero
365 fmv.s.x f13, zero
366 fmv.s.x f14, zero
367 fmv.s.x f15, zero
368 fmv.s.x f16, zero
369 fmv.s.x f17, zero
370 fmv.s.x f18, zero
371 fmv.s.x f19, zero
372 fmv.s.x f20, zero
373 fmv.s.x f21, zero
374 fmv.s.x f22, zero
375 fmv.s.x f23, zero
376 fmv.s.x f24, zero
377 fmv.s.x f25, zero
378 fmv.s.x f26, zero
379 fmv.s.x f27, zero
380 fmv.s.x f28, zero
381 fmv.s.x f29, zero
382 fmv.s.x f30, zero
383 fmv.s.x f31, zero
384 csrw fcsr, 0
385 /* note that the caller must clear SR_FS */
386#endif /* CONFIG_FPU */
387.Lreset_regs_done:
388 ret
389END(reset_regs)
390#endif /* CONFIG_RISCV_M_MODE */
391
Palmer Dabbelt76d2a042017-07-10 18:00:26 -0700392__PAGE_ALIGNED_BSS
393 /* Empty zero page */
394 .balign PAGE_SIZE