/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include "efi-header.S"

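/*
 * XIP kernels execute code in place from flash but keep writable data in
 * RAM. XIP_FIXUP_OFFSET rewrites a link-time address so that it points at
 * the RAM copy of the data, while XIP_FIXUP_FLASH_OFFSET rewrites it to
 * point at the flash (XIP) copy instead; both clobber t0 (and t1) and
 * expand to nothing on non-XIP kernels.
 */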
#ifdef CONFIG_XIP_KERNEL
.macro XIP_FIXUP_OFFSET reg
	REG_L t0, _xip_fixup
	add \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
	la t0, __data_loc
	REG_L t1, _xip_phys_offset
	sub \reg, \reg, t1
	add \reg, \reg, t0
.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
.endm
#endif /* CONFIG_XIP_KERNEL */

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without also updating that structure and all
	 * bootloaders that expect this header format!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 */
	c.li s4,-13
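	/*
	 * (The 16-bit encoding of "c.li s4,-13" is 0x5a4d, i.e. the bytes
	 *  'M', 'Z' in memory: the DOS/PE magic that UEFI firmware checks
	 *  for at offset 0 of the image.)
	 */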
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset (2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset (4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
relocate:
	/* Relocate return address */
	la a1, kernel_map
	XIP_FIXUP_OFFSET a1
	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1
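	/*
	 * a1 now holds the kernel's virtual-to-physical offset
	 * (kernel_map.virt_addr minus the physical address of _start); it is
	 * reused below to convert the stvec target to a virtual address too.
	 */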

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	la a1, satp_mode
	REG_L a1, 0(a1)
	or a2, a2, a1
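	/* a2 = satp MODE field | PPN of the page directory passed in a0 */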

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	XIP_FIXUP_OFFSET a0
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
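	/*
	 * If we are running with VA != PA, the fetch of the next instruction
	 * faults and control arrives at 1: via stvec, already executing at
	 * the kernel's virtual address; with VA == PA we simply fall through.
	 */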
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a0 contains the hartid & a1 contains boot data */
	li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
	XIP_FIXUP_OFFSET a2
	add a2, a2, a1
	REG_L tp, (a2)
	li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
	XIP_FIXUP_OFFSET a3
	add a3, a3, a1
	REG_L sp, (a3)
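	/*
	 * tp now points at this hart's task and sp at its boot stack; both
	 * were filled into the boot data block (passed in a1) by the hart
	 * that asked the SBI HSM to start us.
	 */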

.Lsecondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif
	call setup_trap_vector
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set the SCRATCH register to 0, indicating to the exception vector
	 * that we are presently executing in the kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.align 2
.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Set up a PMP to permit access to all of memory. Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
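	/*
	 * pmpaddr0 = all-ones with A=NAPOT makes this single PMP entry cover
	 * the entire address space, and R|W|X grants full access to it.
	 */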
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:

	/* The lottery system is only required for the spinwait boot method */
#ifndef CONFIG_XIP_KERNEL
	/* Pick one hart to run the main boot sequence */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start
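	/*
	 * amoadd.w returns the previous value: the first hart to arrive reads
	 * 0 and carries on as the boot hart, everyone else reads a non-zero
	 * ticket and takes the spin-wait path.
	 */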

#else
	/* hart_lottery in flash contains a magic number */
	la a3, hart_lottery
	mv a2, a3
	XIP_FIXUP_OFFSET a2
	XIP_FIXUP_FLASH_OFFSET a3
	lw t1, (a3)
	amoswap.w t0, t1, (a2)
	/* first time here if hart_lottery in RAM is not set */
	beq t0, t1, .Lsecondary_start
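	/*
	 * The flash copy of hart_lottery holds a magic value while the RAM
	 * copy is initially not set: the first hart to swap the magic into
	 * RAM wins and boots the kernel; later harts read the magic back and
	 * branch to the spin-wait path.
	 */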

#endif /* CONFIG_XIP_KERNEL */
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

#ifdef CONFIG_XIP_KERNEL
	la sp, _end + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	mv s0, a0
	call __copy_data
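	/*
	 * s0 preserves the hartid across the call; __copy_data copies the
	 * writable kernel data from flash into RAM and, like any call, may
	 * clobber the argument registers.
	 */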

	/* Restore a0 copy */
	mv a0, s0
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Clear BSS for flat non-ELF images */
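	/*
	 * A flat Image is not zero-filled by the boot loader the way an ELF
	 * loader would zero it, so clear .bss here before any C code relies
	 * on zero-initialised statics.
	 */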
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:
#endif
	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1

	la a2, boot_cpu_hartid
	XIP_FIXUP_OFFSET a2
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la sp, init_thread_union + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
	XIP_FIXUP_OFFSET a0
#else
	mv a0, s1
#endif /* CONFIG_BUILTIN_DTB */
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif /* CONFIG_MMU */
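	/*
	 * With CONFIG_MMU, setup_vm() built the trampoline and early page
	 * tables while still running at physical addresses, and relocate
	 * then switched satp, so from here on we execute at the kernel's
	 * virtual address.
	 */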

	call setup_trap_vector
	/* Restore C environment */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
.Lsecondary_start:
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	slli a3, a0, LGREG
	la a1, __cpu_spinwait_stack_pointer
	XIP_FIXUP_OFFSET a1
	la a2, __cpu_spinwait_task_pointer
	XIP_FIXUP_OFFSET a2
	add a1, a3, a1
	add a2, a3, a2
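	/*
	 * a3 is the hartid scaled by the register size (1 << LGREG), used to
	 * index this hart's slot in __cpu_spinwait_stack_pointer and
	 * __cpu_spinwait_task_pointer.
	 */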

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	fence

	tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
ENTRY(reset_regs)
	li sp, 0
	li gp, 0
	li tp, 0
	li t0, 0
	li t1, 0
	li t2, 0
	li s0, 0
	li s1, 0
	li a2, 0
	li a3, 0
	li a4, 0
	li a5, 0
	li a6, 0
	li a7, 0
	li s2, 0
	li s3, 0
	li s4, 0
	li s5, 0
	li s6, 0
	li s7, 0
	li s8, 0
	li s9, 0
	li s10, 0
	li s11, 0
	li t3, 0
	li t4, 0
	li t5, 0
	li t6, 0
	csrw CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	csrr t0, CSR_MISA
	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz t0, .Lreset_regs_done
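	/*
	 * The F/D bits in misa indicate hardware floating point; when neither
	 * is present there is no FP state to reset, so skip ahead.
	 */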

	li t1, SR_FS
	csrs CSR_STATUS, t1
	fmv.s.x f0, zero
	fmv.s.x f1, zero
	fmv.s.x f2, zero
	fmv.s.x f3, zero
	fmv.s.x f4, zero
	fmv.s.x f5, zero
	fmv.s.x f6, zero
	fmv.s.x f7, zero
	fmv.s.x f8, zero
	fmv.s.x f9, zero
	fmv.s.x f10, zero
	fmv.s.x f11, zero
	fmv.s.x f12, zero
	fmv.s.x f13, zero
	fmv.s.x f14, zero
	fmv.s.x f15, zero
	fmv.s.x f16, zero
	fmv.s.x f17, zero
	fmv.s.x f18, zero
	fmv.s.x f19, zero
	fmv.s.x f20, zero
	fmv.s.x f21, zero
	fmv.s.x f22, zero
	fmv.s.x f23, zero
	fmv.s.x f24, zero
	fmv.s.x f25, zero
	fmv.s.x f26, zero
	fmv.s.x f27, zero
	fmv.s.x f28, zero
	fmv.s.x f29, zero
	fmv.s.x f30, zero
	fmv.s.x f31, zero
	csrw fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */