/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include "efi-header.S"

#ifdef CONFIG_XIP_KERNEL
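/*
 * Execute-in-place kernels run from flash while writable data gets copied
 * to RAM.  XIP_FIXUP_OFFSET rebases a link-time data address onto its RAM
 * copy using the delta stored in _xip_fixup below; XIP_FIXUP_FLASH_OFFSET
 * is used further down to reach a data word's initial value in flash.
 * Both macros are no-ops for non-XIP kernels.
 */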
.macro XIP_FIXUP_OFFSET reg
	REG_L t0, _xip_fixup
	add \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
	la t1, __data_loc
	li t0, XIP_OFFSET_MASK
	and t1, t1, t0
	li t1, XIP_OFFSET
	sub t0, t0, t1
	sub \reg, \reg, t0
.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
.endm
#endif /* CONFIG_XIP_KERNEL */

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 */
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset (2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset (4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	/* Kernel flags, little endian */
	.dword __HEAD_FLAGS
	/* Version of this header */
	.word RISCV_HEADER_VERSION
	/* Reserved */
	.word 0
	/* Reserved */
	.dword 0
	/* Magic number, little endian, "RISCV" */
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	/* Magic number 2, little endian, "RSC\x05" */
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
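/*
 * relocate: called with a0 holding the physical address of the kernel's
 * top-level page table.  It builds the satp value, briefly switches through
 * the trampoline page table so the hart survives the PA->VA transition, and
 * returns to its caller at the equivalent virtual address.
 */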
93relocate:
94 /* Relocate return address */
Alexandre Ghiti658e2c52021-06-17 15:53:07 +020095 la a1, kernel_map
Vitaly Wool44c92252021-04-13 02:35:14 -040096 XIP_FIXUP_OFFSET a1
Alexandre Ghiti658e2c52021-06-17 15:53:07 +020097 REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
Atish Patrae0119952020-03-17 18:11:39 -070098 la a2, _start
99 sub a1, a1, a2
100 add ra, ra, a1
101
	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	la a1, satp_mode
	REG_L a1, 0(a1)
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	XIP_FIXUP_OFFSET a0
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a0 contains the hartid & a1 points at this hart's struct sbi_hart_boot_data */
	li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
	XIP_FIXUP_OFFSET a2
	add a2, a2, a1
	REG_L tp, (a2)
	li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
	XIP_FIXUP_OFFSET a3
	add a3, a3, a1
	REG_L sp, (a3)

.Lsecondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif
	call setup_trap_vector
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set sup0 scratch register to 0, indicating to exception vector that
	 * we are presently executing in kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.align 2
.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Setup a PMP to permit access to all of memory. Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

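	/*
	 * An all-ones pmpaddr0 with the NAPOT address mode covers the entire
	 * physical address space; granting it R/W/X leaves all of memory
	 * accessible once the PMP is active.
	 */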
	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:

	/* The lottery system is only required for the spinwait booting method */
#ifndef CONFIG_XIP_KERNEL
	/* Pick one hart to run the main boot sequence */
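	/*
	 * amoadd.w returns the previous counter value: the first hart to get
	 * here reads 0 and becomes the boot hart, everyone else falls through
	 * to the secondary spin loop.
	 */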
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start

#else
	/* hart_lottery in flash contains a magic number */
	la a3, hart_lottery
	mv a2, a3
	XIP_FIXUP_OFFSET a2
	XIP_FIXUP_FLASH_OFFSET a3
	lw t1, (a3)
	amoswap.w t0, t1, (a2)
	/* first time here if hart_lottery in RAM is not set */
	beq t0, t1, .Lsecondary_start

#endif /* CONFIG_XIP_KERNEL */
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

#ifdef CONFIG_XIP_KERNEL
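	/*
	 * Data is still only present in flash at this point, so give
	 * __copy_data a temporary stack just past the image in RAM; the
	 * regular init stack lives in .data and is not usable until the
	 * copy below has finished.
	 */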
	la sp, _end + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	mv s0, a0
	call __copy_data

	/* Restore a0 copy */
	mv a0, s0
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:
#endif
	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1

	la a2, boot_cpu_hartid
	XIP_FIXUP_OFFSET a2
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la sp, init_thread_union + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
	XIP_FIXUP_OFFSET a0
#else
	mv a0, s1
#endif /* CONFIG_BUILTIN_DTB */
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
.Lsecondary_start:
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

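	/*
	 * Index the per-hart spinwait pointer arrays: scale the hartid by the
	 * register size (LGREG) to find this hart's slot in
	 * __cpu_spinwait_stack_pointer / __cpu_spinwait_task_pointer.
	 */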
	slli a3, a0, LGREG
	la a1, __cpu_spinwait_stack_pointer
	XIP_FIXUP_OFFSET a1
	la a2, __cpu_spinwait_task_pointer
	XIP_FIXUP_OFFSET a2
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	fence

	tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
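/*
 * When entered directly in M-mode there is no firmware to have scrubbed the
 * register file, so clear the integer registers (and the FPU state, if
 * present) to avoid inheriting junk; ra, a0 and a1 are left untouched
 * because the caller still needs them.
 */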
ENTRY(reset_regs)
	li sp, 0
	li gp, 0
	li tp, 0
	li t0, 0
	li t1, 0
	li t2, 0
	li s0, 0
	li s1, 0
	li a2, 0
	li a3, 0
	li a4, 0
	li a5, 0
	li a6, 0
	li a7, 0
	li s2, 0
	li s3, 0
	li s4, 0
	li s5, 0
	li s6, 0
	li s7, 0
	li s8, 0
	li s9, 0
	li s10, 0
	li s11, 0
	li t3, 0
	li t4, 0
	li t5, 0
	li t6, 0
	csrw CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	csrr t0, CSR_MISA
	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz t0, .Lreset_regs_done

	li t1, SR_FS
	csrs CSR_STATUS, t1
	fmv.s.x f0, zero
	fmv.s.x f1, zero
	fmv.s.x f2, zero
	fmv.s.x f3, zero
	fmv.s.x f4, zero
	fmv.s.x f5, zero
	fmv.s.x f6, zero
	fmv.s.x f7, zero
	fmv.s.x f8, zero
	fmv.s.x f9, zero
	fmv.s.x f10, zero
	fmv.s.x f11, zero
	fmv.s.x f12, zero
	fmv.s.x f13, zero
	fmv.s.x f14, zero
	fmv.s.x f15, zero
	fmv.s.x f16, zero
	fmv.s.x f17, zero
	fmv.s.x f18, zero
	fmv.s.x f19, zero
	fmv.s.x f20, zero
	fmv.s.x f21, zero
	fmv.s.x f22, zero
	fmv.s.x f23, zero
	fmv.s.x f24, zero
	fmv.s.x f25, zero
	fmv.s.x f26, zero
	fmv.s.x f27, zero
	fmv.s.x f28, zero
	fmv.s.x f29, zero
	fmv.s.x f30, zero
	fmv.s.x f31, zero
	csrw fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */