/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include "efi-header.S"

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux bootloaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 */
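	/*
	 * ("MZ" is the PE/COFF magic UEFI firmware expects at offset 0 of the
	 *  image; c.li s4,-13 happens to assemble to the bytes 0x4d 0x5a,
	 *  i.e. exactly that magic, while remaining harmless if a non-EFI
	 *  loader ever executes it.)
	 */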
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset (2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset (4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
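	/*
	 * The offsets above are the gap a bootloader must leave between the
	 * start of RAM and the kernel image: none for M-mode kernels (nothing
	 * runs below them), 2MB/4MB for S-mode kernels, which leaves room for
	 * the resident M-mode firmware (e.g. OpenSBI) at the base of RAM.
	 */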
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
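/*
 * relocate is called with a0 holding the (physical) address of the page
 * directory to switch to.  It enables paging through the trampoline page
 * table, which only maps the kernel's first superpage, fixes up ra/stvec/gp
 * for virtual addressing, then installs the real page table derived from a0
 * and returns at the corresponding virtual address.
 */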
relocate:
	/* Relocate return address */
	li a1, PAGE_OFFSET
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	li a1, SATP_MODE
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
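/*
 * Secondary harts started through the SBI HSM extension enter here in
 * S-mode with a0 = hartid (per the SBI spec); the boot hart has already
 * filled in this hart's entries in the __cpu_up_stack_pointer and
 * __cpu_up_task_pointer arrays indexed below.
 */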
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

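	/*
	 * a0 still holds this hart's id: scale it by the native register size
	 * (1 << LGREG bytes) to form a byte offset into the per-hart stack
	 * and task pointer arrays.
	 */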
	slli a3, a0, LGREG
	la a4, __cpu_up_stack_pointer
	la a5, __cpu_up_task_pointer
	add a4, a3, a4
	add a5, a3, a5
	REG_L sp, (a4)
	REG_L tp, (a5)

	.global secondary_start_common
secondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	call relocate
#endif
	call setup_trap_vector
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set the sup0 scratch register to 0, indicating to the exception
	 * vector that we are presently executing in the kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

__INIT
ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* Flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Set up a PMP to permit access to all of memory. Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

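	/*
	 * pmpaddr0 = all-ones with A=NAPOT makes the first PMP entry cover
	 * the entire address space, and R|W|X grants full access to it, so
	 * later S/U-mode accesses are never blocked by an unconfigured PMP.
	 */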
	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_SMP
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:
#endif

	/* Pick one hart to run the main boot sequence */
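	/*
	 * amoadd.w atomically increments hart_lottery and returns its old
	 * value, so only the first hart to get here reads 0 and carries on;
	 * every other hart branches off to the secondary path.
	 */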
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start

	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:

	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1
	la a2, boot_cpu_hartid
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la sp, init_thread_union + THREAD_SIZE
	mv a0, s1
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	call relocate
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment */
	la tp, init_task
	sw zero, TASK_TI_CPU(tp)
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

.Lsecondary_start:
#ifdef CONFIG_SMP
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	slli a3, a0, LGREG
	la a1, __cpu_up_stack_pointer
	la a2, __cpu_up_task_pointer
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	fence

	tail secondary_start_common
#endif

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
ENTRY(reset_regs)
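	/*
	 * Zero every general-purpose register except ra, a0 and a1, which the
	 * caller is still using (return address and boot arguments).
	 */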
	li sp, 0
	li gp, 0
	li tp, 0
	li t0, 0
	li t1, 0
	li t2, 0
	li s0, 0
	li s1, 0
	li a2, 0
	li a3, 0
	li a4, 0
	li a5, 0
	li a6, 0
	li a7, 0
	li s2, 0
	li s3, 0
	li s4, 0
	li s5, 0
	li s6, 0
	li s7, 0
	li s8, 0
	li s9, 0
	li s10, 0
	li s11, 0
	li t3, 0
	li t4, 0
	li t5, 0
	li t6, 0
	csrw CSR_SCRATCH, 0

#ifdef CONFIG_FPU
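	/*
	 * Only clear the FP state if this hart actually implements the F or D
	 * extension (checked via misa); otherwise the fmv/fcsr accesses below
	 * would trap as illegal instructions.
	 */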
	csrr t0, CSR_MISA
	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz t0, .Lreset_regs_done

	li t1, SR_FS
	csrs CSR_STATUS, t1
	fmv.s.x f0, zero
	fmv.s.x f1, zero
	fmv.s.x f2, zero
	fmv.s.x f3, zero
	fmv.s.x f4, zero
	fmv.s.x f5, zero
	fmv.s.x f6, zero
	fmv.s.x f7, zero
	fmv.s.x f8, zero
	fmv.s.x f9, zero
	fmv.s.x f10, zero
	fmv.s.x f11, zero
	fmv.s.x f12, zero
	fmv.s.x f13, zero
	fmv.s.x f14, zero
	fmv.s.x f15, zero
	fmv.s.x f16, zero
	fmv.s.x f17, zero
	fmv.s.x f18, zero
	fmv.s.x f19, zero
	fmv.s.x f20, zero
	fmv.s.x f21, zero
	fmv.s.x f22, zero
	fmv.s.x f23, zero
	fmv.s.x f24, zero
	fmv.s.x f25, zero
	fmv.s.x f26, zero
	fmv.s.x f27, zero
	fmv.s.x f28, zero
	fmv.s.x f29, zero
	fmv.s.x f30, zero
	fmv.s.x f31, zero
	csrw fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */

__PAGE_ALIGNED_BSS
	/* Empty zero page */
	.balign PAGE_SIZE