/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
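
/*
 * Without CONFIG_PREEMPTION a trap taken from kernel mode never reschedules
 * on its way out, so resume_kernel collapses to a plain restore_all.
 */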
#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
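	/*
	 * Kernel stack overflow probe (assuming the vmap'd kernel stack is
	 * aligned so that bit THREAD_SHIFT is clear for every valid stack
	 * address): if subtracting the pt_regs frame flips that bit, the
	 * frame would fall off the stack, so divert to the overflow handler.
	 */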
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access (SR_SUM), as it should only be
	 * enabled in the actual user copy routines.
	 *
	 * Disable the FPU (SR_FS) to detect illegal usage of floating point
	 * in kernel space.
	 */
	li t0, SR_SUM | SR_FS
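
	/*
	 * Read the exception CSRs and the saved user sp now and record them
	 * in pt_regs; any later trap would clobber EPC, CAUSE and TVAL.
	 */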
	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/*
	 * Load the global pointer.  Relaxation must be disabled here, since
	 * gp itself is not valid until this sequence completes.
	 */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING
	/* If the previous state was user mode, call context_tracking_user_exit. */
	li a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call context_tracking_user_exit
skip_context_tracking:
#endif

	/*
	 * The MSB of the cause register differentiates between
	 * interrupts and exceptions.
	 */
	bge s4, zero, 1f

	la ra, ret_from_exception

	/* Handle interrupts */
	move a0, sp /* pt_regs */
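	/*
	 * handle_arch_irq is the root interrupt handler pointer installed by
	 * the irqchip driver through set_handle_irq(); dereference it and
	 * jump there with pt_regs in a0.
	 */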
	la a1, handle_arch_irq
	REG_L a1, (a1)
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	/* kprobes, entered via ebreak, must have interrupts disabled. */
	li t0, EXC_BREAKPOINT
	beq s4, t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_on
#endif
	csrs CSR_STATUS, SR_IE

1:
	la ra, ret_from_exception
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
	/*
	 * When running in M-Mode (no MMU config), MPIE does not get set.
	 * As a result, we need to force-enable interrupts here because
	 * handle_exception did not set SR_IE, as it always sees SR_PIE
	 * being cleared.
	 */
	csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* Save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * The syscall number is held in a7.  If it is outside the allowed
	 * range, redirect to sys_ni_syscall.
	 */
	bgeu a7, t0, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * On this path the syscall itself was not executed: seccomp has
	 * already set the return value in the current task's pt_regs
	 * (when configured with SECCOMP_RET_ERRNO/TRACE).
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif
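	/* Work out which privilege mode we trapped from (the xPP field). */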
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

#ifdef CONFIG_CONTEXT_TRACKING
	call context_tracking_user_enter
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call trace_hardirqs_on
	j 2f
1:
	call trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts.  We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS.  As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context.  While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)
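	/*
	 * The SC above either fails outright or rewrites the value that was
	 * just loaded, so pt_regs is unchanged either way; its only purpose
	 * is to kill any dangling reservation.
	 */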

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
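	/* Only preempt if preempt_count is zero and a reschedule is pending. */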
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

	/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

#ifdef CONFIG_VMAP_STACK
handle_kernel_stack_overflow:
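	/*
	 * The stack pointer can no longer be trusted here.  Park the
	 * caller-saved registers on the temporary shadow stack so that
	 * get_overflow_stack() can be called, then move to the per-CPU
	 * overflow stack and build a normal pt_regs frame there.
	 */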
	la sp, shadow_stack
	addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE

	/* Save the caller-saved registers to the shadow stack */
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	la ra, restore_caller_reg
	tail get_overflow_stack

restore_caller_reg:
	/* Save the per-CPU overflow stack pointer returned in a0 */
	REG_S a0, -8(sp)
	/* Restore the caller-saved registers from the shadow stack */
	REG_L x1, PT_RA(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	/* Switch to the per-CPU overflow stack */
	REG_L sp, -8(sp)
	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* Save the full context on the overflow stack */
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
#endif

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
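	/*
	 * s0/s1 carry the thread function and its argument, seeded by
	 * copy_thread() when this kernel thread was created.
	 */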
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
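	/* a3/a4 now point at the saved-register area in prev's and next's thread_struct */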
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
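/* Without an MMU there is no paging, so page faults are reported as unknown traps. */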
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
ENTRY(excp_vect_table)
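	/*
	 * Indexed by the exception code from the cause register, scaled by
	 * the pointer size (see the dispatch in handle_exception).
	 */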
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif