/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
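	/*
	 * Save all general-purpose registers except sp (x2) and tp (x4):
	 * the sp at trap time was saved to TASK_TI_USER_SP above, and tp is
	 * still available via CSR_SCRATCH; both are written to pt_regs from
	 * s0/s5 further down.
	 */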
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

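	/*
	 * Read the sp at trap time and the trap CSRs into callee-saved
	 * registers before spilling them to pt_regs; s1 (status), s2 (epc)
	 * and s4 (cause) stay live for the dispatch code below. csrrc also
	 * clears SR_SUM and SR_FS as described above.
	 */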
	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	la ra, ret_from_exception
	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
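	/*
	 * handle_arch_irq is a function pointer installed by the interrupt
	 * controller driver via set_handle_irq(); load it and jump to it
	 * with pt_regs as the only argument.
	 */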
	move a0, sp /* pt_regs */
	la a1, handle_arch_irq
	REG_L a1, (a1)
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	csrs CSR_STATUS, SR_IE

1:
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
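	/*
	 * The cause (in s4) indexes excp_vect_table: scale it by the
	 * pointer size before the bounds check and indirect jump below.
	 */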
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall.
	 */
	bge a7, t0, 1f
	/*
	 * Check if syscall is rejected by tracer, i.e., a7 == -1.
	 * If yes, we pretend it was executed.
	 */
	li t1, -1
	beq a7, t1, ret_from_syscall_rejected
	blt a7, t1, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
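	/* s0 now holds either the chosen handler or sys_ni_syscall */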
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * If we arrive here via ret_from_syscall_rejected, the syscall was
	 * not actually executed; seccomp has already set the return value
	 * in the current task's pt_regs (when configured with
	 * SECCOMP_RET_ERRNO/TRACE).
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
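	/*
	 * With interrupts now disabled, check the previous privilege mode
	 * (SPP, or MPP when running in M-mode) to see whether the trap came
	 * from the kernel or from userspace.
	 */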
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for andi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

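	/*
	 * Restore all general-purpose registers from pt_regs. sp (x2) is
	 * restored last since it is the base register for these loads.
	 */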
	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
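	/*
	 * Only preempt if preempt_count is zero and TIF_NEED_RESCHED is
	 * set; otherwise jump straight back to restore_all.
	 */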
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

	/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
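	/*
	 * A non-zero return value from do_syscall_trace_enter() means the
	 * syscall was rejected (e.g. by seccomp), so skip it. Reload the
	 * (possibly tracer-modified) syscall arguments from pt_regs either
	 * way, since the call above clobbered the argument registers.
	 */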
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
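	/*
	 * a0 still holds the previous task (preserved across __switch_to),
	 * which is the argument schedule_tail() expects; the new task then
	 * returns to user mode via ret_from_exception.
	 */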
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
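	/*
	 * For kernel threads, copy_thread() stashes the thread function in
	 * s0 and its argument in s1, which __switch_to restores before we
	 * get here.
	 */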
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
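	/*
	 * a3 and a4 now point at prev->thread.ra and next->thread.ra; the
	 * TASK_THREAD_*_RA offsets below are relative to thread.ra.
	 */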
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
	addi tp, a1, TASK_TI
#else
	move tp, a1
#endif
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault /* instruction page fault */
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
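/*
 * With no vDSO on nommu kernels, signal delivery copies this sigreturn
 * trampoline onto the user stack and points ra at it instead.
 */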
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif