/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

	.text
	.altmacro

/*
 * Prepares to enter a system call or exception by saving all registers to the
 * stack.
 */
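/*
 * A rough sketch of the register frame the PT_* offsets below index into.
 * The real offsets are generated from struct pt_regs by asm-offsets.c and
 * depend on XLEN; the field list here follows asm/ptrace.h and is meant as
 * an illustration only, not an authoritative definition:
 *
 *	struct pt_regs {
 *		unsigned long epc;
 *		unsigned long ra, sp, gp, tp;
 *		unsigned long t0, t1, t2;
 *		unsigned long s0, s1;
 *		unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
 *		unsigned long s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
 *		unsigned long t3, t4, t5, t6;
 *		unsigned long status, badaddr, cause;
 *		unsigned long orig_a0;
 *	};
 *
 * orig_a0 preserves a0 as it was at syscall entry (see PT_ORIG_A0 below).
 */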
	.macro SAVE_ALL
	LOCAL _restore_kernel_tpsp
	LOCAL _save_context

	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 * (The csrrw below atomically swaps tp with the scratch CSR, so tp
	 * ends up holding either the kernel thread pointer or zero, and the
	 * pre-trap tp is parked in the scratch CSR until it is read back or
	 * saved below.)
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access (SUM), as it should only be enabled
	 * in the actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	.endm

/*
 * Prepares to return from a system call or exception by restoring all
 * registers from the stack.
 */
	.macro RESTORE_ALL
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts.  We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS.  As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context.  While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)
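	/*
	 * For illustration only (not part of this file's logic): a typical
	 * LR/SC compare-and-swap loop.  When the bne is taken, the sc never
	 * executes, so the reservation acquired by lr.w can be left dangling:
	 *
	 *	0:	lr.w	t0, (a0)	# acquire reservation on (a0)
	 *		bne	t0, a1, 1f	# value != expected: branch
	 *					#   forward around the sc
	 *		sc.w	t1, a2, (a0)	# store new value; clears the
	 *					#   reservation on success
	 *		bnez	t1, 0b		# retry if the sc failed
	 *	1:
	 */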

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)
	.endm

#if !IS_ENABLED(CONFIG_PREEMPT)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	SAVE_ALL

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	la ra, ret_from_exception
	/*
	 * The MSB of the cause register distinguishes interrupts (MSB set)
	 * from exceptions (MSB clear).
	 */
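	/*
	 * Example (RV64, illustrative): a supervisor timer interrupt reports
	 * scause = 0x8000000000000005, which is negative when treated as a
	 * signed value, so the bge below falls through to the interrupt path;
	 * a breakpoint exception reports scause = 3, which is non-negative
	 * and takes the branch to the exception path.
	 */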
	bge s4, zero, 1f

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	tail do_IRQ
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	csrs CSR_STATUS, SR_IE

1:
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * The tracer can change the syscall number to a valid or invalid
	 * value.  We use the syscall_set_nr helper in syscall_trace_enter,
	 * so we cannot trust the current value in a7 and have to reload it
	 * from the current task's pt_regs.
	 */
	REG_L a7, PT_A7(sp)
	/*
	 * The syscall number is held in a7.  If it is above the highest
	 * allowed value, redirect to sys_ni_syscall.
	 */
	bge a7, t0, 1f
	/*
	 * Check if the syscall was rejected by the tracer or seccomp,
	 * i.e. a7 == -1.  If so, pretend it was executed.
	 */
	li t1, -1
	beq a7, t1, ret_from_syscall_rejected
	blt a7, t1, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0
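	/*
	 * The checks above amount to roughly the following (illustrative C
	 * sketch, not generated from this file; the argument-passing detail
	 * is an assumption based on the standard a0-a5 syscall convention):
	 *
	 *	if (a7 >= __NR_syscalls)
	 *		a0 = sys_ni_syscall();
	 *	else if (a7 == -1)
	 *		;			// rejected by the tracer or
	 *					// seccomp; return value is
	 *					// already set in pt_regs
	 *	else if (a7 < -1)
	 *		a0 = sys_ni_syscall();
	 *	else
	 *		a0 = sys_call_table[a7](a0, a1, a2, a3, a4, a5);
	 */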

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall.
	 * Seccomp has already set the return value in the current task's
	 * pt_regs (if it was configured with SECCOMP_RET_ERRNO/TRACE).
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for andi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
	RESTORE_ALL
#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPT)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

	/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *  a0: previous task_struct (must be preserved across the switch)
 *  a1: next task_struct
 *
 * The values of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
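/*
 * A sketch of the pieces involved, for orientation.  The TASK_THREAD_*
 * offsets are generated by asm-offsets.c, and the prototype below reflects
 * how switch_to() is expected to call this routine; treat both as
 * assumptions for illustration rather than definitions copied from this
 * tree:
 *
 *	struct thread_struct {
 *		unsigned long ra;	// return address
 *		unsigned long sp;	// kernel stack pointer
 *		unsigned long s[12];	// callee-saved s0..s11
 *	};				// (FP state omitted here)
 *
 *	extern struct task_struct *__switch_to(struct task_struct *prev,
 *					       struct task_struct *next);
 *
 * TASK_THREAD_RA (the offset of thread.ra within task_struct) is loaded
 * with li and added explicitly, which appears to be because it can exceed
 * the 12-bit immediate range of a load/store; the small TASK_THREAD_*_RA
 * offsets, relative to thread.ra, are then used directly.
 */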
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
	addi tp, a1, TASK_TI
#else
	move tp, a1
#endif
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	/* Exception vector table */
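	/*
	 * The table is indexed by the scause exception code (0..15); entries
	 * pointing at do_trap_unknown correspond to codes that are reserved
	 * or not expected here.  handle_exception scales the code by the
	 * pointer size (slli t0, s4, RISCV_LGPTR) and bounds-checks it
	 * against excp_vect_table_end before the indirect jump.
	 */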
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif