/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

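/*
 * Without CONFIG_PREEMPTION there is no kernel-preemption path, so returning
 * to kernel mode simply restores all registers: alias resume_kernel to
 * restore_all.
 */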
#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
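	/*
	 * Save all general-purpose registers except sp (x2) and tp (x4);
	 * those two are stored below from the copies stashed in s0 and the
	 * scratch CSR.
	 */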
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING
	/* If previous state is in user mode, call context_tracking_user_exit. */
	li a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call context_tracking_user_exit
skip_context_tracking:
#endif

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	la ra, ret_from_exception

	/* Handle interrupts */
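	/*
	 * handle_arch_irq is the function pointer installed by the irqchip
	 * driver via set_handle_irq(); call it with pt_regs in a0.
	 */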
	move a0, sp /* pt_regs */
	la a1, handle_arch_irq
	REG_L a1, (a1)
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_on
#endif
	csrs CSR_STATUS, SR_IE

1:
	la ra, ret_from_exception
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
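	/*
	 * The exception code in s4 indexes excp_vect_table; entries are
	 * pointer-sized, hence the shift by RISCV_LGPTR.
	 */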
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
	/*
	 * When running in M-Mode (no MMU config), MPIE does not get set.
	 * As a result, we need to force-enable interrupts here because
	 * handle_exception did not set SR_IE, as it always sees SR_PIE
	 * being cleared.
	 */
	csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall.
	 */
	bgeu a7, t0, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We did not execute the actual syscall; seccomp already set the
	 * return value in the current task's pt_regs (if it was configured
	 * with SECCOMP_RET_ERRNO/TRACE).
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
	/* The MPP value is too large to be used as an immediate arg for andi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

#ifdef CONFIG_CONTEXT_TRACKING
	call context_tracking_user_enter
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call trace_hardirqs_on
	j 2f
1:
	call trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)
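	/*
	 * The store-conditional above is used only for its side effect of
	 * clearing the reservation: rd is x0, and a2 was just reloaded from
	 * PT_EPC, so memory is unchanged whether or not the store succeeds.
	 */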

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

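	/*
	 * Restore all general-purpose registers except sp (x2): it still
	 * points at pt_regs and is the base for these loads, so it is
	 * restored last.
	 */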
	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
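	/*
	 * Preempt only if preempt_count is zero and TIF_NEED_RESCHED is set;
	 * otherwise just restore the interrupted context.
	 */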
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
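	/*
	 * a0 still holds the previous task from __switch_to, which is the
	 * argument schedule_tail() expects.
	 */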
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
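	/*
	 * s0 holds the thread function and s1 its argument, as set up by
	 * copy_thread() when the kernel thread was created.
	 */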
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	/* Exception vector table */
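	/*
	 * Indexed by the synchronous exception code held in the cause CSR;
	 * see the table dispatch in handle_exception above.
	 */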
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault /* instruction page fault */
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif