Thomas Gleixner | 50acfb2 | 2019-05-29 07:18:00 -0700 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Palmer Dabbelt | 7db91e5 | 2017-07-10 18:04:30 -0700 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2012 Regents of the University of California |
Palmer Dabbelt | 7db91e5 | 2017-07-10 18:04:30 -0700 | [diff] [blame] | 4 | */ |
| 5 | |
| 6 | #ifndef _ASM_RISCV_SWITCH_TO_H |
| 7 | #define _ASM_RISCV_SWITCH_TO_H |
| 8 | |
Jisheng Zhang | 37a7a2a | 2021-05-12 22:55:45 +0800 | [diff] [blame] | 9 | #include <linux/jump_label.h> |
Paul Walmsley | 5ed881b | 2019-10-17 15:21:28 -0700 | [diff] [blame] | 10 | #include <linux/sched/task_stack.h> |
Palmer Dabbelt | 7db91e5 | 2017-07-10 18:04:30 -0700 | [diff] [blame] | 11 | #include <asm/processor.h> |
| 12 | #include <asm/ptrace.h> |
| 13 | #include <asm/csr.h> |
| 14 | |
Alan Kao | 9671f70 | 2018-10-09 10:18:33 +0800 | [diff] [blame] | 15 | #ifdef CONFIG_FPU |
/* Assembly helpers that bulk-copy the FP register file to/from thread state. */
extern void __fstate_save(struct task_struct *save_to);
extern void __fstate_restore(struct task_struct *restore_from);

/*
 * Mark the saved status register's FS field as "clean": the in-memory
 * FP state matches the live FP registers, so no further save is needed
 * until the FP unit is dirtied again.
 */
static inline void __fstate_clean(struct pt_regs *regs)
{
	regs->status = (regs->status & ~SR_FS) | SR_FS_CLEAN;
}
| 23 | |
Vincent Chen | 8ac71d7 | 2019-08-14 16:23:52 +0800 | [diff] [blame] | 24 | static inline void fstate_off(struct task_struct *task, |
| 25 | struct pt_regs *regs) |
| 26 | { |
Christoph Hellwig | a4c3733 | 2019-10-28 13:10:32 +0100 | [diff] [blame] | 27 | regs->status = (regs->status & ~SR_FS) | SR_FS_OFF; |
Vincent Chen | 8ac71d7 | 2019-08-14 16:23:52 +0800 | [diff] [blame] | 28 | } |
| 29 | |
Palmer Dabbelt | 7db91e5 | 2017-07-10 18:04:30 -0700 | [diff] [blame] | 30 | static inline void fstate_save(struct task_struct *task, |
| 31 | struct pt_regs *regs) |
| 32 | { |
Christoph Hellwig | a4c3733 | 2019-10-28 13:10:32 +0100 | [diff] [blame] | 33 | if ((regs->status & SR_FS) == SR_FS_DIRTY) { |
Palmer Dabbelt | 7db91e5 | 2017-07-10 18:04:30 -0700 | [diff] [blame] | 34 | __fstate_save(task); |
| 35 | __fstate_clean(regs); |
| 36 | } |
| 37 | } |
| 38 | |
| 39 | static inline void fstate_restore(struct task_struct *task, |
| 40 | struct pt_regs *regs) |
| 41 | { |
Christoph Hellwig | a4c3733 | 2019-10-28 13:10:32 +0100 | [diff] [blame] | 42 | if ((regs->status & SR_FS) != SR_FS_OFF) { |
Palmer Dabbelt | 7db91e5 | 2017-07-10 18:04:30 -0700 | [diff] [blame] | 43 | __fstate_restore(task); |
| 44 | __fstate_clean(regs); |
| 45 | } |
| 46 | } |
| 47 | |
| 48 | static inline void __switch_to_aux(struct task_struct *prev, |
| 49 | struct task_struct *next) |
| 50 | { |
| 51 | struct pt_regs *regs; |
| 52 | |
| 53 | regs = task_pt_regs(prev); |
Christoph Hellwig | a4c3733 | 2019-10-28 13:10:32 +0100 | [diff] [blame] | 54 | if (unlikely(regs->status & SR_SD)) |
Palmer Dabbelt | 7db91e5 | 2017-07-10 18:04:30 -0700 | [diff] [blame] | 55 | fstate_save(prev, regs); |
| 56 | fstate_restore(next, task_pt_regs(next)); |
| 57 | } |
| 58 | |
/*
 * True when the CPU has a usable FPU.  Implemented as a static branch
 * so the check is patched to a no-op jump at runtime; cpu_hwcap_fpu is
 * presumably flipped during boot-time CPU feature probing — defined
 * elsewhere, outside this header.
 */
extern struct static_key_false cpu_hwcap_fpu;
static __always_inline bool has_fpu(void)
{
	return static_branch_likely(&cpu_hwcap_fpu);
}
#else
/* !CONFIG_FPU: no FP support built in — every FP hook becomes a no-op. */
static __always_inline bool has_fpu(void) { return false; }
#define fstate_save(task, regs) do { } while (0)
#define fstate_restore(task, regs) do { } while (0)
#define __switch_to_aux(__prev, __next) do { } while (0)
#endif
| 70 | |
/* Low-level register switch, implemented in assembly; returns the task
 * we actually switched away from. */
extern struct task_struct *__switch_to(struct task_struct *,
				       struct task_struct *);

/*
 * switch_to(prev, next, last) - switch context from @prev to @next.
 *
 * @prev and @next are evaluated once into locals before use.  The FP
 * state handoff (__switch_to_aux) only runs when the CPU has an FPU.
 * @last receives __switch_to()'s return value: the task this CPU was
 * running before control came back here (which may differ from @prev
 * once other CPUs have scheduled in between).
 */
#define switch_to(prev, next, last)			\
do {							\
	struct task_struct *__prev = (prev);		\
	struct task_struct *__next = (next);		\
	if (has_fpu())					\
		__switch_to_aux(__prev, __next);	\
	((last) = __switch_to(__prev, __next));		\
} while (0)
| 82 | |
| 83 | #endif /* _ASM_RISCV_SWITCH_TO_H */ |