/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_API_H
#define _ASM_X86_FPU_API_H
#include <linux/bottom_half.h>

#include <asm/fpu/types.h>

/*
 * Use kernel_fpu_begin/end() if you intend to use the FPU in kernel context.
 * It disables preemption, so be careful if you intend to use it for long
 * periods of time.
 * If you intend to use the FPU in irq/softirq context, first check with
 * irq_fpu_usable() whether that is possible.
 */

/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
#define KFPU_387	_BITUL(0)	/* 387 state will be initialized */
#define KFPU_MXCSR	_BITUL(1)	/* MXCSR will be initialized */

extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
extern void fpregs_mark_activate(void);

/* Code that is unaware of kernel_fpu_begin_mask() can use this */
static inline void kernel_fpu_begin(void)
{
#ifdef CONFIG_X86_64
	/*
	 * Any 64-bit code that uses 387 instructions must explicitly request
	 * KFPU_387.
	 */
	kernel_fpu_begin_mask(KFPU_MXCSR);
#else
	/*
	 * 32-bit kernel code may use 387 operations as well as SSE2, etc,
	 * as long as it checks that the CPU has the required capability.
	 */
	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
#endif
}

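/*
 * Illustrative usage sketch (not part of this header): a typical
 * kernel_fpu_begin()/kernel_fpu_end() section. do_simd_work() and
 * do_scalar_fallback() are hypothetical helpers. Code that can run in
 * IRQ/softirq context must check irq_fpu_usable() first:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		do_simd_work();		// SSE/AVX instructions are safe here
 *		kernel_fpu_end();	// preemption is enabled again
 *	} else {
 *		do_scalar_fallback();	// non-FPU path
 *	}
 *
 * Keep the section short: preemption is disabled between begin and end.
 */
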
/*
 * Use fpregs_lock() while editing the CPU's FPU registers or fpu->fpstate.
 * A context switch will (and a softirq might) save the CPU's FPU registers
 * to fpu->fpstate.regs and set TIF_NEED_FPU_LOAD, leaving the CPU's FPU
 * registers in a random state.
 *
 * local_bh_disable() protects against both preemption and soft interrupts
 * on !RT kernels.
 *
 * On RT kernels local_bh_disable() is not sufficient because it only
 * serializes soft interrupt related sections via a local lock, but stays
 * preemptible. Disabling preemption is the right choice here: bottom half
 * processing always runs in thread context on RT kernels, so disabling
 * preemption implicitly prevents bottom half processing as well.
 *
 * Disabling preemption also serializes against kernel_fpu_begin().
 */
static inline void fpregs_lock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_disable();
	else
		preempt_disable();
}

static inline void fpregs_unlock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_enable();
	else
		preempt_enable();
}

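/*
 * Illustrative usage sketch (not part of this header): updating the current
 * task's FPU state under fpregs_lock(). touch_task_fpstate() is a
 * hypothetical stand-in for the actual register/fpstate modification:
 *
 *	fpregs_lock();
 *	touch_task_fpstate(&current->thread.fpu);
 *	fpregs_unlock();
 *
 * Between lock and unlock neither a context switch nor (on !RT kernels)
 * softirq processing can clobber the state being edited.
 */
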
#ifdef CONFIG_X86_DEBUG_FPU
extern void fpregs_assert_state_consistent(void);
#else
static inline void fpregs_assert_state_consistent(void) { }
#endif

/*
 * Load the task FPU state before returning to userspace.
 */
extern void switch_fpu_return(void);

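/*
 * Illustrative sketch (not part of this header): the exit-to-userspace path
 * only reloads the task's FPU registers when they were invalidated, roughly:
 *
 *	fpregs_assert_state_consistent();
 *	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 *		switch_fpu_return();
 *
 * This is a simplified view of what the x86 exit-to-user-mode code does.
 */
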
/*
 * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
 *
 * If 'feature_name' is set, a human-readable description of the feature is
 * stored there as well - this can be used to print error (or success)
 * messages.
 */
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);

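/*
 * Illustrative usage sketch (not part of this header): probing for SSE and
 * AVX (YMM) state before relying on it. The error handling is a minimal
 * example, not a prescription:
 *
 *	const char *feature_name;
 *
 *	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
 *			       &feature_name)) {
 *		pr_info("CPU feature '%s' is not supported\n", feature_name);
 *		return -ENODEV;
 *	}
 *
 * On success all requested xfeatures are available; on failure
 * 'feature_name' names a missing feature.
 */
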
/* Trap handling */
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern void fpu_sync_fpstate(struct fpu *fpu);
extern void fpu_reset_from_exception_fixup(void);

/* Boot, hotplug and resume */
extern void fpu__init_cpu(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

/* State tracking */
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/* Process cleanup */
#ifdef CONFIG_X86_64
extern void fpstate_free(struct fpu *fpu);
#else
static inline void fpstate_free(struct fpu *fpu) { }
#endif

/* fpstate-related functions which are exported to KVM */
extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature);

extern u64 xstate_get_guest_group_perm(void);

/* KVM specific functions */
extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu);
extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu);
extern int fpu_swap_kvm_fpstate(struct fpu_guest *gfpu, bool enter_guest);
extern int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures);

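/*
 * Illustrative sketch (not part of this header): rough lifecycle of a guest
 * FPU state as a hypervisor such as KVM drives it. Error handling and the
 * surrounding vCPU structures are omitted:
 *
 *	struct fpu_guest gfpu;
 *
 *	fpu_alloc_guest_fpstate(&gfpu);		// at vCPU creation
 *	...
 *	fpu_swap_kvm_fpstate(&gfpu, true);	// before entering the guest
 *	...					// run the guest
 *	fpu_swap_kvm_fpstate(&gfpu, false);	// after leaving the guest
 *	...
 *	fpu_free_guest_fpstate(&gfpu);		// at vCPU destruction
 */
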
#ifdef CONFIG_X86_64
extern void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd);
extern void fpu_sync_guest_vmexit_xfd_state(void);
#else
static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) { }
static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
#endif

extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru);
extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);

static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
{
	gfpu->fpstate->is_confidential = true;
}

static inline bool fpstate_is_confidential(struct fpu_guest *gfpu)
{
	return gfpu->fpstate->is_confidential;
}

/* prctl */
struct task_struct;
extern long fpu_xstate_prctl(struct task_struct *tsk, int option, unsigned long arg2);

#endif /* _ASM_X86_FPU_API_H */