/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern void fpu__clear_user_states(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);

extern void fpu_sync_fpstate(struct fpu *fpu);

/* Clone and exit operations */
extern int  fpu_clone(struct task_struct *dst);
extern void fpu_flush_thread(void);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif

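/*
 * Note that the !CONFIG_X86_DEBUG_FPU variant above still evaluates @x for
 * its side effects and yields 0, so constructs like "if (WARN_ON_FPU(err))"
 * compile and behave identically in both configurations, just without
 * emitting the one-time warning.
 */
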
/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
extern void save_fpregs_to_fpstate(struct fpu *fpu);

/* Returns 0 or the negated trap number, which results in -EFAULT for #PF */
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
									\
	might_fault();							\
									\
	asm volatile(ASM_STAC "\n"					\
		     "1: " #insn "\n"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: negl %%eax\n"					\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_FAULT(1b, 3b)				\
		     : [err] "=a" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

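/*
 * Illustrative caller sketch (not a helper defined in this header): the
 * user_insn() based save/restore wrappers below return 0 on success or the
 * negated trap number (-EFAULT for a #PF), so signal frame code typically
 * does something along the lines of:
 *
 *	if (fxsave_to_user_sigframe(buf))
 *		return -EFAULT;		// retry or fail the sigframe setup
 *
 * where 'buf' stands in for the user-space fxregs area of the frame.
 */
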
#define kernel_insn_err(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define kernel_insn(insn, output, input...)				\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\
		     : output : input)

static inline int fnsave_to_user_sigframe(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_to_user_sigframe(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
}

static inline void fxrstor(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int fxrstor_safe(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int fxrstor_from_user_sigframe(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void frstor(struct fregs_state *fx)
{
	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_safe(struct fregs_state *fx)
{
	return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_from_user_sigframe(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fxsave(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

/*
 * After this @err contains 0 on success or the negated trap number when
 * the operation raises an exception. For faults this results in -EFAULT.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: negl %%eax\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE_FAULT(1b, 3b)				\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
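/*
 * Illustrative use of XSTATE_OP() (a sketch; the real callers follow further
 * below): the 64-bit feature mask is split into its low and high halves,
 * which land in EAX/EDX as the XSAVE family of instructions requires:
 *
 *	u32 lmask = mask, hmask = mask >> 32;
 *	int err;
 *
 *	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 *	// err == 0 on success, otherwise the negated trap number
 */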

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to the modified optimization
 * provided by XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports the modified optimization which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction which might raise the exception.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * This function is called only during boot, when the x86 capability bits are
 * not yet set up and alternatives cannot be used yet.
 */
static inline void os_xrstor_booting(struct xregs_state *xstate)
{
	u64 mask = xfeatures_mask_fpstate();
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/*
	 * We should never fault when copying from a kernel buffer, and the
	 * FPU state we set at boot time should be valid.
	 */
	WARN_ON_FPU(err);
}

/*
 * Save processor xstate to xsave area.
 *
 * Uses either XSAVE or XSAVEOPT or XSAVES depending on the CPU features
 * and command line options. The choice is permanent until the next reboot.
 */
static inline void os_xsave(struct xregs_state *xstate)
{
	u64 mask = xfeatures_mask_all;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(xstate, lmask, hmask);
}

/*
 * Save xstate to the user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format of the xsave area, for backward
 * compatibility with old applications which don't understand it.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	u64 mask = xfeatures_mask_uabi();
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from the user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from a kernel space xsave area, returning an error code
 * instead of raising an exception.
 */
static inline int os_xrstor_safe(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

extern void __restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask);

static inline void restore_fpregs_from_fpstate(union fpregs_state *fpstate)
{
	__restore_fpregs_from_fpstate(fpstate, xfeatures_mask_fpstate());
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if fpu->last_cpu matches the CPU and fpu_fpregs_owner_ctx matches
 * the FPU.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: the CPU if you are using it for something else
 * (with preemption disabled), the FPU for the current task, or for a task
 * that is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
	__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
	fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}

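/*
 * Illustrative sketch of the invalidation rule above (the real call sites
 * live outside this header, e.g. in the ptrace/xstate paths): code that is
 * about to edit a stopped task's in-memory xsave buffer first marks the
 * task's register copy stale,
 *
 *	__fpu_invalidate_fpregs_state(&task->thread.fpu);
 *	// ... modify task->thread.fpu.state ...
 *
 * so that fpregs_state_valid() fails and the registers are reloaded from
 * memory the next time that task runs.
 */
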
/*
 * These generally need preemption protection to work;
 * try to avoid using them on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}

/* Internal helper for switch_fpu_return() and signal frame setup */
static inline void fpregs_restore_userregs(void)
{
	struct fpu *fpu = &current->thread.fpu;
	int cpu = smp_processor_id();

	if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
		return;

	if (!fpregs_state_valid(fpu, cpu)) {
		u64 mask;

		/*
		 * This restores _all_ xstate which has not been
		 * established yet.
		 *
		 * If PKRU is enabled, then the PKRU value is already
		 * correct because it was either set in switch_to() or in
		 * flush_thread(). So it is excluded because it might not
		 * be up to date in current->thread.fpu.xsave state.
		 */
		mask = xfeatures_mask_restore_user() |
			xfeatures_mask_supervisor();
		__restore_fpregs_from_fpstate(&fpu->state, mask);

		fpregs_activate(fpu);
		fpu->last_cpu = cpu;
	}
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}

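/*
 * Illustrative caller pattern (a sketch of code that lives outside this
 * header, in the exit-to-usermode path): when TIF_NEED_FPU_LOAD is set,
 * switch_fpu_return() is invoked before returning to user space and ends
 * up in fpregs_restore_userregs():
 *
 *	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 *		switch_fpu_return();
 *
 * Afterwards the user register state is live again and the flag is clear.
 */
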
/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
 *    will get loaded on return to userspace, or when the kernel needs it.
 *
 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
 * are saved in the current thread's FPU register state.
 *
 * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may not
 * hold current()'s FPU registers. It is required to load the
 * registers before returning to userland or using the content
 * otherwise.
 *
 * The FPU context is only stored/restored for a user task and
 * PF_KTHREAD is used to distinguish between kernel and user threads.
 */
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
	if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
		save_fpregs_to_fpstate(old_fpu);
		/*
		 * The save operation preserved register state, so the
		 * fpu_fpregs_owner_ctx is still @old_fpu. Store the
		 * current CPU number in @old_fpu, so the next return
		 * to user space can avoid the FPU register restore
		 * when it returns on the same CPU and still owns the
		 * context.
		 */
		old_fpu->last_cpu = cpu;

		trace_x86_fpu_regs_deactivated(old_fpu);
	}
}

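/*
 * Illustrative context switch usage (a sketch; the real callers live in
 * __switch_to() in arch/x86/kernel/process_32.c / process_64.c, not in
 * this header):
 *
 *	switch_fpu_prepare(prev_fpu, cpu);	// save the outgoing task's registers
 *	// ... switch stacks, segments, TLS, etc. ...
 *	switch_fpu_finish(next_fpu);		// defer the reload via TIF_NEED_FPU_LOAD
 *
 * The actual register restore then happens lazily in
 * fpregs_restore_userregs() on the way back to user space.
 */
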
/*
 * Misc helper functions:
 */

/*
 * Delay loading of the complete FPU state until the return to userland.
 * PKRU is handled separately.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu)
{
	if (cpu_feature_enabled(X86_FEATURE_FPU))
		set_thread_flag(TIF_NEED_FPU_LOAD);
}

#endif /* _ASM_X86_FPU_INTERNAL_H */