/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_API_H
#define _ASM_X86_FPU_API_H
#include <linux/bottom_half.h>

#include <asm/fpu/types.h>

/*
 * Use kernel_fpu_begin/end() if you intend to use the FPU in kernel context.
 * It disables preemption, so be careful if you intend to use it for long
 * periods of time.
 * If you intend to use the FPU in irq/softirq context, check first with
 * irq_fpu_usable() whether that is possible.
 */

/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
#define KFPU_387	_BITUL(0)	/* 387 state will be initialized */
#define KFPU_MXCSR	_BITUL(1)	/* MXCSR will be initialized */

extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
extern void fpregs_mark_activate(void);

/* Code that is unaware of kernel_fpu_begin_mask() can use this */
static inline void kernel_fpu_begin(void)
{
#ifdef CONFIG_X86_64
	/*
	 * Any 64-bit code that uses 387 instructions must explicitly request
	 * KFPU_387.
	 */
	kernel_fpu_begin_mask(KFPU_MXCSR);
#else
	/*
	 * 32-bit kernel code may use 387 operations as well as SSE2, etc,
	 * as long as it checks that the CPU has the required capability.
	 */
	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
#endif
}

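/*
 * Example (illustrative sketch, not part of this API): a typical caller
 * brackets its SIMD work with kernel_fpu_begin()/kernel_fpu_end() and,
 * when it can run in irq/softirq context, checks irq_fpu_usable() first
 * and falls back to a scalar path. The function name and the fallback
 * helper below are hypothetical.
 *
 *	static void my_xor_blocks(u8 *dst, const u8 *src, unsigned int bytes)
 *	{
 *		if (!irq_fpu_usable()) {
 *			my_xor_blocks_scalar(dst, src, bytes);	// hypothetical fallback
 *			return;
 *		}
 *		kernel_fpu_begin();
 *		// SSE/AVX instructions may be used here; preemption is
 *		// disabled, so keep this region short.
 *		kernel_fpu_end();
 *	}
 */
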
/*
 * Use fpregs_lock() while editing the CPU's FPU registers or fpu->fpstate.
 * A context switch will (and a softirq might) save the CPU's FPU registers
 * to fpu->fpstate.regs and set TIF_NEED_FPU_LOAD, leaving the CPU's FPU
 * registers in a random state.
 *
 * local_bh_disable() protects against both preemption and soft interrupts
 * on !RT kernels.
 *
 * On RT kernels local_bh_disable() is not sufficient because it only
 * serializes soft interrupt related sections via a local lock, but stays
 * preemptible. Disabling preemption is the right choice here as bottom
 * half processing is always in thread context on RT kernels, so it
 * implicitly prevents bottom half processing as well.
 *
 * Disabling preemption also serializes against kernel_fpu_begin().
 */
static inline void fpregs_lock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_disable();
	else
		preempt_disable();
}

static inline void fpregs_unlock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_enable();
	else
		preempt_enable();
}

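/*
 * Example (illustrative sketch): code that modifies the current task's
 * fpstate takes fpregs_lock() around the modification so that neither a
 * context switch nor softirq FPU use can clobber the registers mid-update.
 * The body is a placeholder.
 *
 *	fpregs_lock();
 *	// ... inspect or modify current->thread.fpu state here ...
 *	fpregs_unlock();
 */
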
#ifdef CONFIG_X86_DEBUG_FPU
extern void fpregs_assert_state_consistent(void);
#else
static inline void fpregs_assert_state_consistent(void) { }
#endif

/*
 * Load the task FPU state before returning to userspace.
 */
extern void switch_fpu_return(void);

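/*
 * Example (illustrative sketch): the exit-to-usermode path reloads the
 * task's FPU state only when a context switch or kernel FPU use left the
 * registers stale, i.e. when TIF_NEED_FPU_LOAD is set:
 *
 *	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 *		switch_fpu_return();
 */
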
/*
 * Query the presence of one or more xfeatures. Works on any legacy CPU as
 * well.
 *
 * If 'feature_name' is set then put a human-readable description of
 * the feature there as well - this can be used to print error (or success)
 * messages.
 */
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);

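/*
 * Example (illustrative sketch): a driver that wants AVX checks for the
 * SSE and YMM xfeatures before registering its optimized code path; the
 * pr_info() text and error handling are illustrative.
 *
 *	const char *feature_name;
 *
 *	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
 *			       &feature_name)) {
 *		pr_info("CPU feature '%s' is not supported\n", feature_name);
 *		return -ENODEV;
 *	}
 */
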
/* Trap handling */
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern void fpu_sync_fpstate(struct fpu *fpu);
extern void fpu_reset_from_exception_fixup(void);

/* Boot, hotplug and resume */
extern void fpu__init_cpu(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

/* State tracking */
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/* Process cleanup */
#ifdef CONFIG_X86_64
extern void fpstate_free(struct fpu *fpu);
#else
static inline void fpstate_free(struct fpu *fpu) { }
#endif

/* fpstate-related functions which are exported to KVM */
extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature);

extern u64 xstate_get_guest_group_perm(void);

/* KVM specific functions */
extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu);
extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu);
extern int fpu_swap_kvm_fpstate(struct fpu_guest *gfpu, bool enter_guest);
extern int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures);

#ifdef CONFIG_X86_64
extern void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd);
extern void fpu_sync_guest_vmexit_xfd_state(void);
#else
static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) { }
static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
#endif

extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru);
extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);

static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
{
	gfpu->fpstate->is_confidential = true;
}

static inline bool fpstate_is_confidential(struct fpu_guest *gfpu)
{
	return gfpu->fpstate->is_confidential;
}

/* prctl */
struct task_struct;
extern long fpu_xstate_prctl(struct task_struct *tsk, int option, unsigned long arg2);

#endif /* _ASM_X86_FPU_API_H */