/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <linux/module.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/sigcontext.h>
#include <asm/processor.h>
#include <asm/math_emu.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/user.h>

static DEFINE_PER_CPU(bool, in_kernel_fpu);

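/*
 * kernel_fpu_disable()/kernel_fpu_enable() set and clear the per-cpu
 * in_kernel_fpu flag; while it is set, interrupted_kernel_fpu_idle()
 * below returns false, so irq_fpu_usable() reports the FPU as
 * unavailable to interrupt context.
 */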
void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

void kernel_fpu_enable(void)
{
	this_cpu_write(in_kernel_fpu, false);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case, when we return true unless we've
 * already been eager and saved the state in kernel_fpu_begin().
 */
static inline bool interrupted_kernel_fpu_idle(void)
{
	if (this_cpu_read(in_kernel_fpu))
		return false;

	if (use_eager_fpu())
		return __thread_has_fpu(current);

	return !__thread_has_fpu(current) &&
		(read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static inline bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode_vm(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

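/*
 * Illustrative sketch (not part of this file) of how kernel code is
 * expected to use these primitives: check irq_fpu_usable() in any
 * context that might be an interrupt, then bracket the FPU/SSE
 * instructions with kernel_fpu_begin()/kernel_fpu_end() (the non-__
 * wrappers, which also handle preemption):
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... FPU/SSE instructions ...
 *		kernel_fpu_end();
 *	} else {
 *		... fall back to an integer/scalar implementation ...
 *	}
 */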
void __kernel_fpu_begin(void)
{
	struct task_struct *me = current;

	this_cpu_write(in_kernel_fpu, true);

	if (__thread_has_fpu(me)) {
		__save_init_fpu(me);
	} else if (!use_eager_fpu()) {
		this_cpu_write(fpu_owner_task, NULL);
		clts();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct task_struct *me = current;

	if (__thread_has_fpu(me)) {
		if (WARN_ON(restore_fpu_checking(me)))
			drop_init_fpu(me);
	} else if (!use_eager_fpu()) {
		stts();
	}

	this_cpu_write(in_kernel_fpu, false);
}
EXPORT_SYMBOL(__kernel_fpu_end);

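/*
 * If @tsk currently owns the FPU, save its register state into the
 * in-memory image and release the FPU; otherwise just clear its
 * fpu_counter (the counter used by the lazy-restore heuristic).
 */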
void unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
	if (__thread_has_fpu(tsk)) {
		__save_init_fpu(tsk);
		__thread_fpu_end(tsk);
	} else
		tsk->thread.fpu_counter = 0;
	preempt_enable();
}
EXPORT_SYMBOL(unlazy_fpu);

unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);
static struct i387_fxsave_struct fx_scratch;

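/*
 * Ask the hardware which MXCSR bits may legitimately be set: FXSAVE
 * reports the supported bits in mxcsr_mask, where a stored value of 0
 * means the default mask 0x0000ffbf applies.
 */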
static void mxcsr_feature_mask_init(void)
{
	unsigned long mask = 0;

	if (cpu_has_fxsr) {
		memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
		asm volatile("fxsave %0" : "+m" (fx_scratch));
		mask = fx_scratch.mxcsr_mask;
		if (mask == 0)
			mask = 0x0000ffbf;
	}
	mxcsr_feature_mask &= mask;
}

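/*
 * Pick the size of the per-task FPU state area: the soft-FPU
 * emulation layout when there is no FPU, otherwise the FXSAVE or
 * legacy FSAVE layout.  xsave_init() may enlarge xstate_size later
 * if XSAVE is available.
 */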
static void init_thread_xstate(void)
{
	/*
	 * Note that xstate_size might be overwritten later during
	 * xsave_init().
	 */

	if (!cpu_has_fpu) {
		/*
		 * Disable xsave as we do not support it if i387
		 * emulation is enabled.
		 */
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
		xstate_size = sizeof(struct i387_soft_struct);
		return;
	}

	if (cpu_has_fxsr)
		xstate_size = sizeof(struct i387_fxsave_struct);
	else
		xstate_size = sizeof(struct i387_fsave_struct);
}

/*
 * Called at bootup to set up the initial FPU state that is later cloned
 * into all processes.
 */

void fpu_init(void)
{
	unsigned long cr0;
	unsigned long cr4_mask = 0;

#ifndef CONFIG_MATH_EMULATION
	if (!cpu_has_fpu) {
		pr_emerg("No FPU found and no math emulation present\n");
		pr_emerg("Giving up\n");
		for (;;)
			asm volatile("hlt");
	}
#endif
	if (cpu_has_fxsr)
		cr4_mask |= X86_CR4_OSFXSR;
	if (cpu_has_xmm)
		cr4_mask |= X86_CR4_OSXMMEXCPT;
	if (cr4_mask)
		set_in_cr4(cr4_mask);

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
	if (!cpu_has_fpu)
		cr0 |= X86_CR0_EM;
	write_cr0(cr0);

	/*
	 * init_thread_xstate() is called only once, to avoid overwriting
	 * xstate_size during boot or during CPU hotplug.
	 */
	if (xstate_size == 0)
		init_thread_xstate();

	mxcsr_feature_mask_init();
	xsave_init();
	eager_fpu_init();
}

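/*
 * Initialize @fpu's register image to the architectural reset
 * defaults: via the soft-FPU code when there is no FPU, fx_finit()
 * for the FXSAVE layout, or by hand for the legacy FSAVE layout.
 */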
void fpu_finit(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state->soft);
		return;
	}

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state->fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state->fsave;
		memset(fp, 0, xstate_size);
		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpu_finit);

/*
 * The _current_ task is using the FPU for the first time, so
 * initialize its FPU state (setting mxcsr to its reset default if we
 * support XMM instructions) and remember that the task has used the
 * FPU.
 */
int init_fpu(struct task_struct *tsk)
{
	int ret;

	if (tsk_used_math(tsk)) {
		if (cpu_has_fpu && tsk == current)
			unlazy_fpu(tsk);
		tsk->thread.fpu.last_cpu = ~0;
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpu_alloc(&tsk->thread.fpu);
	if (ret)
		return ret;

	fpu_finit(&tsk->thread.fpu);

	set_stopped_child_used_math(tsk);
	return 0;
}
EXPORT_SYMBOL_GPL(init_fpu);

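/*
 * The regset routines below implement the ptrace/core-dump views of
 * the FPU state (the classic i387, FXSAVE and xstate register sets);
 * they are wired up from the user_regset tables in ptrace.c.
 */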
/*
 * The xstateregs_active() routine is the same as the fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}

int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
}

int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpu.state->fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	return ret;
}

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	/*
	 * Copy the 48 software-defined bytes first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));

	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.state->xsave, 0, -1);
	return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct xsave_hdr_struct *xsave_hdr;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->xsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;

	xsave_hdr->xstate_bv &= pcntxt_mask;
	/*
	 * These bits must be zero.
	 */
	memset(xsave_hdr->reserved, 0, 48);

	return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

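/*
 * Expand the abridged 8-bit FXSR tag byte back into the full 16-bit
 * i387 tag word: each set bit marks an in-use register, which is then
 * classified as valid/zero/special from its exponent and significand;
 * clear bits become "empty" tags.
 */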
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * These should actually be the ds/cs at the time of the fpu
	 * exception, but that information is not available in 64-bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu.state->fsave, 0,
					   -1);

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu.state->fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
	return ret;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
	struct task_struct *tsk = current;
	int fpvalid;

	fpvalid = !!used_math();
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      fpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */

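/*
 * "no387" on the kernel command line makes the kernel ignore any
 * hardware FPU; without CONFIG_MATH_EMULATION the boot then stops
 * in fpu_init() above.
 */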
static int __init no_387(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FPU);
	return 1;
}

__setup("no387", no_387);

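/*
 * Probe for an FPU the classic way: clear TS/EM, execute
 * fninit/fnstsw/fnstcw against a poisoned fsw/fcw pattern, and check
 * that the status word reads back 0 and the control word shows the
 * expected reset bits.
 */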
void fpu_detect(struct cpuinfo_x86 *c)
{
	unsigned long cr0;
	u16 fsw, fcw;

	fsw = fcw = 0xffff;

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
	write_cr0(cr0);

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	if (fsw == 0 && (fcw & 0x103f) == 0x003f)
		set_cpu_cap(c, X86_FEATURE_FPU);
	else
		clear_cpu_cap(c, X86_FEATURE_FPU);

	/* The final cr0 value is set in fpu_init() */
}
653}