/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/sigframe.h>
#include <asm/xcr.h>

/*
 * Mask of the xstate features supported by both the CPU and the kernel.
 */
u64 pcntxt_mask;

/*
 * Represents init state for the supported extended state.
 */
struct xsave_struct *init_xstate_buf;

static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
static unsigned int *xstate_offsets, *xstate_sizes, xstate_features;

/*
 * If a processor implementation discerns that a processor state component is
 * in its initialized state, it may set the corresponding bit in
 * xsave_hdr.xstate_bv to '0', without modifying the corresponding memory
 * layout in the case of xsaveopt. While presenting the xstate information to
 * the user, we always ensure that the memory layout of a feature will be in
 * the init state if the corresponding header bit is zero. This is to ensure
 * that the user doesn't see some stale state in the memory layout during
 * signal handling, debugging etc.
 */
void __sanitize_i387_state(struct task_struct *tsk)
{
	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
	int feature_bit = 0x2;
	u64 xstate_bv;

	if (!fx)
		return;

	xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;

	/*
	 * If all the feature bits are set, none of the state components is
	 * in its init state and the memory layout is already up to date,
	 * so there is nothing for us to do.
	 */
	if ((xstate_bv & pcntxt_mask) == pcntxt_mask)
		return;

	/*
	 * FP is in init state
	 */
	if (!(xstate_bv & XSTATE_FP)) {
		fx->cwd = 0x37f;
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		memset(&fx->st_space[0], 0, 128);
	}

	/*
	 * SSE is in init state
	 */
	if (!(xstate_bv & XSTATE_SSE))
		memset(&fx->xmm_space[0], 0, 256);

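	/*
	 * The extended features start at bit 2 (bits 0 and 1 are FP and SSE,
	 * handled above). Shift the init-state mask down by two so the loop
	 * below can walk it while feature_bit indexes xstate_offsets[] and
	 * xstate_sizes[].
	 */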
	xstate_bv = (pcntxt_mask & ~xstate_bv) >> 2;

	/*
	 * Update all the other memory layouts for which the corresponding
	 * header bit is in the init state.
	 */
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			int offset = xstate_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

			memcpy(((void *) fx) + offset,
			       ((void *) init_xstate_buf) + offset,
			       size);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}
}

/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 */
static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
{
	int min_xstate_size = sizeof(struct i387_fxsave_struct) +
			      sizeof(struct xsave_hdr_struct);
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;

	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;

	/*
	 * Check for the presence of the second magic word at the end of the
	 * memory layout. This detects the case where the user just copied
	 * the legacy fpstate layout without copying the extended state
	 * information in the memory layout.
	 */
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;

	return 0;
}

/*
 * Signal frame handlers.
 */
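/*
 * Fill in the legacy fsave header at the start of the user signal frame:
 * convert from the task's fxstate on fxsr-capable CPUs, or propagate the
 * already-saved status word on fsave-only CPUs.
 */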
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_ia32 __user *fp = buf;

		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct i387_fsave_struct __user *fp = buf;
		u32 swd;
		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

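/*
 * Fill in the software-reserved bytes of the user [f]xsave frame. When
 * xsave is in use, also append FP_XSTATE_MAGIC2 after the xsave image and
 * force the FP/SSE bits in the saved xstate_bv, so that legacy sigreturn
 * paths are handled correctly.
 */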
static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xsave_struct __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xstate_bv;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));

	/*
	 * Read the xstate_bv which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffers.
	 */
	err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);

	/*
	 * For legacy compatibility, we always set the FP/SSE bits in the bit
	 * vector while saving the state to the user context. This enables us
	 * to capture any changes (during sigreturn) to the FP/SSE bits made
	 * by legacy applications which don't touch xstate_bv in the xsave
	 * header.
	 *
	 * xsave-aware apps can change the xstate_bv in the xsave header as
	 * well as change any contents in the memory layout. xrstor as part
	 * of sigreturn will capture all the changes.
	 */
	xstate_bv |= XSTATE_FPSSE;

	err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);

	return err;
}

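/*
 * Save the live FPU register state straight to the user buffer, using the
 * widest save instruction the CPU supports (xsave, fxsave or fsave).
 */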
static inline int save_user_xstate(struct xsave_struct __user *buf)
{
	int err;

	if (use_xsave())
		err = xsave_user(buf);
	else if (use_fxsr())
		err = fxsave_user((struct i387_fxsave_struct __user *) buf);
	else
		err = fsave_user((struct i387_fsave_struct __user *) buf);

	if (unlikely(err) && __clear_user(buf, xstate_size))
		err = -EFAULT;
	return err;
}

/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 * state is copied.
 * 'buf' points to 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frames.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * If the fpu, extended register state is live, save the state directly
 * to the user frame pointed to by the aligned pointer 'buf_fx'. Otherwise,
 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put an fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	struct xsave_struct *xsave = &current->thread.fpu.state->xsave;
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EACCES;

	if (!HAVE_HWFP)
		return fpregs_soft_get(current, NULL, 0,
			sizeof(struct user_i387_ia32_struct), NULL,
			(struct _fpstate_ia32 __user *) buf) ? -1 : 1;

	if (user_has_fpu()) {
		/* Save the live register state to the user directly. */
		if (save_user_xstate(buf_fx))
			return -1;
		/* Update the thread's fxstate to save the fsave header. */
		if (ia32_fxstate)
			fpu_fxsave(&tsk->thread.fpu);
	} else {
		sanitize_i387_state(tsk);
		if (__copy_to_user(buf_fx, xsave, xstate_size))
			return -1;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	drop_init_fpu(tsk);	/* trigger finit */

	return 0;
}

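/*
 * Sanitize the xstate just copied in from a user signal frame: clear the
 * reserved header bits, restrict xstate_bv to what the kernel supports,
 * mask off reserved mxcsr bits and, for 32-bit frames, rebuild the fxsave
 * area from the user's i387 environment.
 */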
static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xstate_bv, int fx_only)
{
	struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
	struct xsave_hdr_struct *xsave_hdr = &xsave->xsave_hdr;

	if (use_xsave()) {
		/* These bits must be zero. */
		xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;

		/*
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
		 */
		if (fx_only)
			xsave_hdr->xstate_bv = XSTATE_FPSSE;
		else
			xsave_hdr->xstate_bv &= (pcntxt_mask & xstate_bv);
	}

	if (use_fxsr()) {
		/*
		 * mxcsr reserved bits must be masked to zero for security
		 * reasons.
		 */
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		convert_to_fxsr(tsk, ia32_env);
	}
}

/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 */
static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
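		/*
		 * xrstor needs a 64-byte aligned source. If the user buffer
		 * is unaligned, or only FP/SSE data is present, reinitialize
		 * the extended states from init_xstate_buf and restore just
		 * the fxsave portion of the frame.
		 */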
		if ((unsigned long)buf % 64 || fx_only) {
			u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
			xrstor_state(init_xstate_buf, init_bv);
			return fxrstor_user(buf);
		} else {
			u64 init_bv = pcntxt_mask & ~xbv;
			if (unlikely(init_bv))
				xrstor_state(init_xstate_buf, init_bv);
			return xrestore_user(buf, xbv);
		}
	} else if (use_fxsr()) {
		return fxrstor_user(buf);
	} else
		return frstor_user(buf);
}

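/*
 * Restore the FPU state from a user signal frame. For 32-bit frames with
 * fxstate, the state is copied into the task and sanitized; for all other
 * frames, it is restored directly into the registers.
 */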
int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	int state_size = xstate_size;
	u64 xstate_bv = 0;
	int fx_only = 0;

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!buf) {
		drop_init_fpu(tsk);
		return 0;
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

	if (!used_math() && init_fpu(tsk))
		return -1;

	if (!HAVE_HWFP) {
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;
	}

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
			xstate_bv = fx_sw_user.xstate_bv;
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct the fxstate from the fsave
		 * header and sanitize the copied state.
		 */
		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
		struct user_i387_ia32_struct env;
		int err = 0;

		/*
		 * Drop the current fpu, which clears used_math(). This
		 * ensures that a context switch during the copy of the new
		 * state cannot save/restore a half-copied intermediate state
		 * and thereby corrupt it. We will be ready to save/restore
		 * the state again only after set_used_math() is set below.
		 */
		drop_fpu(tsk);

		if (__copy_from_user(xsave, buf_fx, state_size) ||
		    __copy_from_user(&env, buf, sizeof(env))) {
			err = -1;
		} else {
			sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
			set_used_math();
		}

		if (use_eager_fpu())
			math_state_restore();

		return err;
	} else {
		/*
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
		 */
		user_fpu_begin();
		if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
			drop_init_fpu(tsk);
			return -1;
		}
	}

	return 0;
}

/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed to by the fpstate pointer in the sigcontext.
 * This will be saved whenever the FP and extended state context is
 * saved on the user stack during signal handler delivery to the user.
 */
static void prepare_fx_sw_frame(void)
{
	int fsave_header_size = sizeof(struct i387_fsave_struct);
	int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;

	if (config_enabled(CONFIG_X86_32))
		size += fsave_header_size;

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
	fx_sw_reserved.extended_size = size;
	fx_sw_reserved.xstate_bv = pcntxt_mask;
	fx_sw_reserved.xstate_size = xstate_size;

	if (config_enabled(CONFIG_IA32_EMULATION)) {
		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size += fsave_header_size;
	}
}

/*
 * Enable the extended processor state save/restore feature
 */
static inline void xstate_enable(void)
{
	set_in_cr4(X86_CR4_OSXSAVE);
	xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
}

/*
 * Record the offsets and sizes of different state managed by the xsave
 * memory layout.
 */
static void __init setup_xstate_features(void)
{
	int eax, ebx, ecx, edx, leaf = 0x2;

	xstate_features = fls64(pcntxt_mask);
	xstate_offsets = alloc_bootmem(xstate_features * sizeof(int));
	xstate_sizes = alloc_bootmem(xstate_features * sizeof(int));

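	/*
	 * CPUID leaf 0xD enumerates the extended state components starting
	 * at sub-leaf 2 (bits 0 and 1, FP and SSE, live in the legacy area):
	 * eax reports the component's size and ebx its offset within the
	 * xsave buffer.
	 */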
	do {
		cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);

		if (eax == 0)
			break;

		xstate_offsets[leaf] = ebx;
		xstate_sizes[leaf] = eax;

		leaf++;
	} while (1);
}

/*
 * setup the xstate image representing the init state
 */
static void __init setup_init_fpu_buf(void)
{
	/*
	 * Setup init_xstate_buf to represent the init state of
	 * all the features managed by xsave.
	 */
	init_xstate_buf = alloc_bootmem_align(xstate_size,
					      __alignof__(struct xsave_struct));
	fx_finit(&init_xstate_buf->i387);

	if (!cpu_has_xsave)
		return;

	setup_xstate_features();

	/*
	 * Init all the feature state with the header's xstate_bv being 0x0.
	 */
	xrstor_state(init_xstate_buf, -1);
	/*
	 * Dump the init state again. This is to identify the init state
	 * of any feature which is not represented by all zeros.
	 */
	xsave_state(init_xstate_buf, -1);
}

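/*
 * "eagerfpu=" boot parameter: force eager FPU restore on or off, or leave
 * it in auto mode (auto-enabled below when xsaveopt is available).
 */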
static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
static int __init eager_fpu_setup(char *s)
{
	if (!strcmp(s, "on"))
		eagerfpu = ENABLE;
	else if (!strcmp(s, "off"))
		eagerfpu = DISABLE;
	else if (!strcmp(s, "auto"))
		eagerfpu = AUTO;
	return 1;
}
__setup("eagerfpu=", eager_fpu_setup);

/*
 * Enable and initialize the xsave feature.
 */
static void __init xstate_enable_boot_cpu(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
		WARN(1, KERN_ERR "XSTATE_CPUID missing\n");
		return;
	}

	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	pcntxt_mask = eax + ((u64)edx << 32);

	if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
		pr_err("FP/SSE not shown under xsave features 0x%llx\n",
		       pcntxt_mask);
		BUG();
	}

	/*
	 * Support only the state known to the OS.
	 */
	pcntxt_mask = pcntxt_mask & XCNTXT_MASK;

	xstate_enable();

	/*
	 * Recompute the context size for the enabled features.
	 */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	xstate_size = ebx;

	update_regset_xstate_info(xstate_size, pcntxt_mask);
	prepare_fx_sw_frame();
	setup_init_fpu_buf();

	/* Auto enable eagerfpu for xsaveopt */
	if (cpu_has_xsaveopt && eagerfpu != DISABLE)
		eagerfpu = ENABLE;

	pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n",
		pcntxt_mask, xstate_size);
}

/*
 * For the very first instance, this calls xstate_enable_boot_cpu();
 * for all subsequent instances, this calls xstate_enable().
 *
 * This is somewhat obfuscated due to the lack of powerful enough
 * overrides for the section checks.
 */
void __cpuinit xsave_init(void)
{
	static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
	void (*this_func)(void);

	if (!cpu_has_xsave)
		return;

	this_func = next_func;
	next_func = xstate_enable;
	this_func();
}

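/*
 * Boot-CPU setup for eager FPU mode: allocate the boot task's FPU state
 * buffer and make sure the init xstate buffer has been set up.
 */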
static inline void __init eager_fpu_init_bp(void)
{
	current->thread.fpu.state =
		alloc_bootmem_align(xstate_size, __alignof__(struct xsave_struct));
	if (!init_xstate_buf)
		setup_init_fpu_buf();
}

void __cpuinit eager_fpu_init(void)
{
	static __refdata void (*boot_func)(void) = eager_fpu_init_bp;

	clear_used_math();
	current_thread_info()->status = 0;

	if (eagerfpu == ENABLE)
		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);

	if (!cpu_has_eager_fpu) {
		stts();
		return;
	}

	if (boot_func) {
		boot_func();
		boot_func = NULL;
	}

	/*
	 * This is the same as math_state_restore(). But use_xsave() is
	 * not yet patched to use math_state_restore().
	 */
	init_fpu(current);
	__thread_fpu_begin(current);
	if (cpu_has_xsave)
		xrstor_state(init_xstate_buf, -1);
	else
		fxrstor_checking(&init_xstate_buf->i387);
}