/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

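/*
 * For example, an opcode flagged (DstReg | SrcImm) packs OpReg into the
 * 5-bit field at DstShift and OpImm into the one at SrcShift; the decode
 * path later in this file recovers each operand type with
 * (ctxt->d >> Shift) & OpMask.
 */
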
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define InstrDual   (6<<15)	/* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)	/* Different instruction for 32/64 bit */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

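/*
 * DstXacc describes instructions (e.g. the widening MUL/IMUL and the
 * DIV family) whose destination is the extended accumulator: the low
 * part rides in the dst operand slot and the high part in the src slot,
 * so SrcWrite is needed to get the high half written back as well.
 */
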
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

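/*
 * The X* macros replicate an initializer: X4(x) expands to "x, x, x, x"
 * and so on up to X16.  They keep the big opcode tables (defined later
 * in this file) compact when runs of consecutive entries are identical.
 */
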
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

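/*
 * Rough sketch of the dispatch (the real work is done by fastop(),
 * declared below and defined further down in this file): since every
 * size variant is FASTOP_SIZE bytes, the entry point for the current
 * operand size is computed rather than looked up, approximately
 *
 *	fop = em_add;                                 (byte variant)
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;  (w/l/q variants)
 *
 * so dst.bytes of 1/2/4/8 lands on offsets 0/8/16/24.
 */
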
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

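/*
 * Taken together, the helpers above form a small write-back cache over
 * the guest GPRs: reg_read() pulls a register in lazily, reg_write()
 * and reg_rmw() mark it dirty, writeback_registers() flushes only the
 * dirty ones through ctxt->ops->write_gpr(), and invalidate_registers()
 * resets the cache state.
 */
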
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

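/*
 * As a concrete example of the generated code, FASTOP1(not) emits four
 * stubs spaced FASTOP_SIZE (8) bytes apart, at em_not + 0/8/16/24:
 *
 *	notb %al;  ret
 *	notw %ax;  ret
 *	notl %eax; ret
 *	notq %rax; ret		(64-bit only, via ON64)
 */
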
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

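/*
 * Each FOP_SETCC stub is ".align 4", so the table starting at em_setcc
 * is indexed by the condition-code nibble: entry n sits at
 * em_setcc + 4 * n, which is how test_cc() below finds it.
 */
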
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

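/*
 * The width handling above follows the architectural rules for partial
 * register writes: 1- and 2-byte stores leave the upper bytes intact,
 * while a 4-byte store overwrites the whole register, matching the
 * implicit zero-extension of 32-bit results in 64-bit mode.
 */
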
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

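/*
 * Example: with a 16-bit stack segment (ss.d == 0), stack_mask() is
 * 0xffff and stack_size() is 2; with a 32-bit one they are 0xffffffff
 * and 4; in 64-bit mode the mask is ~0UL and the size is 8.
 */
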
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

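/*
 * Example: a 16-byte MOVDQA access whose linear address is not 16-byte
 * aligned is rejected with #GP by the insn_aligned() check in
 * __linearize() below, while MOVDQU (flagged Unaligned) passes.
 */
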
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

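/*
 * Note on the expand-down case above: such a segment is valid only for
 * offsets strictly above the limit, up to 0xffff or 0xffffffff
 * depending on desc.d.  With limit 0x0fff and d == 1, for instance,
 * offsets 0x1000 through 0xffffffff are legal, which is why
 * "addr.ea <= lim" is the failing condition.
 */
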
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

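/*
 * The "15UL ^ cur_size" above is a branchless 15 - cur_size: cur_size
 * never exceeds 15 (the architectural instruction-length limit), and
 * XOR with 0xf equals subtraction from 15 when no borrow can occur.
 */
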
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

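/*
 * Both macros rely on their caller providing a local "int rc" and a
 * "done:" label; a typical use inside the decoder is
 *
 *	modrm_ea += insn_fetch(s8, ctxt);
 *
 * which bails out through "done" if the fetch fails.
 */
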
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

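/*
 * The op_bytes == 2 -> 3 adjustment mirrors LGDT/LIDT behaviour with a
 * 16-bit operand size, where only 24 bits of the base are used; the
 * third byte is read into *address, which was zeroed first so the
 * remaining high bits stay clear.
 */
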
FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

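/*
 * Example: test_cc(4, flags) calls the stub at em_setcc + 16, which is
 * setz, so it returns nonzero exactly when ZF is set; 4 is the
 * condition nibble used by the JE/JZ (0x74) family of opcodes.
 */
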
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

Avi Kivity1c73ef6652007-11-01 06:31:28 +02001157static int decode_modrm(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001158 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001159{
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001160 u8 sib;
Bandan Das02357bd2014-04-16 12:46:11 -04001161 int index_reg, base_reg, scale;
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001162 int rc = X86EMUL_CONTINUE;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001163 ulong modrm_ea = 0;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001164
Bandan Das02357bd2014-04-16 12:46:11 -04001165 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1166 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1167 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001168
Bandan Das02357bd2014-04-16 12:46:11 -04001169 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001170 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
Bandan Das02357bd2014-04-16 12:46:11 -04001171 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001172 ctxt->modrm_seg = VCPU_SREG_DS;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001173
Nadav Amit9b88ae92014-05-25 23:05:21 +03001174 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001175 op->type = OP_REG;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001176 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Paolo Bonzini8acb42072013-05-30 16:35:55 +02001177 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02001178 ctxt->d & ByteOp);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001179 if (ctxt->d & Sse) {
Avi Kivity12537912011-03-29 11:41:27 +02001180 op->type = OP_XMM;
1181 op->bytes = 16;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001182 op->addr.xmm = ctxt->modrm_rm;
1183 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
Avi Kivity12537912011-03-29 11:41:27 +02001184 return rc;
1185 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001186 if (ctxt->d & Mmx) {
1187 op->type = OP_MM;
1188 op->bytes = 8;
Paolo Bonzinibdc90722014-05-06 14:03:29 +02001189 op->addr.mm = ctxt->modrm_rm & 7;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001190 return rc;
1191 }
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001192 fetch_register_operand(op);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001193 return rc;
1194 }
1195
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001196 op->type = OP_MEM;
1197
        if (ctxt->ad_bytes == 2) {
                unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
                unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
                unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
                unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

                /* 16-bit ModR/M decode. */
                switch (ctxt->modrm_mod) {
                case 0:
                        if (ctxt->modrm_rm == 6)
                                modrm_ea += insn_fetch(u16, ctxt);
                        break;
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(u16, ctxt);
                        break;
                }
                switch (ctxt->modrm_rm) {
                case 0:
                        modrm_ea += bx + si;
                        break;
                case 1:
                        modrm_ea += bx + di;
                        break;
                case 2:
                        modrm_ea += bp + si;
                        break;
                case 3:
                        modrm_ea += bp + di;
                        break;
                case 4:
                        modrm_ea += si;
                        break;
                case 5:
                        modrm_ea += di;
                        break;
                case 6:
                        if (ctxt->modrm_mod != 0)
                                modrm_ea += bp;
                        break;
                case 7:
                        modrm_ea += bx;
                        break;
                }
                if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
                    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
                        ctxt->modrm_seg = VCPU_SREG_SS;
                modrm_ea = (u16)modrm_ea;
        } else {
                /* 32/64-bit ModR/M decode. */
                if ((ctxt->modrm_rm & 7) == 4) {
                        sib = insn_fetch(u8, ctxt);
                        index_reg |= (sib >> 3) & 7;
                        base_reg |= sib & 7;
                        scale = sib >> 6;

                        if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
                                modrm_ea += insn_fetch(s32, ctxt);
                        else {
                                modrm_ea += reg_read(ctxt, base_reg);
                                adjust_modrm_seg(ctxt, base_reg);
                                /* Increment ESP on POP [ESP] */
                                if ((ctxt->d & IncSP) &&
                                    base_reg == VCPU_REGS_RSP)
                                        modrm_ea += ctxt->op_bytes;
                        }
                        if (index_reg != 4)
                                modrm_ea += reg_read(ctxt, index_reg) << scale;
                } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
                        modrm_ea += insn_fetch(s32, ctxt);
                        if (ctxt->mode == X86EMUL_MODE_PROT64)
                                ctxt->rip_relative = 1;
                } else {
                        base_reg = ctxt->modrm_rm;
                        modrm_ea += reg_read(ctxt, base_reg);
                        adjust_modrm_seg(ctxt, base_reg);
                }
                switch (ctxt->modrm_mod) {
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(s32, ctxt);
                        break;
                }
        }
        op->addr.mem.ea = modrm_ea;
        if (ctxt->ad_bytes != 8)
                ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
        return rc;
}
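
/*
 * Illustrative example of the 16-bit decode above: "mov ax, [bp+si+0x10]"
 * encodes ModR/M 0x42, i.e. mod=1, rm=2, so modrm_ea = bp + si + (s8)0x10,
 * and because rm=2 uses BP as a base the default segment for the access
 * becomes SS.
 */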

static int decode_abs(struct x86_emulate_ctxt *ctxt,
                      struct operand *op)
{
        int rc = X86EMUL_CONTINUE;

        op->type = OP_MEM;
        switch (ctxt->ad_bytes) {
        case 2:
                op->addr.mem.ea = insn_fetch(u16, ctxt);
                break;
        case 4:
                op->addr.mem.ea = insn_fetch(u32, ctxt);
                break;
        case 8:
                op->addr.mem.ea = insn_fetch(u64, ctxt);
                break;
        }
done:
        return rc;
}
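
/*
 * decode_abs() serves the absolute "moffs" addressing forms (e.g.
 * "mov al, [moffs]", opcodes 0xa0-0xa3), where the instruction carries a
 * bare offset of address-size width instead of a ModR/M byte.
 */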

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
        long sv = 0, mask;

        if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
                mask = ~((long)ctxt->dst.bytes * 8 - 1);

                if (ctxt->src.bytes == 2)
                        sv = (s16)ctxt->src.val & (s16)mask;
                else if (ctxt->src.bytes == 4)
                        sv = (s32)ctxt->src.val & (s32)mask;
                else
                        sv = (s64)ctxt->src.val & (s64)mask;

                ctxt->dst.addr.mem.ea = address_mask(ctxt,
                                        ctxt->dst.addr.mem.ea + (sv >> 3));
        }

        /* only subword offset */
        ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
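
/*
 * Worked example for the bit-offset math above: "bt word [mem], 35" has
 * dst.bytes == 2, so mask = ~15 and sv = 35 & ~15 = 32; the effective
 * address advances by sv >> 3 = 4 bytes and the in-word bit offset
 * becomes 35 & 15 = 3.
 */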

static int read_emulated(struct x86_emulate_ctxt *ctxt,
                         unsigned long addr, void *dest, unsigned size)
{
        int rc;
        struct read_cache *mc = &ctxt->mem_read;

        if (mc->pos < mc->end)
                goto read_cached;

        WARN_ON((mc->end + size) >= sizeof(mc->data));

        rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
                                      &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        mc->end += size;

read_cached:
        memcpy(dest, mc->data + mc->pos, size);
        mc->pos += size;
        return X86EMUL_CONTINUE;
}
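
/*
 * The mem_read cache exists so that a restarted emulation (e.g. after
 * exiting to complete an MMIO access) replays earlier reads from the
 * cache instead of re-issuing them with possibly different results.
 */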

static int segmented_read(struct x86_emulate_ctxt *ctxt,
                          struct segmented_address addr,
                          void *data,
                          unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           const void *data,
                           unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_emulated(ctxt, linear, data, size,
                                         &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
                             struct segmented_address addr,
                             const void *orig_data, const void *data,
                             unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
                                           size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
                           unsigned int size, unsigned short port,
                           void *dest)
{
        struct read_cache *rc = &ctxt->io_read;

        if (rc->pos == rc->end) { /* refill pio read ahead */
                unsigned int in_page, n;
                unsigned int count = ctxt->rep_prefix ?
                        address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
                in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
                        offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
                        PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
                n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
                if (n == 0)
                        n = 1;
                rc->pos = rc->end = 0;
                if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
                        return 0;
                rc->end = n * size;
        }

        if (ctxt->rep_prefix && (ctxt->d & String) &&
            !(ctxt->eflags & X86_EFLAGS_DF)) {
                ctxt->dst.data = rc->data + rc->pos;
                ctxt->dst.type = OP_MEM_STR;
                ctxt->dst.count = (rc->end - rc->pos) / size;
                rc->pos = rc->end;
        } else {
                memcpy(dest, rc->data + rc->pos, size);
                rc->pos += size;
        }
        return 1;
}
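
/*
 * Rough read-ahead example (sizes are illustrative): "rep insb" with
 * RCX = 512 and RDI 100 bytes below a page boundary batches
 * n = min3(100, sizeof(rc->data), 512) = 100 port reads into a single
 * pio_in_emulated() call; later iterations drain the io_read cache.
 */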

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 index, struct desc_struct *desc)
{
        struct desc_ptr dt;
        ulong addr;

        ctxt->ops->get_idt(ctxt, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, index << 3 | 0x2);

        addr = dt.address + index * 8;
        return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
                                   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, struct desc_ptr *dt)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        u32 base3 = 0;

        if (selector & 1 << 2) {
                struct desc_struct desc;
                u16 sel;

                memset(dt, 0, sizeof *dt);
                if (!ops->get_segment(ctxt, &sel, &desc, &base3,
                                      VCPU_SREG_LDTR))
                        return;

                dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
                dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
        } else
                ops->get_gdt(ctxt, dt);
}
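
/*
 * Selector layout reminder: bits 1:0 are the RPL, bit 2 is the table
 * indicator tested above (0 = GDT, 1 = LDT), and bits 15:3 index the
 * table. E.g. selector 0x2b has RPL 3 and names GDT entry 5.
 */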

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
                              u16 selector, ulong *desc_addr_p)
{
        struct desc_ptr dt;
        u16 index = selector >> 3;
        ulong addr;

        get_descriptor_table_ptr(ctxt, selector, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, selector & 0xfffc);

        addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
        if (addr >> 32 != 0) {
                u64 efer = 0;

                ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                if (!(efer & EFER_LMA))
                        addr &= (u32)-1;
        }
#endif

        *desc_addr_p = addr;
        return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, struct desc_struct *desc,
                                   ulong *desc_addr_p)
{
        int rc;

        rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
                                   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                    u16 selector, struct desc_struct *desc)
{
        int rc;
        ulong addr;

        rc = get_descriptor_ptr(ctxt, selector, &addr);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
                                    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, int seg, u8 cpl,
                                     enum x86_transfer_type transfer,
                                     struct desc_struct *desc)
{
        struct desc_struct seg_desc, old_desc;
        u8 dpl, rpl;
        unsigned err_vec = GP_VECTOR;
        u32 err_code = 0;
        bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
        ulong desc_addr;
        int ret;
        u16 dummy;
        u32 base3 = 0;

        memset(&seg_desc, 0, sizeof seg_desc);

        if (ctxt->mode == X86EMUL_MODE_REAL) {
                /* set real mode segment descriptor (keep limit etc. for
                 * unreal mode) */
                ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
                set_desc_base(&seg_desc, selector << 4);
                goto load;
        } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
                /* VM86 needs a clean new segment descriptor */
                set_desc_base(&seg_desc, selector << 4);
                set_desc_limit(&seg_desc, 0xffff);
                seg_desc.type = 3;
                seg_desc.p = 1;
                seg_desc.s = 1;
                seg_desc.dpl = 3;
                goto load;
        }

        rpl = selector & 3;

        /* NULL selector is not valid for TR, CS and SS (except for long mode) */
        if ((seg == VCPU_SREG_CS
             || (seg == VCPU_SREG_SS
                 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
             || seg == VCPU_SREG_TR)
            && null_selector)
                goto exception;

        /* TR should be in GDT only */
        if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
                goto exception;

        if (null_selector) /* for NULL selector skip all following checks */
                goto load;

        ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
        if (ret != X86EMUL_CONTINUE)
                return ret;

        err_code = selector & 0xfffc;
        err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
                                                           GP_VECTOR;

        /* can't load system descriptor into segment selector */
        if (seg <= VCPU_SREG_GS && !seg_desc.s) {
                if (transfer == X86_TRANSFER_CALL_JMP)
                        return X86EMUL_UNHANDLEABLE;
                goto exception;
        }

        if (!seg_desc.p) {
                err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
                goto exception;
        }

        dpl = seg_desc.dpl;

        switch (seg) {
        case VCPU_SREG_SS:
                /*
                 * segment is not a writable data segment, or the
                 * selector's RPL != CPL, or the descriptor's DPL != CPL
                 */
                if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
                        goto exception;
                break;
        case VCPU_SREG_CS:
                if (!(seg_desc.type & 8))
                        goto exception;

                if (seg_desc.type & 4) {
                        /* conforming */
                        if (dpl > cpl)
                                goto exception;
                } else {
                        /* nonconforming */
                        if (rpl > cpl || dpl != cpl)
                                goto exception;
                }
                /* in long-mode d/b must be clear if l is set */
                if (seg_desc.d && seg_desc.l) {
                        u64 efer = 0;

                        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                        if (efer & EFER_LMA)
                                goto exception;
                }

                /* CS(RPL) <- CPL */
                selector = (selector & 0xfffc) | cpl;
                break;
        case VCPU_SREG_TR:
                if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
                        goto exception;
                old_desc = seg_desc;
                seg_desc.type |= 2; /* busy */
                ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
                                                  sizeof(seg_desc), &ctxt->exception);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
                break;
        case VCPU_SREG_LDTR:
                if (seg_desc.s || seg_desc.type != 2)
                        goto exception;
                break;
        default: /* DS, ES, FS, or GS */
                /*
                 * segment is not a data or readable code segment or
                 * ((segment is a data or nonconforming code segment)
                 * and (both RPL and CPL > DPL))
                 */
                if ((seg_desc.type & 0xa) == 0x8 ||
                    (((seg_desc.type & 0xc) != 0xc) &&
                     (rpl > dpl && cpl > dpl)))
                        goto exception;
                break;
        }

        if (seg_desc.s) {
                /* mark segment as accessed */
                if (!(seg_desc.type & 1)) {
                        seg_desc.type |= 1;
                        ret = write_segment_descriptor(ctxt, selector,
                                                       &seg_desc);
                        if (ret != X86EMUL_CONTINUE)
                                return ret;
                }
        } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
                ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
                                          sizeof(base3), &ctxt->exception);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
                if (is_noncanonical_address(get_desc_base(&seg_desc) |
                                            ((u64)base3 << 32)))
                        return emulate_gp(ctxt, 0);
        }
load:
        ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
        if (desc)
                *desc = seg_desc;
        return X86EMUL_CONTINUE;
exception:
        return emulate_exception(ctxt, err_vec, err_code, true);
}
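
/*
 * Flow summary: real/VM86 loads synthesize a descriptor from
 * selector << 4; protected-mode loads fetch the descriptor, apply the
 * per-register privilege checks above, set the accessed bit (or mark the
 * TSS busy via a locked cmpxchg), and only then commit the new
 * selector/descriptor pair with set_segment().
 */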

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, int seg)
{
        u8 cpl = ctxt->ops->cpl(ctxt);
        return __load_segment_descriptor(ctxt, selector, seg, cpl,
                                         X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
        return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
        switch (op->type) {
        case OP_REG:
                write_register_operand(op);
                break;
        case OP_MEM:
                if (ctxt->lock_prefix)
                        return segmented_cmpxchg(ctxt,
                                                 op->addr.mem,
                                                 &op->orig_val,
                                                 &op->val,
                                                 op->bytes);
                else
                        return segmented_write(ctxt,
                                               op->addr.mem,
                                               &op->val,
                                               op->bytes);
                break;
        case OP_MEM_STR:
                return segmented_write(ctxt,
                                       op->addr.mem,
                                       op->data,
                                       op->bytes * op->count);
                break;
        case OP_XMM:
                write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
                break;
        case OP_MM:
                write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
                break;
        case OP_NONE:
                /* no writeback */
                break;
        default:
                break;
        }
        return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
        struct segmented_address addr;

        rsp_increment(ctxt, -bytes);
        addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
        addr.seg = VCPU_SREG_SS;

        return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
        /* Disable writeback. */
        ctxt->dst.type = OP_NONE;
        return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
                       void *dest, int len)
{
        int rc;
        struct segmented_address addr;

        addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
        addr.seg = VCPU_SREG_SS;
        rc = segmented_read(ctxt, addr, dest, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rsp_increment(ctxt, len);
        return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
        return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
                        void *dest, int len)
{
        int rc;
        unsigned long val, change_mask;
        int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
        int cpl = ctxt->ops->cpl(ctxt);

        rc = emulate_pop(ctxt, &val, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
                      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
                      X86_EFLAGS_AC | X86_EFLAGS_ID;

        switch (ctxt->mode) {
        case X86EMUL_MODE_PROT64:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT16:
                if (cpl == 0)
                        change_mask |= X86_EFLAGS_IOPL;
                if (cpl <= iopl)
                        change_mask |= X86_EFLAGS_IF;
                break;
        case X86EMUL_MODE_VM86:
                if (iopl < 3)
                        return emulate_gp(ctxt, 0);
                change_mask |= X86_EFLAGS_IF;
                break;
        default: /* real mode */
                change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
                break;
        }

        *(unsigned long *)dest =
                (ctxt->eflags & ~change_mask) | (val & change_mask);

        return rc;
}
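
/*
 * Example of the masking above: POPF at CPL 3 with IOPL 0 leaves both IF
 * and IOPL out of change_mask, so those flags are silently preserved
 * rather than faulting.
 */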

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
        ctxt->dst.type = OP_REG;
        ctxt->dst.addr.reg = &ctxt->eflags;
        ctxt->dst.bytes = ctxt->op_bytes;
        return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned frame_size = ctxt->src.val;
        unsigned nesting_level = ctxt->src2.val & 31;
        ulong rbp;

        if (nesting_level)
                return X86EMUL_UNHANDLEABLE;

        rbp = reg_read(ctxt, VCPU_REGS_RBP);
        rc = push(ctxt, &rbp, stack_size(ctxt));
        if (rc != X86EMUL_CONTINUE)
                return rc;
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
                      stack_mask(ctxt));
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
                      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
                      stack_mask(ctxt));
        return X86EMUL_CONTINUE;
}
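
/*
 * Only the common nesting_level == 0 form is handled: "enter 32, 0" is
 * equivalent to "push rbp; mov rbp, rsp; sub rsp, 32". Nested frames are
 * rare enough to punt back as unhandleable.
 */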

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
                      stack_mask(ctxt));
        return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
        int seg = ctxt->src2.val;

        ctxt->src.val = get_segment_selector(ctxt, seg);
        if (ctxt->op_bytes == 4) {
                rsp_increment(ctxt, -2);
                ctxt->op_bytes = 2;
        }

        return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
        int seg = ctxt->src2.val;
        unsigned long selector;
        int rc;

        rc = emulate_pop(ctxt, &selector, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        if (ctxt->modrm_reg == VCPU_SREG_SS)
                ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
        if (ctxt->op_bytes > 2)
                rsp_increment(ctxt, ctxt->op_bytes - 2);

        rc = load_segment_descriptor(ctxt, (u16)selector, seg);
        return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
        unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
        int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RAX;

        while (reg <= VCPU_REGS_RDI) {
                (reg == VCPU_REGS_RSP) ?
                (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

                rc = em_push(ctxt);
                if (rc != X86EMUL_CONTINUE)
                        return rc;

                ++reg;
        }

        return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
        ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
        return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
        int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RDI;
        u32 val;

        while (reg >= VCPU_REGS_RAX) {
                if (reg == VCPU_REGS_RSP) {
                        rsp_increment(ctxt, ctxt->op_bytes);
                        --reg;
                }

                rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
                if (rc != X86EMUL_CONTINUE)
                        break;
                assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
                --reg;
        }
        return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        int rc;
        struct desc_ptr dt;
        gva_t cs_addr;
        gva_t eip_addr;
        u16 cs, eip;

        /* TODO: Add limit checks */
        ctxt->src.val = ctxt->eflags;
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

        ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->src.val = ctxt->_eip;
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ops->get_idt(ctxt, &dt);

        eip_addr = dt.address + (irq << 2);
        cs_addr = dt.address + (irq << 2) + 2;

        rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->_eip = eip;

        return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
        int rc;

        invalidate_registers(ctxt);
        rc = __emulate_int_real(ctxt, irq);
        if (rc == X86EMUL_CONTINUE)
                writeback_registers(ctxt);
        return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
        switch (ctxt->mode) {
        case X86EMUL_MODE_REAL:
                return __emulate_int_real(ctxt, irq);
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT64:
        default:
                /* Protected-mode interrupts are not implemented yet */
                return X86EMUL_UNHANDLEABLE;
        }
}
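
/*
 * Real-mode IVT layout assumed above: vector n occupies 4 bytes at
 * linear address n * 4, low word the new IP and high word the new CS;
 * e.g. "int 0x10" reads IP from 0x40 and CS from 0x42.
 */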

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
        int rc = X86EMUL_CONTINUE;
        unsigned long temp_eip = 0;
        unsigned long temp_eflags = 0;
        unsigned long cs = 0;
        unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
                             X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
                             X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
                             X86_EFLAGS_AC | X86_EFLAGS_ID |
                             X86_EFLAGS_FIXED;
        unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
                                  X86_EFLAGS_VIP;

        /* TODO: Add stack limit check */

        rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        if (temp_eip & ~0xffff)
                return emulate_gp(ctxt, 0);

        rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->_eip = temp_eip;

        if (ctxt->op_bytes == 4)
                ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
        else if (ctxt->op_bytes == 2) {
                ctxt->eflags &= ~0xffff;
                ctxt->eflags |= temp_eflags;
        }

        ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
        ctxt->eflags |= X86_EFLAGS_FIXED;
        ctxt->ops->set_nmi_mask(ctxt, false);

        return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
        switch (ctxt->mode) {
        case X86EMUL_MODE_REAL:
                return emulate_iret_real(ctxt);
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT64:
        default:
                /* iret from protected mode is not implemented yet */
                return X86EMUL_UNHANDLEABLE;
        }
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned short sel, old_sel;
        struct desc_struct old_desc, new_desc;
        const struct x86_emulate_ops *ops = ctxt->ops;
        u8 cpl = ctxt->ops->cpl(ctxt);

        /* Assignment of RIP may only fail in 64-bit mode */
        if (ctxt->mode == X86EMUL_MODE_PROT64)
                ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
                                 VCPU_SREG_CS);

        memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

        rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
                                       X86_TRANSFER_CALL_JMP,
                                       &new_desc);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
        if (rc != X86EMUL_CONTINUE) {
                WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
                /* assigning eip failed; restore the old cs */
                ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
                return rc;
        }
        return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
        return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        long int old_eip;

        old_eip = ctxt->_eip;
        rc = assign_eip_near(ctxt, ctxt->src.val);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        ctxt->src.val = old_eip;
        rc = em_push(ctxt);
        return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
        u64 old = ctxt->dst.orig_val64;

        if (ctxt->dst.bytes == 16)
                return X86EMUL_UNHANDLEABLE;

        if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
            ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
                *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
                *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
                ctxt->eflags &= ~X86_EFLAGS_ZF;
        } else {
                ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
                                  (u32) reg_read(ctxt, VCPU_REGS_RBX);

                ctxt->eflags |= X86_EFLAGS_ZF;
        }
        return X86EMUL_CONTINUE;
}
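
/*
 * This matches the architected CMPXCHG8B semantics: if EDX:EAX equals
 * the 64-bit destination, ZF is set and ECX:EBX is stored; otherwise ZF
 * is cleared and the destination is loaded into EDX:EAX. The 16-byte
 * CMPXCHG16B form is rejected above as unhandleable.
 */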

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned long eip;

        rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned long eip, cs;
        u16 old_cs;
        int cpl = ctxt->ops->cpl(ctxt);
        struct desc_struct old_desc, new_desc;
        const struct x86_emulate_ops *ops = ctxt->ops;

        if (ctxt->mode == X86EMUL_MODE_PROT64)
                ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
                                 VCPU_SREG_CS);

        rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        /* Outer-privilege level return is not implemented */
        if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
                return X86EMUL_UNHANDLEABLE;
        rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
                                       X86_TRANSFER_RET,
                                       &new_desc);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rc = assign_eip_far(ctxt, eip, &new_desc);
        if (rc != X86EMUL_CONTINUE) {
                WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
                ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
        }
        return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
        int rc;

        rc = em_ret_far(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rsp_increment(ctxt, ctxt->src.val);
        return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
        /* Save real source value, then compare EAX against destination. */
        ctxt->dst.orig_val = ctxt->dst.val;
        ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
        ctxt->src.orig_val = ctxt->src.val;
        ctxt->src.val = ctxt->dst.orig_val;
        fastop(ctxt, em_cmp);

        if (ctxt->eflags & X86_EFLAGS_ZF) {
                /* Success: write back to memory; no update of EAX */
                ctxt->src.type = OP_NONE;
                ctxt->dst.val = ctxt->src.orig_val;
        } else {
                /* Failure: write the value we saw to EAX. */
                ctxt->src.type = OP_REG;
                ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
                ctxt->src.val = ctxt->dst.orig_val;
                /* Create write-cycle to dest by writing the same value */
                ctxt->dst.val = ctxt->dst.orig_val;
        }
        return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
        int seg = ctxt->src2.val;
        unsigned short sel;
        int rc;

        memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

        rc = load_segment_descriptor(ctxt, sel, seg);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->dst.val = ctxt->src.val;
        return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
                        struct desc_struct *cs, struct desc_struct *ss)
{
        cs->l = 0;                      /* will be adjusted later */
        set_desc_base(cs, 0);           /* flat segment */
        cs->g = 1;                      /* 4kb granularity */
        set_desc_limit(cs, 0xfffff);    /* 4GB limit */
        cs->type = 0x0b;                /* Read, Execute, Accessed */
        cs->s = 1;
        cs->dpl = 0;                    /* will be adjusted later */
        cs->p = 1;
        cs->d = 1;
        cs->avl = 0;

        set_desc_base(ss, 0);           /* flat segment */
        set_desc_limit(ss, 0xfffff);    /* 4GB limit */
        ss->g = 1;                      /* 4kb granularity */
        ss->s = 1;
        ss->type = 0x03;                /* Read/Write, Accessed */
        ss->d = 1;                      /* 32bit stack segment */
        ss->dpl = 0;
        ss->p = 1;
        ss->l = 0;
        ss->avl = 0;
}
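
/*
 * SYSCALL/SYSENTER assume flat 4GB code and stack segments whose base,
 * limit and type are architecturally fixed, which is why the descriptors
 * are synthesized here rather than read from the GDT.
 */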

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
        u32 eax, ebx, ecx, edx;

        eax = ecx = 0;
        ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
        return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
                && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
                && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        u32 eax, ebx, ecx, edx;

        /*
         * syscall should always be enabled in longmode - so only become
         * vendor specific (cpuid) if other modes are active...
         */
        if (ctxt->mode == X86EMUL_MODE_PROT64)
                return true;

        eax = 0x00000000;
        ecx = 0x00000000;
        ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
        /*
         * Intel ("GenuineIntel")
         * remark: Intel CPUs only support "syscall" in 64-bit long mode.
         * Also, a 64-bit guest running a 32-bit compat app will #UD!
         * While this behaviour could be fixed (by emulating) to give the
         * AMD response, AMD CPUs can't be made to behave like Intel.
         */
        if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
            ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
            edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
                return false;

        /* AMD ("AuthenticAMD") */
        if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
            ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
            edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
                return true;

        /* AMD ("AMDisbetter!") */
        if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
            ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
            edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
                return true;

        /* default: (not Intel, not AMD), apply Intel's stricter rules... */
        return false;
}

static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        struct desc_struct cs, ss;
        u64 msr_data;
        u16 cs_sel, ss_sel;
        u64 efer = 0;

        /* syscall is not available in real mode */
        if (ctxt->mode == X86EMUL_MODE_REAL ||
            ctxt->mode == X86EMUL_MODE_VM86)
                return emulate_ud(ctxt);

        if (!(em_syscall_is_enabled(ctxt)))
                return emulate_ud(ctxt);

        ops->get_msr(ctxt, MSR_EFER, &efer);
        setup_syscalls_segments(ctxt, &cs, &ss);

        if (!(efer & EFER_SCE))
                return emulate_ud(ctxt);

        ops->get_msr(ctxt, MSR_STAR, &msr_data);
        msr_data >>= 32;
        cs_sel = (u16)(msr_data & 0xfffc);
        ss_sel = (u16)(msr_data + 8);

        if (efer & EFER_LMA) {
                cs.d = 0;
                cs.l = 1;
        }
        ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
        ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

        *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
        if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
                *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

                ops->get_msr(ctxt,
                             ctxt->mode == X86EMUL_MODE_PROT64 ?
                             MSR_LSTAR : MSR_CSTAR, &msr_data);
                ctxt->_eip = msr_data;

                ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
                ctxt->eflags &= ~msr_data;
                ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
        } else {
                /* legacy mode */
                ops->get_msr(ctxt, MSR_STAR, &msr_data);
                ctxt->_eip = (u32)msr_data;

                ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
        }

        return X86EMUL_CONTINUE;
}
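
/*
 * MSR_STAR layout used above: bits 47:32 give the SYSCALL CS selector
 * (SS is that value + 8) and, in legacy mode, bits 31:0 give the target
 * EIP. E.g. STAR[47:32] = 0x0010 yields CS 0x0010 and SS 0x0018.
 */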

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        struct desc_struct cs, ss;
        u64 msr_data;
        u16 cs_sel, ss_sel;
        u64 efer = 0;

        ops->get_msr(ctxt, MSR_EFER, &efer);
        /* inject #GP if in real mode */
        if (ctxt->mode == X86EMUL_MODE_REAL)
                return emulate_gp(ctxt, 0);

        /*
         * Not recognized on AMD in compat mode (but is recognized in legacy
         * mode).
         */
        if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
            && !vendor_intel(ctxt))
                return emulate_ud(ctxt);

        /* sysenter/sysexit have not been tested in 64bit mode. */
        if (ctxt->mode == X86EMUL_MODE_PROT64)
                return X86EMUL_UNHANDLEABLE;

        setup_syscalls_segments(ctxt, &cs, &ss);

        ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
        if ((msr_data & 0xfffc) == 0x0)
                return emulate_gp(ctxt, 0);

        ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
        cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
        ss_sel = cs_sel + 8;
        if (efer & EFER_LMA) {
                cs.d = 0;
                cs.l = 1;
        }

        ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
        ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

        ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
        ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

        ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
        *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
                                          (u32)msr_data;

        return X86EMUL_CONTINUE;
}
2454
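/*
 * SYSEXIT: return to user mode, deriving the CS/SS selectors from
 * IA32_SYSENTER_CS and taking the new RIP from RDX and RSP from RCX.
 */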
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002455static int em_sysexit(struct x86_emulate_ctxt *ctxt)
Andre Przywara4668f052009-06-18 12:56:02 +02002456{
Mathias Krause0225fb52012-08-30 01:30:16 +02002457 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002458 struct desc_struct cs, ss;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002459 u64 msr_data, rcx, rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002460 int usermode;
Xiao Guangrong1249b962011-05-15 23:25:10 +08002461 u16 cs_sel = 0, ss_sel = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002462
Gleb Natapova0044752010-02-10 14:21:31 +02002463 /* inject #GP if in real mode or Virtual 8086 mode */
2464 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002465 ctxt->mode == X86EMUL_MODE_VM86)
2466 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002467
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002468 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara4668f052009-06-18 12:56:02 +02002469
Avi Kivity9dac77f2011-06-01 15:34:25 +03002470 if ((ctxt->rex_prefix & 0x8) != 0x0)
Andre Przywara4668f052009-06-18 12:56:02 +02002471 usermode = X86EMUL_MODE_PROT64;
2472 else
2473 usermode = X86EMUL_MODE_PROT32;
2474
Nadav Amit234f3ce2014-09-18 22:39:38 +03002475 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2476 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2477
Andre Przywara4668f052009-06-18 12:56:02 +02002478 cs.dpl = 3;
2479 ss.dpl = 3;
Avi Kivity717746e2011-04-20 13:37:53 +03002480 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara4668f052009-06-18 12:56:02 +02002481 switch (usermode) {
2482 case X86EMUL_MODE_PROT32:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002483 cs_sel = (u16)(msr_data + 16);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002484 if ((msr_data & 0xfffc) == 0x0)
2485 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002486 ss_sel = (u16)(msr_data + 24);
Nadav Amitbf0b6822014-09-18 22:39:45 +03002487 rcx = (u32)rcx;
2488 rdx = (u32)rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002489 break;
2490 case X86EMUL_MODE_PROT64:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002491 cs_sel = (u16)(msr_data + 32);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002492 if (msr_data == 0x0)
2493 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002494 ss_sel = cs_sel + 8;
2495 cs.d = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002496 cs.l = 1;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002497 if (is_noncanonical_address(rcx) ||
2498 is_noncanonical_address(rdx))
2499 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002500 break;
2501 }
Nadav Amitb32a9912015-03-29 16:33:04 +03002502 cs_sel |= SEGMENT_RPL_MASK;
2503 ss_sel |= SEGMENT_RPL_MASK;
Andre Przywara4668f052009-06-18 12:56:02 +02002504
Avi Kivity1aa36612011-04-27 13:20:30 +03002505 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2506 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara4668f052009-06-18 12:56:02 +02002507
Nadav Amit234f3ce2014-09-18 22:39:38 +03002508 ctxt->_eip = rdx;
2509 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
Andre Przywara4668f052009-06-18 12:56:02 +02002510
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002511 return X86EMUL_CONTINUE;
Andre Przywara4668f052009-06-18 12:56:02 +02002512}
2513
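/*
 * Returns true when IOPL does not permit direct port access and the
 * TSS I/O permission bitmap must be consulted instead: never in real
 * mode, always in VM86 mode, otherwise when CPL > IOPL.
 */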
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002514static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002515{
2516 int iopl;
2517 if (ctxt->mode == X86EMUL_MODE_REAL)
2518 return false;
2519 if (ctxt->mode == X86EMUL_MODE_VM86)
2520 return true;
Nadav Amit0efb0442015-03-29 16:33:03 +03002521 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002522 return ctxt->ops->cpl(ctxt) > iopl;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002523}
2524
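/*
 * Walk the I/O permission bitmap in the TSS: access is allowed only
 * when every bitmap bit covering the requested port range is clear.
 */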
2525static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002526 u16 port, u16 len)
2527{
Mathias Krause0225fb52012-08-30 01:30:16 +02002528 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002529 struct desc_struct tr_seg;
Gleb Natapov5601d052011-03-07 14:55:06 +02002530 u32 base3;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002531 int r;
Avi Kivity1aa36612011-04-27 13:20:30 +03002532 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002533 unsigned mask = (1 << len) - 1;
Gleb Natapov5601d052011-03-07 14:55:06 +02002534 unsigned long base;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002535
Avi Kivity1aa36612011-04-27 13:20:30 +03002536 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002537 if (!tr_seg.p)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002538 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002539 if (desc_limit_scaled(&tr_seg) < 103)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002540 return false;
Gleb Natapov5601d052011-03-07 14:55:06 +02002541 base = get_desc_base(&tr_seg);
2542#ifdef CONFIG_X86_64
2543 base |= ((u64)base3) << 32;
2544#endif
Avi Kivity0f65dd72011-04-20 13:37:53 +03002545 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002546 if (r != X86EMUL_CONTINUE)
2547 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002548 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002549 return false;
Avi Kivity0f65dd72011-04-20 13:37:53 +03002550 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002551 if (r != X86EMUL_CONTINUE)
2552 return false;
2553 if ((perm >> bit_idx) & mask)
2554 return false;
2555 return true;
2556}
2557
2558static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002559 u16 port, u16 len)
2560{
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002561 if (ctxt->perm_ok)
2562 return true;
2563
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002564 if (emulator_bad_iopl(ctxt))
2565 if (!emulator_io_port_access_allowed(ctxt, port, len))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002566 return false;
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002567
2568 ctxt->perm_ok = true;
2569
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002570 return true;
2571}
2572
Nadav Amit428e3d02015-04-28 13:06:01 +03002573static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2574{
2575 /*
2576	 * Intel CPUs mask the counter and pointers in quite a strange
2577 * manner when ECX is zero due to REP-string optimizations.
2578 */
2579#ifdef CONFIG_X86_64
2580 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2581 return;
2582
2583 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2584
2585 switch (ctxt->b) {
2586 case 0xa4: /* movsb */
2587 case 0xa5: /* movsd/w */
2588 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2589 /* fall through */
2590 case 0xaa: /* stosb */
2591 case 0xab: /* stosd/w */
2592 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2593 }
2594#endif
2595}
2596
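/* Snapshot the current register and segment state into a 16-bit TSS image. */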
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002597static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002598 struct tss_segment_16 *tss)
2599{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002600 tss->ip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002601 tss->flag = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002602 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2603 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2604 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2605 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2606 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2607 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2608 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2609 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002610
Avi Kivity1aa36612011-04-27 13:20:30 +03002611 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2612 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2613 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2614 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2615 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002616}
2617
2618static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002619 struct tss_segment_16 *tss)
2620{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002621 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002622 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002623
Avi Kivity9dac77f2011-06-01 15:34:25 +03002624 ctxt->_eip = tss->ip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002625 ctxt->eflags = tss->flag | 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002626 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2627 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2628 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2629 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2630 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2631 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2632 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2633 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002634
2635 /*
2636 * SDM says that segment selectors are loaded before segment
2637 * descriptors
2638 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002639 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2640 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2641 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2642 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2643 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002644
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002645 cpl = tss->cs & 3;
2646
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002647 /*
Guo Chaofc058682012-06-28 15:19:51 +08002648	 * Now load segment descriptors. If a fault happens at this stage
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002649	 * it is handled in the context of the new task
2650 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002651 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002652 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002653 if (ret != X86EMUL_CONTINUE)
2654 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002655 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002656 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002657 if (ret != X86EMUL_CONTINUE)
2658 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002659 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002660 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002661 if (ret != X86EMUL_CONTINUE)
2662 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002663 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002664 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002665 if (ret != X86EMUL_CONTINUE)
2666 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002667 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002668 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002669 if (ret != X86EMUL_CONTINUE)
2670 return ret;
2671
2672 return X86EMUL_CONTINUE;
2673}
2674
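/*
 * 16-bit task switch: write the outgoing state into the old TSS, read
 * the new TSS, link it back to the old task when required, and load
 * the incoming state from it.
 */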
2675static int task_switch_16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002676 u16 tss_selector, u16 old_tss_sel,
2677 ulong old_tss_base, struct desc_struct *new_desc)
2678{
Mathias Krause0225fb52012-08-30 01:30:16 +02002679 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002680 struct tss_segment_16 tss_seg;
2681 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002682 u32 new_tss_base = get_desc_base(new_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002683
Avi Kivity0f65dd72011-04-20 13:37:53 +03002684 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002685 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002686 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002687 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002688
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002689 save_state_to_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002690
Avi Kivity0f65dd72011-04-20 13:37:53 +03002691 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002692 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002693 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002694 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002695
Avi Kivity0f65dd72011-04-20 13:37:53 +03002696 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002697 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002698 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002699 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002700
2701 if (old_tss_sel != 0xffff) {
2702 tss_seg.prev_task_link = old_tss_sel;
2703
Avi Kivity0f65dd72011-04-20 13:37:53 +03002704 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002705 &tss_seg.prev_task_link,
2706 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002707 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002708 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002709 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002710 }
2711
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002712 return load_state_from_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002713}
2714
2715static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002716 struct tss_segment_32 *tss)
2717{
Nadav Amit5c7411e2014-04-07 18:37:47 +03002718	/* CR3 and the LDT selector are intentionally not saved */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002719 tss->eip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002720 tss->eflags = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002721 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2722 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2723 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2724 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2725 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2726 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2727 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2728 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002729
Avi Kivity1aa36612011-04-27 13:20:30 +03002730 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2731 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2732 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2733 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2734 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2735 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002736}
2737
2738static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002739 struct tss_segment_32 *tss)
2740{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002741 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002742 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002743
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002744 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002745 return emulate_gp(ctxt, 0);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002746 ctxt->_eip = tss->eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002747 ctxt->eflags = tss->eflags | 2;
Kevin Wolf4cee4792012-02-08 14:34:41 +01002748
2749 /* General purpose registers */
Avi Kivitydd856ef2012-08-27 23:46:17 +03002750 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2751 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2752 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2753 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2754 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2755 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2756 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2757 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002758
2759 /*
2760 * SDM says that segment selectors are loaded before segment
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002761 * descriptors. This is important because CPL checks will
2762 * use CS.RPL.
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002763 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002764 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2765 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2766 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2767 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2768 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2769 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2770 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002771
2772 /*
Kevin Wolf4cee4792012-02-08 14:34:41 +01002773 * If we're switching between Protected Mode and VM86, we need to make
2774 * sure to update the mode before loading the segment descriptors so
2775 * that the selectors are interpreted correctly.
Kevin Wolf4cee4792012-02-08 14:34:41 +01002776 */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002777 if (ctxt->eflags & X86_EFLAGS_VM) {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002778 ctxt->mode = X86EMUL_MODE_VM86;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002779 cpl = 3;
2780 } else {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002781 ctxt->mode = X86EMUL_MODE_PROT32;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002782 cpl = tss->cs & 3;
2783 }
Kevin Wolf4cee4792012-02-08 14:34:41 +01002784
2785 /*
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002786	 * Now load segment descriptors. If a fault happens at this stage
2787	 * it is handled in the context of the new task
2788 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002789 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002790 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002791 if (ret != X86EMUL_CONTINUE)
2792 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002793 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002794 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002795 if (ret != X86EMUL_CONTINUE)
2796 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002797 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002798 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002799 if (ret != X86EMUL_CONTINUE)
2800 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002801 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002802 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002803 if (ret != X86EMUL_CONTINUE)
2804 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002805 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002806 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002807 if (ret != X86EMUL_CONTINUE)
2808 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002809 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002810 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002811 if (ret != X86EMUL_CONTINUE)
2812 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002813 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002814 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002815
Eugene Korenevsky2f729b12015-03-29 01:27:17 +03002816 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002817}
2818
2819static int task_switch_32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002820 u16 tss_selector, u16 old_tss_sel,
2821 ulong old_tss_base, struct desc_struct *new_desc)
2822{
Mathias Krause0225fb52012-08-30 01:30:16 +02002823 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002824 struct tss_segment_32 tss_seg;
2825 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002826 u32 new_tss_base = get_desc_base(new_desc);
Nadav Amit5c7411e2014-04-07 18:37:47 +03002827 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2828 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002829
Avi Kivity0f65dd72011-04-20 13:37:53 +03002830 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002831 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002832 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002833 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002834
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002835 save_state_to_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002836
Nadav Amit5c7411e2014-04-07 18:37:47 +03002837 /* Only GP registers and segment selectors are saved */
2838 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2839 ldt_sel_offset - eip_offset, &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002840 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002841 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002842
Avi Kivity0f65dd72011-04-20 13:37:53 +03002843 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002844 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002845 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002846 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002847
2848 if (old_tss_sel != 0xffff) {
2849 tss_seg.prev_task_link = old_tss_sel;
2850
Avi Kivity0f65dd72011-04-20 13:37:53 +03002851 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002852 &tss_seg.prev_task_link,
2853 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002854 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002855 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002856 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002857 }
2858
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002859 return load_state_from_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002860}
2861
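/*
 * Common task-switch path: perform the privilege and TSS-limit checks,
 * maintain the busy flags and back link, and dispatch to the 16-bit or
 * 32-bit handler according to the new TSS descriptor type.
 */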
2862static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002863 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002864 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002865{
Mathias Krause0225fb52012-08-30 01:30:16 +02002866 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002867 struct desc_struct curr_tss_desc, next_tss_desc;
2868 int ret;
Avi Kivity1aa36612011-04-27 13:20:30 +03002869 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002870 ulong old_tss_base =
Avi Kivity4bff1e862011-04-20 13:37:53 +03002871 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
Gleb Natapovceffb452010-03-18 15:20:19 +02002872 u32 desc_limit;
Nadav Amit3db176d2015-04-19 21:12:59 +03002873 ulong desc_addr, dr7;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002874
2875 /* FIXME: old_tss_base == ~0 ? */
2876
Avi Kivitye9194642012-06-13 16:29:39 +03002877 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002878 if (ret != X86EMUL_CONTINUE)
2879 return ret;
Avi Kivitye9194642012-06-13 16:29:39 +03002880 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002881 if (ret != X86EMUL_CONTINUE)
2882 return ret;
2883
2884 /* FIXME: check that next_tss_desc is tss */
2885
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002886 /*
2887 * Check privileges. The three cases are task switch caused by...
2888 *
2889 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2890 * 2. Exception/IRQ/iret: No check is performed
Nadav Amit2c2ca2d2014-11-02 11:54:57 +02002891 * 3. jmp/call to TSS/task-gate: No check is performed since the
2892 * hardware checks it before exiting.
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002893 */
2894 if (reason == TASK_SWITCH_GATE) {
2895 if (idt_index != -1) {
2896 /* Software interrupts */
2897 struct desc_struct task_gate_desc;
2898 int dpl;
2899
2900 ret = read_interrupt_descriptor(ctxt, idt_index,
2901 &task_gate_desc);
2902 if (ret != X86EMUL_CONTINUE)
2903 return ret;
2904
2905 dpl = task_gate_desc.dpl;
2906 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2907 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2908 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002909 }
2910
Gleb Natapovceffb452010-03-18 15:20:19 +02002911 desc_limit = desc_limit_scaled(&next_tss_desc);
2912 if (!next_tss_desc.p ||
2913 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2914 desc_limit < 0x2b)) {
Paolo Bonzini592f0852014-08-20 10:05:08 +02002915 return emulate_ts(ctxt, tss_selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002916 }
2917
2918 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2919 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002920 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002921 }
2922
2923 if (reason == TASK_SWITCH_IRET)
2924 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2925
2926	 /* set back link to prev task only if NT bit is set in eflags;
Guo Chaofc058682012-06-28 15:19:51 +08002927	    note that old_tss_sel is not used after this point */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002928 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2929 old_tss_sel = 0xffff;
2930
2931 if (next_tss_desc.type & 8)
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002932 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002933 old_tss_base, &next_tss_desc);
2934 else
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002935 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002936 old_tss_base, &next_tss_desc);
Jan Kiszka0760d442010-04-14 15:50:57 +02002937 if (ret != X86EMUL_CONTINUE)
2938 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002939
2940 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2941 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2942
2943 if (reason != TASK_SWITCH_IRET) {
2944 next_tss_desc.type |= (1 << 1); /* set busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002945 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002946 }
2947
Avi Kivity717746e2011-04-20 13:37:53 +03002948 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
Avi Kivity1aa36612011-04-27 13:20:30 +03002949 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002950
Jan Kiszkae269fb22010-04-14 15:51:09 +02002951 if (has_error_code) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002952 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2953 ctxt->lock_prefix = 0;
2954 ctxt->src.val = (unsigned long) error_code;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002955 ret = em_push(ctxt);
Jan Kiszkae269fb22010-04-14 15:51:09 +02002956 }
2957
Nadav Amit3db176d2015-04-19 21:12:59 +03002958 ops->get_dr(ctxt, 7, &dr7);
2959 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
2960
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002961 return ret;
2962}
2963
2964int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002965 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002966 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002967{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002968 int rc;
2969
Avi Kivitydd856ef2012-08-27 23:46:17 +03002970 invalidate_registers(ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002971 ctxt->_eip = ctxt->eip;
2972 ctxt->dst.type = OP_NONE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002973
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002974 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002975 has_error_code, error_code);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002976
Avi Kivitydd856ef2012-08-27 23:46:17 +03002977 if (rc == X86EMUL_CONTINUE) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002978 ctxt->eip = ctxt->_eip;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002979 writeback_registers(ctxt);
2980 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002981
Gleb Natapova0c0ab22011-03-28 16:57:49 +02002982 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002983}
2984
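/* Advance a string-op pointer register, moving backwards if EFLAGS.DF is set. */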
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03002985static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2986 struct operand *op)
Gleb Natapova682e352010-03-18 15:20:21 +02002987{
Nadav Amit0efb0442015-03-29 16:33:03 +03002988 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
Gleb Natapova682e352010-03-18 15:20:21 +02002989
Paolo Bonzini01485a22014-11-19 18:25:08 +01002990 register_address_increment(ctxt, reg, df * op->bytes);
2991 op->addr.mem.ea = register_address(ctxt, reg);
Gleb Natapova682e352010-03-18 15:20:21 +02002992}
2993
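/*
 * DAS: decimal adjust AL after subtraction, fixing up both BCD nibbles
 * of AL and recomputing CF and AF to match.
 */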
Avi Kivity7af04fc2010-08-18 14:16:35 +03002994static int em_das(struct x86_emulate_ctxt *ctxt)
2995{
Avi Kivity7af04fc2010-08-18 14:16:35 +03002996 u8 al, old_al;
2997 bool af, cf, old_cf;
2998
2999 cf = ctxt->eflags & X86_EFLAGS_CF;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003000 al = ctxt->dst.val;
Avi Kivity7af04fc2010-08-18 14:16:35 +03003001
3002 old_al = al;
3003 old_cf = cf;
3004 cf = false;
3005 af = ctxt->eflags & X86_EFLAGS_AF;
3006 if ((al & 0x0f) > 9 || af) {
3007 al -= 6;
3008 cf = old_cf | (al >= 250);
3009 af = true;
3010 } else {
3011 af = false;
3012 }
3013 if (old_al > 0x99 || old_cf) {
3014 al -= 0x60;
3015 cf = true;
3016 }
3017
Avi Kivity9dac77f2011-06-01 15:34:25 +03003018 ctxt->dst.val = al;
Avi Kivity7af04fc2010-08-18 14:16:35 +03003019 /* Set PF, ZF, SF */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003020 ctxt->src.type = OP_IMM;
3021 ctxt->src.val = 0;
3022 ctxt->src.bytes = 1;
Avi Kivity158de572013-01-19 19:51:57 +02003023 fastop(ctxt, em_or);
Avi Kivity7af04fc2010-08-18 14:16:35 +03003024 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3025 if (cf)
3026 ctxt->eflags |= X86_EFLAGS_CF;
3027 if (af)
3028 ctxt->eflags |= X86_EFLAGS_AF;
3029 return X86EMUL_CONTINUE;
3030}
3031
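/*
 * AAM: ASCII adjust AX after multiply.  AH = AL / imm8 and
 * AL = AL % imm8; a zero divisor raises #DE.
 */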
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02003032static int em_aam(struct x86_emulate_ctxt *ctxt)
3033{
3034 u8 al, ah;
3035
3036 if (ctxt->src.val == 0)
3037 return emulate_de(ctxt);
3038
3039 al = ctxt->dst.val & 0xff;
3040 ah = al / ctxt->src.val;
3041 al %= ctxt->src.val;
3042
3043 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3044
3045 /* Set PF, ZF, SF */
3046 ctxt->src.type = OP_IMM;
3047 ctxt->src.val = 0;
3048 ctxt->src.bytes = 1;
3049 fastop(ctxt, em_or);
3050
3051 return X86EMUL_CONTINUE;
3052}
3053
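/* AAD: ASCII adjust AX before division.  AL = AL + AH * imm8, AH = 0. */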
Gleb Natapov7f662272012-12-10 11:42:30 +02003054static int em_aad(struct x86_emulate_ctxt *ctxt)
3055{
3056 u8 al = ctxt->dst.val & 0xff;
3057 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3058
3059 al = (al + (ah * ctxt->src.val)) & 0xff;
3060
3061 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3062
Gleb Natapovf583c292013-02-13 17:50:39 +02003063 /* Set PF, ZF, SF */
3064 ctxt->src.type = OP_IMM;
3065 ctxt->src.val = 0;
3066 ctxt->src.bytes = 1;
3067 fastop(ctxt, em_or);
Gleb Natapov7f662272012-12-10 11:42:30 +02003068
3069 return X86EMUL_CONTINUE;
3070}
3071
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003072static int em_call(struct x86_emulate_ctxt *ctxt)
3073{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003074 int rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003075 long rel = ctxt->src.val;
3076
3077 ctxt->src.val = (unsigned long)ctxt->_eip;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003078 rc = jmp_rel(ctxt, rel);
3079 if (rc != X86EMUL_CONTINUE)
3080 return rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003081 return em_push(ctxt);
3082}
3083
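/*
 * Far call: load the new CS first, then push the old CS:IP.  On failure
 * the original CS and emulation mode are restored, although memory that
 * was already pushed to may have been modified.
 */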
Avi Kivity0ef753b2010-08-18 14:51:45 +03003084static int em_call_far(struct x86_emulate_ctxt *ctxt)
3085{
Avi Kivity0ef753b2010-08-18 14:51:45 +03003086 u16 sel, old_cs;
3087 ulong old_eip;
3088 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03003089 struct desc_struct old_desc, new_desc;
3090 const struct x86_emulate_ops *ops = ctxt->ops;
3091 int cpl = ctxt->ops->cpl(ctxt);
Nadav Amit82268082015-01-26 09:32:27 +02003092 enum x86emul_mode prev_mode = ctxt->mode;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003093
Avi Kivity9dac77f2011-06-01 15:34:25 +03003094 old_eip = ctxt->_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003095 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003096
Avi Kivity9dac77f2011-06-01 15:34:25 +03003097 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Nadav Amit3dc4bc42014-12-25 02:52:19 +02003098 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3099 X86_TRANSFER_CALL_JMP, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03003100 if (rc != X86EMUL_CONTINUE)
Nadav Amit80976db2014-12-25 02:52:20 +02003101 return rc;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003102
Nadav Amitd50eaa12014-11-19 17:43:11 +02003103 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03003104 if (rc != X86EMUL_CONTINUE)
3105 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003106
Avi Kivity9dac77f2011-06-01 15:34:25 +03003107 ctxt->src.val = old_cs;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09003108 rc = em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003109 if (rc != X86EMUL_CONTINUE)
Nadav Amitd1442d82014-09-18 22:39:39 +03003110 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003111
Avi Kivity9dac77f2011-06-01 15:34:25 +03003112 ctxt->src.val = old_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003113 rc = em_push(ctxt);
3114	/* If we failed, we tainted the memory, but at the very least we should
3115 restore cs */
Nadav Amit82268082015-01-26 09:32:27 +02003116 if (rc != X86EMUL_CONTINUE) {
3117 pr_warn_once("faulting far call emulation tainted memory\n");
Nadav Amitd1442d82014-09-18 22:39:39 +03003118 goto fail;
Nadav Amit82268082015-01-26 09:32:27 +02003119 }
Nadav Amitd1442d82014-09-18 22:39:39 +03003120 return rc;
3121fail:
3122 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
Nadav Amit82268082015-01-26 09:32:27 +02003123 ctxt->mode = prev_mode;
Nadav Amitd1442d82014-09-18 22:39:39 +03003124 return rc;
3125
Avi Kivity0ef753b2010-08-18 14:51:45 +03003126}
3127
Avi Kivity40ece7c2010-08-18 15:12:09 +03003128static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3129{
Avi Kivity40ece7c2010-08-18 15:12:09 +03003130 int rc;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003131 unsigned long eip;
Avi Kivity40ece7c2010-08-18 15:12:09 +03003132
Nadav Amit234f3ce2014-09-18 22:39:38 +03003133 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3134 if (rc != X86EMUL_CONTINUE)
3135 return rc;
3136 rc = assign_eip_near(ctxt, eip);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003137 if (rc != X86EMUL_CONTINUE)
3138 return rc;
Avi Kivity5ad105e2012-08-19 14:34:31 +03003139 rsp_increment(ctxt, ctxt->src.val);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003140 return X86EMUL_CONTINUE;
3141}
3142
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003143static int em_xchg(struct x86_emulate_ctxt *ctxt)
3144{
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003145 /* Write back the register source. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003146 ctxt->src.val = ctxt->dst.val;
3147 write_register_operand(&ctxt->src);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003148
3149 /* Write back the memory destination with implicit LOCK prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003150 ctxt->dst.val = ctxt->src.orig_val;
3151 ctxt->lock_prefix = 1;
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003152 return X86EMUL_CONTINUE;
3153}
3154
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003155static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3156{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003157 ctxt->dst.val = ctxt->src2.val;
Avi Kivity4d758342013-01-19 19:51:55 +02003158 return fastop(ctxt, em_imul);
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003159}
3160
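/* CWD/CDQ/CQO: sign-extend the accumulator into DX/EDX/RDX. */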
Avi Kivity61429142010-08-19 15:13:00 +03003161static int em_cwd(struct x86_emulate_ctxt *ctxt)
3162{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003163 ctxt->dst.type = OP_REG;
3164 ctxt->dst.bytes = ctxt->src.bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03003165 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivity9dac77f2011-06-01 15:34:25 +03003166 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
Avi Kivity61429142010-08-19 15:13:00 +03003167
3168 return X86EMUL_CONTINUE;
3169}
3170
Avi Kivity48bb5d32010-08-18 18:54:34 +03003171static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3172{
Avi Kivity48bb5d32010-08-18 18:54:34 +03003173 u64 tsc = 0;
3174
Avi Kivity717746e2011-04-20 13:37:53 +03003175 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003176 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3177 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
Avi Kivity48bb5d32010-08-18 18:54:34 +03003178 return X86EMUL_CONTINUE;
3179}
3180
Avi Kivity222d21a2011-11-10 14:57:30 +02003181static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3182{
3183 u64 pmc;
3184
Avi Kivitydd856ef2012-08-27 23:46:17 +03003185 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
Avi Kivity222d21a2011-11-10 14:57:30 +02003186 return emulate_gp(ctxt, 0);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003187 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3188 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
Avi Kivity222d21a2011-11-10 14:57:30 +02003189 return X86EMUL_CONTINUE;
3190}
3191
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003192static int em_mov(struct x86_emulate_ctxt *ctxt)
3193{
Paolo Bonzini54cfdb32014-03-27 11:36:25 +01003194 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003195 return X86EMUL_CONTINUE;
3196}
3197
Borislav Petkov84cffe42013-10-29 12:54:56 +01003198#define FFL(x) bit(X86_FEATURE_##x)
3199
3200static int em_movbe(struct x86_emulate_ctxt *ctxt)
3201{
3202 u32 ebx, ecx, edx, eax = 1;
3203 u16 tmp;
3204
3205 /*
3206 * Check MOVBE is set in the guest-visible CPUID leaf.
3207 */
3208 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3209 if (!(ecx & FFL(MOVBE)))
3210 return emulate_ud(ctxt);
3211
3212 switch (ctxt->op_bytes) {
3213 case 2:
3214 /*
3215 * From MOVBE definition: "...When the operand size is 16 bits,
3216 * the upper word of the destination register remains unchanged
3217 * ..."
3218 *
3219 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3220	 * rules, so we have to do the operation almost by hand.
3221 */
3222 tmp = (u16)ctxt->src.val;
3223 ctxt->dst.val &= ~0xffffUL;
3224 ctxt->dst.val |= (unsigned long)swab16(tmp);
3225 break;
3226 case 4:
3227 ctxt->dst.val = swab32((u32)ctxt->src.val);
3228 break;
3229 case 8:
3230 ctxt->dst.val = swab64(ctxt->src.val);
3231 break;
3232 default:
Paolo Bonzini592f0852014-08-20 10:05:08 +02003233 BUG();
Borislav Petkov84cffe42013-10-29 12:54:56 +01003234 }
3235 return X86EMUL_CONTINUE;
3236}
3237
Takuya Yoshikawabc00f8d2011-11-22 15:19:19 +09003238static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3239{
3240 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3241 return emulate_gp(ctxt, 0);
3242
3243 /* Disable writeback. */
3244 ctxt->dst.type = OP_NONE;
3245 return X86EMUL_CONTINUE;
3246}
3247
3248static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3249{
3250 unsigned long val;
3251
3252 if (ctxt->mode == X86EMUL_MODE_PROT64)
3253 val = ctxt->src.val & ~0ULL;
3254 else
3255 val = ctxt->src.val & ~0U;
3256
3257 /* #UD condition is already handled. */
3258 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3259 return emulate_gp(ctxt, 0);
3260
3261 /* Disable writeback. */
3262 ctxt->dst.type = OP_NONE;
3263 return X86EMUL_CONTINUE;
3264}
3265
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003266static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3267{
3268 u64 msr_data;
3269
Avi Kivitydd856ef2012-08-27 23:46:17 +03003270 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3271 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3272 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003273 return emulate_gp(ctxt, 0);
3274
3275 return X86EMUL_CONTINUE;
3276}
3277
3278static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3279{
3280 u64 msr_data;
3281
Avi Kivitydd856ef2012-08-27 23:46:17 +03003282 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003283 return emulate_gp(ctxt, 0);
3284
Avi Kivitydd856ef2012-08-27 23:46:17 +03003285 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3286 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003287 return X86EMUL_CONTINUE;
3288}
3289
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003290static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3291{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003292 if (ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003293 return emulate_ud(ctxt);
3294
Avi Kivity9dac77f2011-06-01 15:34:25 +03003295 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
Nadav Amitb5bbf102014-11-02 11:54:46 +02003296 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3297 ctxt->dst.bytes = 2;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003298 return X86EMUL_CONTINUE;
3299}
3300
3301static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3302{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003303 u16 sel = ctxt->src.val;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003304
Avi Kivity9dac77f2011-06-01 15:34:25 +03003305 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003306 return emulate_ud(ctxt);
3307
Avi Kivity9dac77f2011-06-01 15:34:25 +03003308 if (ctxt->modrm_reg == VCPU_SREG_SS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003309 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3310
3311 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003312 ctxt->dst.type = OP_NONE;
3313 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003314}
3315
Avi Kivitya14e5792012-06-13 12:28:33 +03003316static int em_lldt(struct x86_emulate_ctxt *ctxt)
3317{
3318 u16 sel = ctxt->src.val;
3319
3320 /* Disable writeback. */
3321 ctxt->dst.type = OP_NONE;
3322 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3323}
3324
Avi Kivity80890002012-06-13 16:33:29 +03003325static int em_ltr(struct x86_emulate_ctxt *ctxt)
3326{
3327 u16 sel = ctxt->src.val;
3328
3329 /* Disable writeback. */
3330 ctxt->dst.type = OP_NONE;
3331 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3332}
3333
Avi Kivity38503912011-03-31 18:48:09 +02003334static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3335{
Avi Kivity9fa088f2011-03-31 18:54:30 +02003336 int rc;
3337 ulong linear;
3338
Avi Kivity9dac77f2011-06-01 15:34:25 +03003339 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02003340 if (rc == X86EMUL_CONTINUE)
Avi Kivity3cb16fe2011-04-20 15:38:44 +03003341 ctxt->ops->invlpg(ctxt, linear);
Avi Kivity38503912011-03-31 18:48:09 +02003342 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003343 ctxt->dst.type = OP_NONE;
Avi Kivity38503912011-03-31 18:48:09 +02003344 return X86EMUL_CONTINUE;
3345}
3346
Avi Kivity2d04a052011-04-20 15:32:49 +03003347static int em_clts(struct x86_emulate_ctxt *ctxt)
3348{
3349 ulong cr0;
3350
3351 cr0 = ctxt->ops->get_cr(ctxt, 0);
3352 cr0 &= ~X86_CR0_TS;
3353 ctxt->ops->set_cr(ctxt, 0, cr0);
3354 return X86EMUL_CONTINUE;
3355}
3356
Jan Kiszkab34a8052015-03-09 20:27:43 +01003357static int em_hypercall(struct x86_emulate_ctxt *ctxt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003358{
Nadav Amit0f54a322014-08-29 11:26:55 +03003359 int rc = ctxt->ops->fix_hypercall(ctxt);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003360
Avi Kivity26d05cc2011-04-21 12:07:59 +03003361 if (rc != X86EMUL_CONTINUE)
3362 return rc;
3363
3364 /* Let the processor re-execute the fixed hypercall */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003365 ctxt->_eip = ctxt->eip;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003366 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003367 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003368 return X86EMUL_CONTINUE;
3369}
3370
Avi Kivity96051572012-06-10 17:21:18 +03003371static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3372 void (*get)(struct x86_emulate_ctxt *ctxt,
3373 struct desc_ptr *ptr))
3374{
3375 struct desc_ptr desc_ptr;
3376
3377 if (ctxt->mode == X86EMUL_MODE_PROT64)
3378 ctxt->op_bytes = 8;
3379 get(ctxt, &desc_ptr);
3380 if (ctxt->op_bytes == 2) {
3381 ctxt->op_bytes = 4;
3382 desc_ptr.address &= 0x00ffffff;
3383 }
3384 /* Disable writeback. */
3385 ctxt->dst.type = OP_NONE;
3386 return segmented_write(ctxt, ctxt->dst.addr.mem,
3387 &desc_ptr, 2 + ctxt->op_bytes);
3388}
3389
3390static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3391{
3392 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3393}
3394
3395static int em_sidt(struct x86_emulate_ctxt *ctxt)
3396{
3397 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3398}
3399
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003400static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003401{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003402 struct desc_ptr desc_ptr;
3403 int rc;
3404
Avi Kivity510425f2012-06-07 17:04:36 +03003405 if (ctxt->mode == X86EMUL_MODE_PROT64)
3406 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003407 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Avi Kivity26d05cc2011-04-21 12:07:59 +03003408 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003409 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003410 if (rc != X86EMUL_CONTINUE)
3411 return rc;
Nadav Amit9a9abf62014-11-02 11:54:56 +02003412 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3413 is_noncanonical_address(desc_ptr.address))
3414 return emulate_gp(ctxt, 0);
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003415 if (lgdt)
3416 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3417 else
3418 ctxt->ops->set_idt(ctxt, &desc_ptr);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003419 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003420 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003421 return X86EMUL_CONTINUE;
3422}
3423
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003424static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3425{
3426 return em_lgdt_lidt(ctxt, true);
3427}
3428
Avi Kivity26d05cc2011-04-21 12:07:59 +03003429static int em_lidt(struct x86_emulate_ctxt *ctxt)
3430{
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003431 return em_lgdt_lidt(ctxt, false);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003432}
3433
3434static int em_smsw(struct x86_emulate_ctxt *ctxt)
3435{
Nadav Amit32e94d02014-06-02 18:34:11 +03003436 if (ctxt->dst.type == OP_MEM)
3437 ctxt->dst.bytes = 2;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003438 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003439 return X86EMUL_CONTINUE;
3440}
3441
3442static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3443{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003444 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003445 | (ctxt->src.val & 0x0f));
3446 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003447 return X86EMUL_CONTINUE;
3448}
3449
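/*
 * LOOP/LOOPE/LOOPNE: decrement rCX and branch while it is non-zero;
 * the 0xe0/0xe1 forms additionally test ZF.
 */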
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003450static int em_loop(struct x86_emulate_ctxt *ctxt)
3451{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003452 int rc = X86EMUL_CONTINUE;
3453
Paolo Bonzini01485a22014-11-19 18:25:08 +01003454 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003455 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
Avi Kivity9dac77f2011-06-01 15:34:25 +03003456 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
Nadav Amit234f3ce2014-09-18 22:39:38 +03003457 rc = jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003458
Nadav Amit234f3ce2014-09-18 22:39:38 +03003459 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003460}
3461
3462static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3463{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003464 int rc = X86EMUL_CONTINUE;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003465
Nadav Amit234f3ce2014-09-18 22:39:38 +03003466 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3467 rc = jmp_rel(ctxt, ctxt->src.val);
3468
3469 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003470}
3471
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003472static int em_in(struct x86_emulate_ctxt *ctxt)
3473{
3474 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3475 &ctxt->dst.val))
3476 return X86EMUL_IO_NEEDED;
3477
3478 return X86EMUL_CONTINUE;
3479}
3480
3481static int em_out(struct x86_emulate_ctxt *ctxt)
3482{
3483 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3484 &ctxt->src.val, 1);
3485 /* Disable writeback. */
3486 ctxt->dst.type = OP_NONE;
3487 return X86EMUL_CONTINUE;
3488}
3489
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003490static int em_cli(struct x86_emulate_ctxt *ctxt)
3491{
3492 if (emulator_bad_iopl(ctxt))
3493 return emulate_gp(ctxt, 0);
3494
3495 ctxt->eflags &= ~X86_EFLAGS_IF;
3496 return X86EMUL_CONTINUE;
3497}
3498
3499static int em_sti(struct x86_emulate_ctxt *ctxt)
3500{
3501 if (emulator_bad_iopl(ctxt))
3502 return emulate_gp(ctxt, 0);
3503
3504 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3505 ctxt->eflags |= X86_EFLAGS_IF;
3506 return X86EMUL_CONTINUE;
3507}
3508
Avi Kivity6d6eede2012-06-07 14:11:36 +03003509static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3510{
3511 u32 eax, ebx, ecx, edx;
3512
Avi Kivitydd856ef2012-08-27 23:46:17 +03003513 eax = reg_read(ctxt, VCPU_REGS_RAX);
3514 ecx = reg_read(ctxt, VCPU_REGS_RCX);
Avi Kivity6d6eede2012-06-07 14:11:36 +03003515 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003516 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3517 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3518 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3519 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
Avi Kivity6d6eede2012-06-07 14:11:36 +03003520 return X86EMUL_CONTINUE;
3521}
3522
Paolo Bonzini98f73632013-10-31 11:19:42 +01003523static int em_sahf(struct x86_emulate_ctxt *ctxt)
3524{
3525 u32 flags;
3526
Nadav Amit0efb0442015-03-29 16:33:03 +03003527 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3528 X86_EFLAGS_SF;
Paolo Bonzini98f73632013-10-31 11:19:42 +01003529 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3530
3531 ctxt->eflags &= ~0xffUL;
3532 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3533 return X86EMUL_CONTINUE;
3534}
3535
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003536static int em_lahf(struct x86_emulate_ctxt *ctxt)
3537{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003538 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3539 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003540 return X86EMUL_CONTINUE;
3541}
3542
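/* BSWAP: reverse the byte order of the destination register. */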
Avi Kivity92998362012-06-13 12:25:06 +03003543static int em_bswap(struct x86_emulate_ctxt *ctxt)
3544{
3545 switch (ctxt->op_bytes) {
3546#ifdef CONFIG_X86_64
3547 case 8:
3548 asm("bswap %0" : "+r"(ctxt->dst.val));
3549 break;
3550#endif
3551 default:
3552 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3553 break;
3554 }
3555 return X86EMUL_CONTINUE;
3556}
3557
Nadav Amit13e457e2014-10-13 13:04:13 +03003558static int em_clflush(struct x86_emulate_ctxt *ctxt)
3559{
3560	/* emulate clflush as a no-op, regardless of the CPUID feature bit */
3561 return X86EMUL_CONTINUE;
3562}
3563
Nadav Amit2276b512015-01-26 09:32:24 +02003564static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3565{
3566 ctxt->dst.val = (s32) ctxt->src.val;
3567 return X86EMUL_CONTINUE;
3568}
3569
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003570static bool valid_cr(int nr)
3571{
3572 switch (nr) {
3573 case 0:
3574 case 2 ... 4:
3575 case 8:
3576 return true;
3577 default:
3578 return false;
3579 }
3580}
3581
3582static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3583{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003584 if (!valid_cr(ctxt->modrm_reg))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003585 return emulate_ud(ctxt);
3586
3587 return X86EMUL_CONTINUE;
3588}
3589
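/*
 * Validate a control register write: raise #GP on reserved bits and on
 * the illegal CR0/CR4 paging combinations checked below.
 */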
3590static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3591{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003592 u64 new_val = ctxt->src.val64;
3593 int cr = ctxt->modrm_reg;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003594 u64 efer = 0;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003595
3596 static u64 cr_reserved_bits[] = {
3597 0xffffffff00000000ULL,
3598 0, 0, 0, /* CR3 checked later */
3599 CR4_RESERVED_BITS,
3600 0, 0, 0,
3601 CR8_RESERVED_BITS,
3602 };
3603
3604 if (!valid_cr(cr))
3605 return emulate_ud(ctxt);
3606
3607 if (new_val & cr_reserved_bits[cr])
3608 return emulate_gp(ctxt, 0);
3609
3610 switch (cr) {
3611 case 0: {
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003612 u64 cr4;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003613 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3614 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3615 return emulate_gp(ctxt, 0);
3616
Avi Kivity717746e2011-04-20 13:37:53 +03003617 cr4 = ctxt->ops->get_cr(ctxt, 4);
3618 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003619
3620 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3621 !(cr4 & X86_CR4_PAE))
3622 return emulate_gp(ctxt, 0);
3623
3624 break;
3625 }
3626 case 3: {
3627 u64 rsvd = 0;
3628
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003629 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3630 if (efer & EFER_LMA)
Nadav Amit9d88fca2014-11-02 11:54:52 +02003631 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003632
3633 if (new_val & rsvd)
3634 return emulate_gp(ctxt, 0);
3635
3636 break;
3637 }
3638 case 4: {
Avi Kivity717746e2011-04-20 13:37:53 +03003639 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003640
3641 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3642 return emulate_gp(ctxt, 0);
3643
3644 break;
3645 }
3646 }
3647
3648 return X86EMUL_CONTINUE;
3649}
3650
Joerg Roedel3b88e412011-04-04 12:39:29 +02003651static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3652{
3653 unsigned long dr7;
3654
Avi Kivity717746e2011-04-20 13:37:53 +03003655 ctxt->ops->get_dr(ctxt, 7, &dr7);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003656
3657	/* Check if DR7.GD (general detect, bit 13) is set */
3658 return dr7 & (1 << 13);
3659}
3660
3661static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3662{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003663 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003664 u64 cr4;
3665
3666 if (dr > 7)
3667 return emulate_ud(ctxt);
3668
Avi Kivity717746e2011-04-20 13:37:53 +03003669 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003670 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3671 return emulate_ud(ctxt);
3672
Nadav Amit6d2a0522014-11-02 11:54:43 +02003673 if (check_dr7_gd(ctxt)) {
3674 ulong dr6;
3675
3676 ctxt->ops->get_dr(ctxt, 6, &dr6);
3677 dr6 &= ~15;
3678 dr6 |= DR6_BD | DR6_RTM;
3679 ctxt->ops->set_dr(ctxt, 6, dr6);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003680 return emulate_db(ctxt);
Nadav Amit6d2a0522014-11-02 11:54:43 +02003681 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02003682
3683 return X86EMUL_CONTINUE;
3684}
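/*
 * When DR7.GD is set, any access to a debug register must raise #DB
 * with DR6.BD reported.  That is what the path above does: the low
 * status bits of DR6 are cleared, BD and the RTM bit (which reads as 1
 * outside a transaction) are set, and #DB is injected instead of
 * completing the access.
 */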
3685
3686static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3687{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003688 u64 new_val = ctxt->src.val64;
3689 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003690
3691 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3692 return emulate_gp(ctxt, 0);
3693
3694 return check_dr_read(ctxt);
3695}
3696
Joerg Roedel01de8b02011-04-04 12:39:31 +02003697static int check_svme(struct x86_emulate_ctxt *ctxt)
3698{
3699 u64 efer;
3700
Avi Kivity717746e2011-04-20 13:37:53 +03003701 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003702
3703 if (!(efer & EFER_SVME))
3704 return emulate_ud(ctxt);
3705
3706 return X86EMUL_CONTINUE;
3707}
3708
3709static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3710{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003711 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003712
3713 /* Valid physical address? */
Randy Dunlapd4224442011-04-21 09:09:22 -07003714 if (rax & 0xffff000000000000ULL)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003715 return emulate_gp(ctxt, 0);
3716
3717 return check_svme(ctxt);
3718}
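/*
 * The SVM instructions guarded by check_svme_pa (vmrun, vmload,
 * vmsave) take the VMCB's physical address in RAX; any address with
 * bits 63:48 set is rejected with #GP(0) before EFER.SVME is even
 * consulted, which amounts to assuming a 48-bit physical address
 * limit.
 */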
3719
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003720static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3721{
Avi Kivity717746e2011-04-20 13:37:53 +03003722 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003723
Avi Kivity717746e2011-04-20 13:37:53 +03003724 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003725 return emulate_ud(ctxt);
3726
3727 return X86EMUL_CONTINUE;
3728}
3729
Joerg Roedel80612522011-04-04 12:39:33 +02003730static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3731{
Avi Kivity717746e2011-04-20 13:37:53 +03003732 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003733 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
Joerg Roedel80612522011-04-04 12:39:33 +02003734
Avi Kivity717746e2011-04-20 13:37:53 +03003735 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
Nadav Amit67f4d422014-06-02 18:34:09 +03003736 ctxt->ops->check_pmc(ctxt, rcx))
Joerg Roedel80612522011-04-04 12:39:33 +02003737 return emulate_gp(ctxt, 0);
3738
3739 return X86EMUL_CONTINUE;
3740}
3741
Joerg Roedelf6511932011-04-04 12:39:35 +02003742static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3743{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003744 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3745 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003746 return emulate_gp(ctxt, 0);
3747
3748 return X86EMUL_CONTINUE;
3749}
3750
3751static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3752{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003753 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3754 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003755 return emulate_gp(ctxt, 0);
3756
3757 return X86EMUL_CONTINUE;
3758}
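/*
 * Both I/O permission hooks clamp the access width to 4 bytes (the
 * widest x86 port access) and then ask emulator_io_permited() to
 * consult the TSS I/O permission bitmap; a refusal becomes #GP(0).
 * Whether the bitmap check applies at all (CPL vs. IOPL, VM86) is
 * decided inside that helper, not here.
 */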
3759
Avi Kivity73fba5f2010-07-29 15:11:53 +03003760#define D(_y) { .flags = (_y) }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003761#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3762#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3763 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Gleb Natapov0b789ee2013-04-11 11:59:55 +03003764#define N D(NotImpl)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003765#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003766#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3767#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
Nadav Amit39f062f2014-11-26 15:47:18 +02003768#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
Nadav Amit2276b512015-01-26 09:32:24 +02003769#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
Gleb Natapov045a2822012-12-20 16:57:43 +02003770#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003771#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
Avi Kivitye28bbd42013-01-04 16:18:48 +02003772#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
Avi Kivityc4f035c2011-04-04 12:39:22 +02003773#define II(_f, _e, _i) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003774 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
Joerg Roedeld09beab2011-04-04 12:39:25 +02003775#define IIP(_f, _e, _i, _p) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003776 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3777 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Avi Kivityaa97bb42010-01-20 18:09:23 +02003778#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003779
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003780#define D2bv(_f) D((_f) | ByteOp), D(_f)
Joerg Roedelf6511932011-04-04 12:39:35 +02003781#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003782#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
Avi Kivityf7857f32013-01-04 16:18:53 +02003783#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003784#define I2bvIP(_f, _e, _i, _p) \
3785 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003786
Avi Kivityfb864fb2013-01-04 16:18:54 +02003787#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3788 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3789 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
Avi Kivity6230f7f2010-08-26 18:34:55 +03003790
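/*
 * A quick legend for the decode tables below: N is "not implemented";
 * D and I carry flags (I also an execute hook); F routes through the
 * fastop path; G/GD/E/ID/MD indirect through group, group-dual,
 * escape, instruction-dual and mode-dual tables; GP selects by
 * mandatory SIMD prefix; and the DI/DIP/II/IIP variants attach an
 * intercept and, for *IP, a permission check.
 */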
Nadav Amit0f54a322014-08-29 11:26:55 +03003791static const struct opcode group7_rm0[] = {
3792 N,
Jan Kiszkab34a8052015-03-09 20:27:43 +01003793 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
Nadav Amit0f54a322014-08-29 11:26:55 +03003794 N, N, N, N, N, N,
3795};
3796
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003797static const struct opcode group7_rm1[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003798 DI(SrcNone | Priv, monitor),
3799 DI(SrcNone | Priv, mwait),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003800 N, N, N, N, N, N,
3801};
3802
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003803static const struct opcode group7_rm3[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003804 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
Jan Kiszkab34a8052015-03-09 20:27:43 +01003805 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003806 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3807 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3808 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3809 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3810 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3811 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003812};
Avi Kivity6230f7f2010-08-26 18:34:55 +03003813
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003814static const struct opcode group7_rm7[] = {
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003815 N,
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003816 DIP(SrcNone, rdtscp, check_rdtsc),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003817 N, N, N, N, N, N,
3818};
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09003819
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003820static const struct opcode group1[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003821 F(Lock, em_add),
3822 F(Lock | PageTable, em_or),
3823 F(Lock, em_adc),
3824 F(Lock, em_sbb),
3825 F(Lock | PageTable, em_and),
3826 F(Lock, em_sub),
3827 F(Lock, em_xor),
3828 F(NoWrite, em_cmp),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003829};
3830
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003831static const struct opcode group1A[] = {
Nadav Amitab708092014-12-25 02:52:21 +02003832 I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003833};
3834
Avi Kivity007a3b52013-01-19 19:51:51 +02003835static const struct opcode group2[] = {
3836 F(DstMem | ModRM, em_rol),
3837 F(DstMem | ModRM, em_ror),
3838 F(DstMem | ModRM, em_rcl),
3839 F(DstMem | ModRM, em_rcr),
3840 F(DstMem | ModRM, em_shl),
3841 F(DstMem | ModRM, em_shr),
3842 F(DstMem | ModRM, em_shl),
3843 F(DstMem | ModRM, em_sar),
3844};
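/*
 * Slot 6 of group2 really is em_shl a second time: opcode /6 is the
 * undocumented SAL encoding, which behaves identically to SHL (/4).
 */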
3845
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003846static const struct opcode group3[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003847 F(DstMem | SrcImm | NoWrite, em_test),
3848 F(DstMem | SrcImm | NoWrite, em_test),
Avi Kivity45a14672013-01-04 16:18:52 +02003849 F(DstMem | SrcNone | Lock, em_not),
3850 F(DstMem | SrcNone | Lock, em_neg),
Avi Kivityb9fa4092013-02-09 11:31:48 +02003851 F(DstXacc | Src2Mem, em_mul_ex),
3852 F(DstXacc | Src2Mem, em_imul_ex),
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02003853 F(DstXacc | Src2Mem, em_div_ex),
3854 F(DstXacc | Src2Mem, em_idiv_ex),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003855};
3856
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003857static const struct opcode group4[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003858 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3859 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003860 N, N, N, N, N, N,
3861};
3862
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003863static const struct opcode group5[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003864 F(DstMem | SrcNone | Lock, em_inc),
3865 F(DstMem | SrcNone | Lock, em_dec),
Nadav Amit58b70752014-10-24 11:35:09 +03003866 I(SrcMem | NearBranch, em_call_near_abs),
Nadav Amitacac6f892015-05-03 20:22:57 +03003867 I(SrcMemFAddr | ImplicitOps, em_call_far),
Nadav Amit58b70752014-10-24 11:35:09 +03003868 I(SrcMem | NearBranch, em_jmp_abs),
Nadav Amitf7784042014-09-18 22:39:41 +03003869 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3870 I(SrcMem | Stack, em_push), D(Undefined),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003871};
3872
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003873static const struct opcode group6[] = {
Nadav Amit63ea0a42015-01-08 11:59:03 +01003874 DI(Prot | DstMem, sldt),
3875 DI(Prot | DstMem, str),
Avi Kivitya14e5792012-06-13 12:28:33 +03003876 II(Prot | Priv | SrcMem16, em_lldt, lldt),
Avi Kivity80890002012-06-13 16:33:29 +03003877 II(Prot | Priv | SrcMem16, em_ltr, ltr),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02003878 N, N, N, N,
3879};
3880
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003881static const struct group_dual group7 = { {
Nadav Amit606b1c32014-06-02 18:34:06 +03003882 II(Mov | DstMem, em_sgdt, sgdt),
3883 II(Mov | DstMem, em_sidt, sidt),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003884 II(SrcMem | Priv, em_lgdt, lgdt),
3885 II(SrcMem | Priv, em_lidt, lidt),
3886 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3887 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3888 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003889}, {
Nadav Amit0f54a322014-08-29 11:26:55 +03003890 EXT(0, group7_rm0),
Avi Kivity5ef39c72011-04-21 12:21:50 +03003891 EXT(0, group7_rm1),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003892 N, EXT(0, group7_rm3),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003893 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3894 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3895 EXT(0, group7_rm7),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003896} };
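/*
 * In a group_dual such as group7, the first row decodes memory forms
 * (ModRM mod != 3) and the second row register forms (mod == 3);
 * x86_decode_insn picks between them using bits 7:6 of the ModRM byte.
 */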
3897
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003898static const struct opcode group8[] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003899 N, N, N, N,
Avi Kivity11c363b2013-01-19 19:51:54 +02003900 F(DstMem | SrcImmByte | NoWrite, em_bt),
3901 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3902 F(DstMem | SrcImmByte | Lock, em_btr),
3903 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003904};
3905
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003906static const struct group_dual group9 = { {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003907 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003908}, {
3909 N, N, N, N, N, N, N, N,
3910} };
3911
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003912static const struct opcode group11[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003913 I(DstMem | SrcImm | Mov | PageTable, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003914 X7(D(Undefined)),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03003915};
3916
Nadav Amit13e457e2014-10-13 13:04:13 +03003917static const struct gprefix pfx_0f_ae_7 = {
Nadav Amit3f6f1482014-10-13 13:04:14 +03003918 I(SrcMem | ByteOp, em_clflush), N, N, N,
Nadav Amit13e457e2014-10-13 13:04:13 +03003919};
3920
3921static const struct group_dual group15 = { {
3922 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3923}, {
3924 N, N, N, N, N, N, N, N,
3925} };
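/*
 * 0F AE /7 is CLFLUSH for the memory form; the register form (the
 * mod3 row of group15, e.g. SFENCE) is left unimplemented here.
 */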
3926
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003927static const struct gprefix pfx_0f_6f_0f_7f = {
Avi Kivitye5971752012-04-09 18:40:03 +03003928 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
Avi Kivityaa97bb42010-01-20 18:09:23 +02003929};
3930
Nadav Amit39f062f2014-11-26 15:47:18 +02003931static const struct instr_dual instr_dual_0f_2b = {
3932 I(0, em_mov), N
3933};
3934
Paolo Bonzinid5b77062014-07-14 12:54:48 +02003935static const struct gprefix pfx_0f_2b = {
Nadav Amit39f062f2014-11-26 15:47:18 +02003936 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
Avi Kivity3e114eb2012-04-09 18:40:01 +03003937};
3938
Igor Mammedov27ce8252014-03-15 21:01:59 +01003939static const struct gprefix pfx_0f_28_0f_29 = {
Igor Mammedov6fec27d2014-03-15 21:02:00 +01003940 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01003941};
3942
Alex Williamson0a370272014-07-11 11:56:31 -06003943static const struct gprefix pfx_0f_e7 = {
3944 N, I(Sse, em_mov), N, N,
3945};
3946
Gleb Natapov045a2822012-12-20 16:57:43 +02003947static const struct escape escape_d9 = { {
Nadav Amit16bebef2014-12-25 02:52:18 +02003948 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
Gleb Natapov045a2822012-12-20 16:57:43 +02003949}, {
3950 /* 0xC0 - 0xC7 */
3951 N, N, N, N, N, N, N, N,
3952 /* 0xC8 - 0xCF */
3953 N, N, N, N, N, N, N, N,
3954	/* 0xD0 - 0xD7 */
3955 N, N, N, N, N, N, N, N,
3956 /* 0xD8 - 0xDF */
3957 N, N, N, N, N, N, N, N,
3958 /* 0xE0 - 0xE7 */
3959 N, N, N, N, N, N, N, N,
3960 /* 0xE8 - 0xEF */
3961 N, N, N, N, N, N, N, N,
3962 /* 0xF0 - 0xF7 */
3963 N, N, N, N, N, N, N, N,
3964 /* 0xF8 - 0xFF */
3965 N, N, N, N, N, N, N, N,
3966} };
3967
3968static const struct escape escape_db = { {
3969 N, N, N, N, N, N, N, N,
3970}, {
3971 /* 0xC0 - 0xC7 */
3972 N, N, N, N, N, N, N, N,
3973 /* 0xC8 - 0xCF */
3974 N, N, N, N, N, N, N, N,
3975	/* 0xD0 - 0xD7 */
3976 N, N, N, N, N, N, N, N,
3977 /* 0xD8 - 0xDF */
3978 N, N, N, N, N, N, N, N,
3979 /* 0xE0 - 0xE7 */
3980 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3981 /* 0xE8 - 0xEF */
3982 N, N, N, N, N, N, N, N,
3983 /* 0xF0 - 0xF7 */
3984 N, N, N, N, N, N, N, N,
3985 /* 0xF8 - 0xFF */
3986 N, N, N, N, N, N, N, N,
3987} };
3988
3989static const struct escape escape_dd = { {
Nadav Amit16bebef2014-12-25 02:52:18 +02003990 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
Gleb Natapov045a2822012-12-20 16:57:43 +02003991}, {
3992 /* 0xC0 - 0xC7 */
3993 N, N, N, N, N, N, N, N,
3994 /* 0xC8 - 0xCF */
3995 N, N, N, N, N, N, N, N,
3996	/* 0xD0 - 0xD7 */
3997 N, N, N, N, N, N, N, N,
3998 /* 0xD8 - 0xDF */
3999 N, N, N, N, N, N, N, N,
4000 /* 0xE0 - 0xE7 */
4001 N, N, N, N, N, N, N, N,
4002 /* 0xE8 - 0xEF */
4003 N, N, N, N, N, N, N, N,
4004 /* 0xF0 - 0xF7 */
4005 N, N, N, N, N, N, N, N,
4006 /* 0xF8 - 0xFF */
4007 N, N, N, N, N, N, N, N,
4008} };
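/*
 * The escape tables mirror the x87 encoding: the first array is
 * indexed by the ModRM reg field for memory forms, the second by
 * (modrm - 0xc0) for register forms, which is exactly how the Escape
 * case in x86_decode_insn walks them.
 */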
4009
Nadav Amit39f062f2014-11-26 15:47:18 +02004010static const struct instr_dual instr_dual_0f_c3 = {
4011 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4012};
4013
Nadav Amit2276b512015-01-26 09:32:24 +02004014static const struct mode_dual mode_dual_63 = {
4015 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4016};
4017
Mathias Krausefd0a0d82012-08-30 01:30:15 +02004018static const struct opcode opcode_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03004019 /* 0x00 - 0x07 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004020 F6ALU(Lock, em_add),
Avi Kivity1cd196e2011-09-13 10:45:51 +03004021 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4022 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004023 /* 0x08 - 0x0F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004024 F6ALU(Lock | PageTable, em_or),
Avi Kivity1cd196e2011-09-13 10:45:51 +03004025 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4026 N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004027 /* 0x10 - 0x17 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004028 F6ALU(Lock, em_adc),
Avi Kivity1cd196e2011-09-13 10:45:51 +03004029 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4030 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004031 /* 0x18 - 0x1F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004032 F6ALU(Lock, em_sbb),
Avi Kivity1cd196e2011-09-13 10:45:51 +03004033 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4034 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004035 /* 0x20 - 0x27 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004036 F6ALU(Lock | PageTable, em_and), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004037 /* 0x28 - 0x2F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004038 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004039 /* 0x30 - 0x37 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004040 F6ALU(Lock, em_xor), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004041 /* 0x38 - 0x3F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004042 F6ALU(NoWrite, em_cmp), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004043 /* 0x40 - 0x4F */
Avi Kivity95413dc2013-01-19 19:51:53 +02004044 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004045 /* 0x50 - 0x57 */
Avi Kivity63540382010-07-29 15:11:55 +03004046 X8(I(SrcReg | Stack, em_push)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004047 /* 0x58 - 0x5F */
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09004048 X8(I(DstReg | Stack, em_pop)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004049 /* 0x60 - 0x67 */
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09004050 I(ImplicitOps | Stack | No64, em_pusha),
4051 I(ImplicitOps | Stack | No64, em_popa),
Nadav Amit2276b512015-01-26 09:32:24 +02004052 N, MD(ModRM, &mode_dual_63),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004053 N, N, N, N,
4054 /* 0x68 - 0x6F */
Avi Kivityd46164d2010-08-18 19:29:33 +03004055 I(SrcImm | Mov | Stack, em_push),
4056 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03004057 I(SrcImmByte | Mov | Stack, em_push),
4058 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004059 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
Takuya Yoshikawa2b5e97e2011-11-23 12:27:39 +09004060 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
Avi Kivity73fba5f2010-07-29 15:11:53 +03004061 /* 0x70 - 0x7F */
Nadav Amit58b70752014-10-24 11:35:09 +03004062 X16(D(SrcImmByte | NearBranch)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004063 /* 0x80 - 0x87 */
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09004064 G(ByteOp | DstMem | SrcImm, group1),
4065 G(DstMem | SrcImm, group1),
4066 G(ByteOp | DstMem | SrcImm | No64, group1),
4067 G(DstMem | SrcImmByte, group1),
Avi Kivityfb864fb2013-01-04 16:18:54 +02004068 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004069 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004070 /* 0x88 - 0x8F */
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004071 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004072 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004073 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09004074 D(ModRM | SrcMem | NoAccess | DstReg),
4075 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4076 G(0, group1A),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004077 /* 0x90 - 0x97 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02004078 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004079 /* 0x98 - 0x9F */
Avi Kivity61429142010-08-19 15:13:00 +03004080 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
Wei Yongjuncc4feed2010-08-25 14:10:53 +08004081 I(SrcImmFAddr | No64, em_call_far), N,
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09004082 II(ImplicitOps | Stack, em_pushf, pushf),
Paolo Bonzini98f73632013-10-31 11:19:42 +01004083 II(ImplicitOps | Stack, em_popf, popf),
4084 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004085 /* 0xA0 - 0xA7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004086 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004087 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004088 I2bv(SrcSI | DstDI | Mov | String, em_mov),
Nadav Amit5aca3722014-11-02 11:54:50 +02004089 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004090 /* 0xA8 - 0xAF */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004091 F2bv(DstAcc | SrcImm | NoWrite, em_test),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004092 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4093 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
Nadav Amit5aca3722014-11-02 11:54:50 +02004094 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004095 /* 0xB0 - 0xB7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004096 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004097 /* 0xB8 - 0xBF */
Nadav Amit5e2c6882012-12-06 21:55:10 -02004098 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004099 /* 0xC0 - 0xC7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02004100 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
Nadav Amit58b70752014-10-24 11:35:09 +03004101 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4102 I(ImplicitOps | NearBranch, em_ret),
Avi Kivityd4b43252011-09-13 10:45:50 +03004103 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4104 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03004105 G(ByteOp, group11), G(0, group11),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004106 /* 0xC8 - 0xCF */
Avi Kivity612e89f2012-06-12 20:03:23 +03004107 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
Nadav Amit16794aa2015-01-26 09:32:22 +02004108 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4109 I(ImplicitOps, em_ret_far),
Avi Kivity3c6e2762011-04-04 12:39:23 +02004110 D(ImplicitOps), DI(SrcImmByte, intn),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004111 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004112 /* 0xD0 - 0xD7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02004113 G(Src2One | ByteOp, group2), G(Src2One, group2),
4114 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02004115 I(DstAcc | SrcImmUByte | No64, em_aam),
Paolo Bonzini326f5782013-05-09 11:32:51 +02004116 I(DstAcc | SrcImmUByte | No64, em_aad),
4117 F(DstAcc | ByteOp | No64, em_salc),
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004118 I(DstAcc | SrcXLat | ByteOp, em_mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004119 /* 0xD8 - 0xDF */
Gleb Natapov045a2822012-12-20 16:57:43 +02004120 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004121 /* 0xE0 - 0xE7 */
Nadav Amit58b70752014-10-24 11:35:09 +03004122 X3(I(SrcImmByte | NearBranch, em_loop)),
4123 I(SrcImmByte | NearBranch, em_jcxz),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004124 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4125 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004126 /* 0xE8 - 0xEF */
Nadav Amit58b70752014-10-24 11:35:09 +03004127 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4128 I(SrcImmFAddr | No64, em_jmp_far),
4129 D(SrcImmByte | ImplicitOps | NearBranch),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004130 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4131 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004132 /* 0xF0 - 0xF7 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02004133 N, DI(ImplicitOps, icebp), N, N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02004134 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4135 G(ByteOp, group3), G(0, group3),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004136 /* 0xF8 - 0xFF */
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09004137 D(ImplicitOps), D(ImplicitOps),
4138 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004139 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4140};
4141
Mathias Krausefd0a0d82012-08-30 01:30:15 +02004142static const struct opcode twobyte_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03004143 /* 0x00 - 0x0F */
Joerg Roedeldee6bb72011-04-04 12:39:30 +02004144 G(0, group6), GD(0, &group7), N, N,
Borislav Petkovb51e9742013-09-22 16:44:52 +02004145 N, I(ImplicitOps | EmulateOnUD, em_syscall),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004146 II(ImplicitOps | Priv, em_clts, clts), N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02004147 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
Nadav Amit3f6f1482014-10-13 13:04:14 +03004148 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004149 /* 0x10 - 0x1F */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02004150 N, N, N, N, N, N, N, N,
Nadav Amit3f6f1482014-10-13 13:04:14 +03004151 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4152 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004153 /* 0x20 - 0x2F */
Nadav Amit9b88ae92014-05-25 23:05:21 +03004154 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4155 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4156 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4157 check_cr_write),
4158 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4159 check_dr_write),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004160 N, N, N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01004161 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4162 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
Paolo Bonzinid5b77062014-07-14 12:54:48 +02004163 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
Avi Kivity3e114eb2012-04-09 18:40:01 +03004164 N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004165 /* 0x30 - 0x3F */
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09004166 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
Joerg Roedel80612522011-04-04 12:39:33 +02004167 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09004168 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
Avi Kivity222d21a2011-11-10 14:57:30 +02004169 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
Borislav Petkovb51e9742013-09-22 16:44:52 +02004170 I(ImplicitOps | EmulateOnUD, em_sysenter),
4171 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
Avi Kivityd8671622011-02-01 16:32:03 +02004172 N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004173 N, N, N, N, N, N, N, N,
4174 /* 0x40 - 0x4F */
Nadav Amit140bad82014-06-15 16:13:00 +03004175 X16(D(DstReg | SrcMem | ModRM)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004176 /* 0x50 - 0x5F */
4177 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4178 /* 0x60 - 0x6F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02004179 N, N, N, N,
4180 N, N, N, N,
4181 N, N, N, N,
4182 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004183 /* 0x70 - 0x7F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02004184 N, N, N, N,
4185 N, N, N, N,
4186 N, N, N, N,
4187 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004188 /* 0x80 - 0x8F */
Nadav Amit58b70752014-10-24 11:35:09 +03004189 X16(D(SrcImm | NearBranch)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004190 /* 0x90 - 0x9F */
Wei Yongjunee45b582010-08-06 17:10:07 +08004191 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004192 /* 0xA0 - 0xA7 */
Avi Kivity1cd196e2011-09-13 10:45:51 +03004193 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
Avi Kivity11c363b2013-01-19 19:51:54 +02004194 II(ImplicitOps, em_cpuid, cpuid),
4195 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
Avi Kivity0bdea062013-01-19 19:51:50 +02004196 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4197 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004198 /* 0xA8 - 0xAF */
Avi Kivity1cd196e2011-09-13 10:45:51 +03004199 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004200 DI(ImplicitOps, rsm),
Avi Kivity11c363b2013-01-19 19:51:54 +02004201 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
Avi Kivity0bdea062013-01-19 19:51:50 +02004202 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4203 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
Nadav Amit13e457e2014-10-13 13:04:13 +03004204 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004205 /* 0xB0 - 0xB7 */
Nadav Amit2fcf5c82015-01-26 09:32:21 +02004206 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
Avi Kivityd4b43252011-09-13 10:45:50 +03004207 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
Avi Kivity11c363b2013-01-19 19:51:54 +02004208 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
Avi Kivityd4b43252011-09-13 10:45:50 +03004209 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4210 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004211 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004212 /* 0xB8 - 0xBF */
4213 N, N,
Takuya Yoshikawace7faab2011-11-22 15:17:48 +09004214 G(BitOp, group8),
Avi Kivity11c363b2013-01-19 19:51:54 +02004215 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
Nadav Amit900efe22015-03-30 15:39:21 +03004216 I(DstReg | SrcMem | ModRM, em_bsf_c),
4217 I(DstReg | SrcMem | ModRM, em_bsr_c),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004218 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity92998362012-06-13 12:25:06 +03004219 /* 0xC0 - 0xC7 */
Avi Kivitye47a5f52013-02-09 11:31:51 +02004220 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
Nadav Amit39f062f2014-11-26 15:47:18 +02004221 N, ID(0, &instr_dual_0f_c3),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004222 N, N, N, GD(0, &group9),
Avi Kivity92998362012-06-13 12:25:06 +03004223 /* 0xC8 - 0xCF */
4224 X8(I(DstReg, em_bswap)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004225 /* 0xD0 - 0xDF */
4226 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4227 /* 0xE0 - 0xEF */
Alex Williamson0a370272014-07-11 11:56:31 -06004228 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4229 N, N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004230 /* 0xF0 - 0xFF */
4231 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4232};
4233
Nadav Amit39f062f2014-11-26 15:47:18 +02004234static const struct instr_dual instr_dual_0f_38_f0 = {
4235 I(DstReg | SrcMem | Mov, em_movbe), N
4236};
4237
4238static const struct instr_dual instr_dual_0f_38_f1 = {
4239 I(DstMem | SrcReg | Mov, em_movbe), N
4240};
4241
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004242static const struct gprefix three_byte_0f_38_f0 = {
Nadav Amit39f062f2014-11-26 15:47:18 +02004243 ID(0, &instr_dual_0f_38_f0), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004244};
4245
4246static const struct gprefix three_byte_0f_38_f1 = {
Nadav Amit39f062f2014-11-26 15:47:18 +02004247 ID(0, &instr_dual_0f_38_f1), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004248};
4249
4250/*
4251 * Insns below are indexed by the third opcode byte; the mandatory
4252 * prefix then selects among the candidates for that byte.
4253 */
4254static const struct opcode opcode_map_0f_38[256] = {
4255 /* 0x00 - 0x7f */
4256 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
Borislav Petkov84cffe42013-10-29 12:54:56 +01004257 /* 0x80 - 0xef */
4258 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4259 /* 0xf0 - 0xf1 */
Nadav Amit53bb4f72014-12-07 11:49:42 +02004260 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4261 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
Borislav Petkov84cffe42013-10-29 12:54:56 +01004262 /* 0xf2 - 0xff */
4263 N, N, X4(N), X8(N)
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004264};
4265
Avi Kivity73fba5f2010-07-29 15:11:53 +03004266#undef D
4267#undef N
4268#undef G
4269#undef GD
4270#undef I
Avi Kivityaa97bb42010-01-20 18:09:23 +02004271#undef GP
Joerg Roedel01de8b02011-04-04 12:39:31 +02004272#undef EXT
Nadav Amit2276b512015-01-26 09:32:24 +02004273#undef MD
Nadav Amit2b42fce2015-01-26 09:32:25 +02004274#undef ID
Avi Kivity73fba5f2010-07-29 15:11:53 +03004275
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004276#undef D2bv
Joerg Roedelf6511932011-04-04 12:39:35 +02004277#undef D2bvIP
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004278#undef I2bv
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004279#undef I2bvIP
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09004280#undef F6ALU
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004281
Avi Kivity9dac77f2011-06-01 15:34:25 +03004282static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
Avi Kivity39f21ee2010-08-18 19:20:21 +03004283{
4284 unsigned size;
4285
Avi Kivity9dac77f2011-06-01 15:34:25 +03004286 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004287 if (size == 8)
4288 size = 4;
4289 return size;
4290}
4291
4292static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4293 unsigned size, bool sign_extension)
4294{
Avi Kivity39f21ee2010-08-18 19:20:21 +03004295 int rc = X86EMUL_CONTINUE;
4296
4297 op->type = OP_IMM;
4298 op->bytes = size;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004299 op->addr.mem.ea = ctxt->_eip;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004300 /* NB. Immediates are sign-extended as necessary. */
4301 switch (op->bytes) {
4302 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004303 op->val = insn_fetch(s8, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004304 break;
4305 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004306 op->val = insn_fetch(s16, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004307 break;
4308 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004309 op->val = insn_fetch(s32, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004310 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004311 case 8:
4312 op->val = insn_fetch(s64, ctxt);
4313 break;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004314 }
4315 if (!sign_extension) {
4316 switch (op->bytes) {
4317 case 1:
4318 op->val &= 0xff;
4319 break;
4320 case 2:
4321 op->val &= 0xffff;
4322 break;
4323 case 4:
4324 op->val &= 0xffffffff;
4325 break;
4326 }
4327 }
4328done:
4329 return rc;
4330}
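/*
 * A worked example of the rule above: a one-byte immediate 0xff is
 * fetched as (s8)-1, so on a 64-bit host op->val becomes ~0UL; with
 * sign_extension false it is then masked back down to 0xff.
 */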
4331
Avi Kivitya9945542011-09-13 10:45:41 +03004332static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4333 unsigned d)
4334{
4335 int rc = X86EMUL_CONTINUE;
4336
4337 switch (d) {
4338 case OpReg:
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004339 decode_register_operand(ctxt, op);
Avi Kivitya9945542011-09-13 10:45:41 +03004340 break;
4341 case OpImmUByte:
Avi Kivity608aabe2011-09-13 10:45:45 +03004342 rc = decode_imm(ctxt, op, 1, false);
Avi Kivitya9945542011-09-13 10:45:41 +03004343 break;
4344 case OpMem:
Avi Kivity41ddf972011-09-13 10:45:48 +03004345 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity0fe59122011-09-13 10:45:47 +03004346 mem_common:
Avi Kivitya9945542011-09-13 10:45:41 +03004347 *op = ctxt->memop;
4348 ctxt->memopp = op;
Paolo Bonzini96888972014-04-01 14:54:19 +02004349 if (ctxt->d & BitOp)
Avi Kivitya9945542011-09-13 10:45:41 +03004350 fetch_bit_operand(ctxt);
4351 op->orig_val = op->val;
4352 break;
Avi Kivity41ddf972011-09-13 10:45:48 +03004353 case OpMem64:
Nadav Amitaaa05f22014-06-02 18:34:10 +03004354 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
Avi Kivity41ddf972011-09-13 10:45:48 +03004355 goto mem_common;
Avi Kivitya9945542011-09-13 10:45:41 +03004356 case OpAcc:
4357 op->type = OP_REG;
4358 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004359 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Avi Kivitya9945542011-09-13 10:45:41 +03004360 fetch_register_operand(op);
4361 op->orig_val = op->val;
4362 break;
Avi Kivity820207c2013-02-09 11:31:45 +02004363 case OpAccLo:
4364 op->type = OP_REG;
4365 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4366 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4367 fetch_register_operand(op);
4368 op->orig_val = op->val;
4369 break;
4370 case OpAccHi:
4371 if (ctxt->d & ByteOp) {
4372 op->type = OP_NONE;
4373 break;
4374 }
4375 op->type = OP_REG;
4376 op->bytes = ctxt->op_bytes;
4377 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4378 fetch_register_operand(op);
4379 op->orig_val = op->val;
4380 break;
Avi Kivitya9945542011-09-13 10:45:41 +03004381 case OpDI:
4382 op->type = OP_MEM;
4383 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4384 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004385 register_address(ctxt, VCPU_REGS_RDI);
Avi Kivitya9945542011-09-13 10:45:41 +03004386 op->addr.mem.seg = VCPU_SREG_ES;
4387 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004388 op->count = 1;
Avi Kivitya9945542011-09-13 10:45:41 +03004389 break;
4390 case OpDX:
4391 op->type = OP_REG;
4392 op->bytes = 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004393 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivitya9945542011-09-13 10:45:41 +03004394 fetch_register_operand(op);
4395 break;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004396 case OpCL:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004397 op->type = OP_IMM;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004398 op->bytes = 1;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004399 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004400 break;
4401 case OpImmByte:
4402 rc = decode_imm(ctxt, op, 1, true);
4403 break;
4404 case OpOne:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004405 op->type = OP_IMM;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004406 op->bytes = 1;
4407 op->val = 1;
4408 break;
4409 case OpImm:
4410 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4411 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004412 case OpImm64:
4413 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4414 break;
Avi Kivity28867ce2012-01-16 15:08:44 +02004415 case OpMem8:
4416 ctxt->memop.bytes = 1;
Gleb Natapov660696d2013-04-24 13:38:36 +03004417 if (ctxt->memop.type == OP_REG) {
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02004418 ctxt->memop.addr.reg = decode_register(ctxt,
4419 ctxt->modrm_rm, true);
Gleb Natapov660696d2013-04-24 13:38:36 +03004420 fetch_register_operand(&ctxt->memop);
4421 }
Avi Kivity28867ce2012-01-16 15:08:44 +02004422 goto mem_common;
Avi Kivity0fe59122011-09-13 10:45:47 +03004423 case OpMem16:
4424 ctxt->memop.bytes = 2;
4425 goto mem_common;
4426 case OpMem32:
4427 ctxt->memop.bytes = 4;
4428 goto mem_common;
4429 case OpImmU16:
4430 rc = decode_imm(ctxt, op, 2, false);
4431 break;
4432 case OpImmU:
4433 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4434 break;
4435 case OpSI:
4436 op->type = OP_MEM;
4437 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4438 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004439 register_address(ctxt, VCPU_REGS_RSI);
Bandan Das573e80f2014-04-16 12:46:13 -04004440 op->addr.mem.seg = ctxt->seg_override;
Avi Kivity0fe59122011-09-13 10:45:47 +03004441 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004442 op->count = 1;
Avi Kivity0fe59122011-09-13 10:45:47 +03004443 break;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004444 case OpXLat:
4445 op->type = OP_MEM;
4446 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4447 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004448 address_mask(ctxt,
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004449 reg_read(ctxt, VCPU_REGS_RBX) +
4450 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
Bandan Das573e80f2014-04-16 12:46:13 -04004451 op->addr.mem.seg = ctxt->seg_override;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004452 op->val = 0;
4453 break;
Avi Kivity0fe59122011-09-13 10:45:47 +03004454 case OpImmFAddr:
4455 op->type = OP_IMM;
4456 op->addr.mem.ea = ctxt->_eip;
4457 op->bytes = ctxt->op_bytes + 2;
4458 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4459 break;
4460 case OpMemFAddr:
4461 ctxt->memop.bytes = ctxt->op_bytes + 2;
4462 goto mem_common;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004463 case OpES:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004464 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004465 op->val = VCPU_SREG_ES;
4466 break;
4467 case OpCS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004468 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004469 op->val = VCPU_SREG_CS;
4470 break;
4471 case OpSS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004472 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004473 op->val = VCPU_SREG_SS;
4474 break;
4475 case OpDS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004476 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004477 op->val = VCPU_SREG_DS;
4478 break;
4479 case OpFS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004480 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004481 op->val = VCPU_SREG_FS;
4482 break;
4483 case OpGS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004484 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004485 op->val = VCPU_SREG_GS;
4486 break;
Avi Kivitya9945542011-09-13 10:45:41 +03004487 case OpImplicit:
4488 /* Special instructions do their own operand decoding. */
4489 default:
4490 op->type = OP_NONE; /* Disable writeback. */
4491 break;
4492 }
4493
4494done:
4495 return rc;
4496}
4497
Takuya Yoshikawaef5d75c2011-05-15 00:57:43 +09004498int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004499{
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004500 int rc = X86EMUL_CONTINUE;
4501 int mode = ctxt->mode;
Avi Kivity46561642011-04-24 14:09:59 +03004502 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004503 bool op_prefix = false;
Bandan Das573e80f2014-04-16 12:46:13 -04004504 bool has_seg_override = false;
Avi Kivity46561642011-04-24 14:09:59 +03004505 struct opcode opcode;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004506
Avi Kivityf09ed832011-09-13 10:45:40 +03004507 ctxt->memop.type = OP_NONE;
4508 ctxt->memopp = NULL;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004509 ctxt->_eip = ctxt->eip;
Paolo Bonzini17052f12014-05-06 16:33:01 +02004510 ctxt->fetch.ptr = ctxt->fetch.data;
4511 ctxt->fetch.end = ctxt->fetch.data + insn_len;
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004512 ctxt->opcode_len = 1;
Andre Przywaradc25e892010-12-21 11:12:07 +01004513 if (insn_len > 0)
Avi Kivity9dac77f2011-06-01 15:34:25 +03004514 memcpy(ctxt->fetch.data, insn, insn_len);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004515 else {
Paolo Bonzini9506d572014-05-06 13:05:25 +02004516 rc = __do_insn_fetch_bytes(ctxt, 1);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004517 if (rc != X86EMUL_CONTINUE)
4518 return rc;
4519 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004520
4521 switch (mode) {
4522 case X86EMUL_MODE_REAL:
4523 case X86EMUL_MODE_VM86:
4524 case X86EMUL_MODE_PROT16:
4525 def_op_bytes = def_ad_bytes = 2;
4526 break;
4527 case X86EMUL_MODE_PROT32:
4528 def_op_bytes = def_ad_bytes = 4;
4529 break;
4530#ifdef CONFIG_X86_64
4531 case X86EMUL_MODE_PROT64:
4532 def_op_bytes = 4;
4533 def_ad_bytes = 8;
4534 break;
4535#endif
4536 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004537 return EMULATION_FAILED;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004538 }
4539
Avi Kivity9dac77f2011-06-01 15:34:25 +03004540 ctxt->op_bytes = def_op_bytes;
4541 ctxt->ad_bytes = def_ad_bytes;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004542
4543 /* Legacy prefixes. */
4544 for (;;) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004545 switch (ctxt->b = insn_fetch(u8, ctxt)) {
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004546 case 0x66: /* operand-size override */
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004547 op_prefix = true;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004548 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004549 ctxt->op_bytes = def_op_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004550 break;
4551 case 0x67: /* address-size override */
4552 if (mode == X86EMUL_MODE_PROT64)
4553 /* switch between 4/8 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004554 ctxt->ad_bytes = def_ad_bytes ^ 12;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004555 else
4556 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004557 ctxt->ad_bytes = def_ad_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004558 break;
4559 case 0x26: /* ES override */
4560 case 0x2e: /* CS override */
4561 case 0x36: /* SS override */
4562 case 0x3e: /* DS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004563 has_seg_override = true;
4564 ctxt->seg_override = (ctxt->b >> 3) & 3;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004565 break;
4566 case 0x64: /* FS override */
4567 case 0x65: /* GS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004568 has_seg_override = true;
4569 ctxt->seg_override = ctxt->b & 7;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004570 break;
4571 case 0x40 ... 0x4f: /* REX */
4572 if (mode != X86EMUL_MODE_PROT64)
4573 goto done_prefixes;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004574 ctxt->rex_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004575 continue;
4576 case 0xf0: /* LOCK */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004577 ctxt->lock_prefix = 1;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004578 break;
4579 case 0xf2: /* REPNE/REPNZ */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004580 case 0xf3: /* REP/REPE/REPZ */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004581 ctxt->rep_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004582 break;
4583 default:
4584 goto done_prefixes;
4585 }
4586
4587 /* Any legacy prefix after a REX prefix nullifies its effect. */
4588
Avi Kivity9dac77f2011-06-01 15:34:25 +03004589 ctxt->rex_prefix = 0;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004590 }
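	/*
	 * The XOR size toggles above are a compact flip between the two
	 * legal widths: def_op_bytes ^ 6 maps 2 <-> 4, and in long mode
	 * def_ad_bytes ^ 12 maps 4 <-> 8.
	 */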
4591
4592done_prefixes:
4593
4594 /* REX prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004595 if (ctxt->rex_prefix & 8)
4596 ctxt->op_bytes = 8; /* REX.W */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004597
4598 /* Opcode byte(s). */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004599 opcode = opcode_table[ctxt->b];
Wei Yongjund3ad6242010-08-05 16:34:39 +08004600 /* Two-byte opcode? */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004601 if (ctxt->b == 0x0f) {
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004602 ctxt->opcode_len = 2;
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004603 ctxt->b = insn_fetch(u8, ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004604 opcode = twobyte_table[ctxt->b];
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004605
4606 /* 0F_38 opcode map */
4607 if (ctxt->b == 0x38) {
4608 ctxt->opcode_len = 3;
4609 ctxt->b = insn_fetch(u8, ctxt);
4610 opcode = opcode_map_0f_38[ctxt->b];
4611 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004612 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004613 ctxt->d = opcode.flags;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004614
Takuya Yoshikawa9f4260e2012-04-30 17:48:25 +09004615 if (ctxt->d & ModRM)
4616 ctxt->modrm = insn_fetch(u8, ctxt);
4617
Nadav Amit7fe864d2014-06-02 18:34:03 +03004618 /* vex-prefix instructions are not implemented */
4619 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
Nadav Amitd14cb5d2014-11-02 11:54:58 +02004620 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
Nadav Amit7fe864d2014-06-02 18:34:03 +03004621 ctxt->d = NotImpl;
4622 }
4623
Avi Kivity9dac77f2011-06-01 15:34:25 +03004624 while (ctxt->d & GroupMask) {
4625 switch (ctxt->d & GroupMask) {
Avi Kivity46561642011-04-24 14:09:59 +03004626 case Group:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004627 goffset = (ctxt->modrm >> 3) & 7;
Avi Kivity46561642011-04-24 14:09:59 +03004628 opcode = opcode.u.group[goffset];
4629 break;
4630 case GroupDual:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004631 goffset = (ctxt->modrm >> 3) & 7;
4632 if ((ctxt->modrm >> 6) == 3)
Avi Kivity46561642011-04-24 14:09:59 +03004633 opcode = opcode.u.gdual->mod3[goffset];
4634 else
4635 opcode = opcode.u.gdual->mod012[goffset];
4636 break;
4637 case RMExt:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004638 goffset = ctxt->modrm & 7;
Joerg Roedel01de8b02011-04-04 12:39:31 +02004639 opcode = opcode.u.group[goffset];
Avi Kivity46561642011-04-24 14:09:59 +03004640 break;
4641 case Prefix:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004642 if (ctxt->rep_prefix && op_prefix)
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004643 return EMULATION_FAILED;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004644 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
Avi Kivity46561642011-04-24 14:09:59 +03004645 switch (simd_prefix) {
4646 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4647 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4648 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4649 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4650 }
4651 break;
Gleb Natapov045a2822012-12-20 16:57:43 +02004652 case Escape:
4653 if (ctxt->modrm > 0xbf)
4654 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4655 else
4656 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4657 break;
Nadav Amit39f062f2014-11-26 15:47:18 +02004658 case InstrDual:
4659 if ((ctxt->modrm >> 6) == 3)
4660 opcode = opcode.u.idual->mod3;
4661 else
4662 opcode = opcode.u.idual->mod012;
4663 break;
Nadav Amit2276b512015-01-26 09:32:24 +02004664 case ModeDual:
4665 if (ctxt->mode == X86EMUL_MODE_PROT64)
4666 opcode = opcode.u.mdual->mode64;
4667 else
4668 opcode = opcode.u.mdual->mode32;
4669 break;
Avi Kivity46561642011-04-24 14:09:59 +03004670 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004671 return EMULATION_FAILED;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004672 }
Avi Kivity46561642011-04-24 14:09:59 +03004673
Avi Kivityb1ea50b2011-09-13 10:45:42 +03004674 ctxt->d &= ~(u64)GroupMask;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004675 ctxt->d |= opcode.flags;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004676 }
4677
Paolo Bonzinie24186e2014-03-27 12:00:57 +01004678 /* Unrecognised? */
4679 if (ctxt->d == 0)
4680 return EMULATION_FAILED;
4681
Avi Kivity9dac77f2011-06-01 15:34:25 +03004682 ctxt->execute = opcode.u.execute;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004683
Nadav Amit3a6095a2014-08-13 16:50:13 +03004684 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4685 return EMULATION_FAILED;
4686
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004687 if (unlikely(ctxt->d &
Nadav Amited9aad22014-11-02 11:55:00 +02004688 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4689 No16))) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004690 /*
4691 * These are copied unconditionally here, and checked unconditionally
4692 * in x86_emulate_insn.
4693 */
4694 ctxt->check_perm = opcode.check_perm;
4695 ctxt->intercept = opcode.intercept;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004696
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004697 if (ctxt->d & NotImpl)
4698 return EMULATION_FAILED;
Avi Kivityd8671622011-02-01 16:32:03 +02004699
Nadav Amit58b70752014-10-24 11:35:09 +03004700 if (mode == X86EMUL_MODE_PROT64) {
4701 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4702 ctxt->op_bytes = 8;
4703 else if (ctxt->d & NearBranch)
4704 ctxt->op_bytes = 8;
4705 }
Avi Kivity7f9b4b72010-08-01 14:46:54 +03004706
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004707 if (ctxt->d & Op3264) {
4708 if (mode == X86EMUL_MODE_PROT64)
4709 ctxt->op_bytes = 8;
4710 else
4711 ctxt->op_bytes = 4;
4712 }
4713
Nadav Amited9aad22014-11-02 11:55:00 +02004714 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4715 ctxt->op_bytes = 4;
4716
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004717 if (ctxt->d & Sse)
4718 ctxt->op_bytes = 16;
4719 else if (ctxt->d & Mmx)
4720 ctxt->op_bytes = 8;
4721 }
Avi Kivity12537912011-03-29 11:41:27 +02004722
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004723 /* ModRM and SIB bytes. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004724 if (ctxt->d & ModRM) {
Avi Kivityf09ed832011-09-13 10:45:40 +03004725 rc = decode_modrm(ctxt, &ctxt->memop);
Bandan Das573e80f2014-04-16 12:46:13 -04004726 if (!has_seg_override) {
4727 has_seg_override = true;
4728 ctxt->seg_override = ctxt->modrm_seg;
4729 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004730 } else if (ctxt->d & MemAbs)
Avi Kivityf09ed832011-09-13 10:45:40 +03004731 rc = decode_abs(ctxt, &ctxt->memop);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004732 if (rc != X86EMUL_CONTINUE)
4733 goto done;
4734
Bandan Das573e80f2014-04-16 12:46:13 -04004735 if (!has_seg_override)
4736 ctxt->seg_override = VCPU_SREG_DS;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004737
Bandan Das573e80f2014-04-16 12:46:13 -04004738 ctxt->memop.addr.mem.seg = ctxt->seg_override;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004739
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004740 /*
4741 * Decode and fetch the source operand: register, memory
4742 * or immediate.
4743 */
Avi Kivity0fe59122011-09-13 10:45:47 +03004744 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004745 if (rc != X86EMUL_CONTINUE)
4746 goto done;
4747
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004748 /*
4749 * Decode and fetch the second source operand: register, memory
4750 * or immediate.
4751 */
Avi Kivity4dd6a572011-09-13 10:45:43 +03004752 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004753 if (rc != X86EMUL_CONTINUE)
4754 goto done;
4755
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004756 /* Decode and fetch the destination operand: register or memory. */
Avi Kivitya9945542011-09-13 10:45:41 +03004757 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004758
Bandan Das41061cd2014-04-16 12:46:14 -04004759 if (ctxt->rip_relative)
Nadav Amit1c1c35a2014-11-19 17:43:09 +02004760 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4761 ctxt->memopp->addr.mem.ea + ctxt->_eip);
Avi Kivitycb16c342011-06-19 19:21:11 +03004762
Paolo Bonzinia430c912014-10-23 14:54:14 +02004763done:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004764 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004765}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition applies only to REPE/REPZ and
	 * REPNE/REPNZ. If the repeat string operation prefix is one of
	 * these, test the corresponding termination condition:
	 * - if REPE/REPZ and ZF = 0 then done
	 * - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}
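
/*
 * A hypothetical check (a sketch, not kernel code) making the rule above
 * concrete: a REPE CMPSB (opcode 0xa6) must stop as soon as a comparison
 * clears ZF, even while RCX is still non-zero.
 */
#if 0
static bool repe_cmpsb_must_stop(struct x86_emulate_ctxt *ctxt)
{
	ctxt->b = 0xa6;				/* cmpsb */
	ctxt->rep_prefix = REPE_PREFIX;
	ctxt->eflags &= ~X86_EFLAGS_ZF;		/* last compare differed */
	return string_insn_completed(ctxt);	/* expected: true */
}
#endif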

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}
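
/*
 * In flush_pending_x87_faults() above, fwait raises any pending x87
 * exception on the host; if #MF fires, the exception-table entry created
 * by _ASM_EXTABLE(1b, 3b) redirects execution to label 3, which sets
 * "fault" and jumps back to label 2. The host fault is thus converted
 * into a guest #MF (MF_VECTOR) instead of an unhandled kernel fault.
 */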

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop)
	    : "c"(ctxt->src2.val));
	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
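
/*
 * How the size dispatch above works: each fastop symbol emitted earlier in
 * this file is a table of four stubs spaced FASTOP_SIZE bytes apart, one
 * per operand size. A sketch of the resulting offsets (byte-sized ops skip
 * the adjustment and use offset 0 directly):
 */
#if 0
	fop + __ffs(2) * FASTOP_SIZE;	/* +1*FASTOP_SIZE: 16-bit stub */
	fop + __ffs(4) * FASTOP_SIZE;	/* +2*FASTOP_SIZE: 32-bit stub */
	fop + __ffs(8) * FASTOP_SIZE;	/* +3*FASTOP_SIZE: 64-bit stub */
#endif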

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}
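
/*
 * The memset in init_decode_cache() assumes the per-instruction decode
 * fields of struct x86_emulate_ctxt are laid out contiguously, starting at
 * rip_relative and ending just before modrm. A sketch of one compile-time
 * guard for that assumption (not present in the kernel):
 */
#if 0
	BUILD_BUG_ON(offsetof(struct x86_emulate_ctxt, rip_relative) >=
		     offsetof(struct x86_emulate_ctxt, modrm));
#endif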

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}
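
	/*
	 * Concretely: "lock addl %eax, (%rdx)" (Lock-capable opcode with a
	 * memory destination) is emulated, while "lock addl %eax, %ebx" or
	 * a LOCK prefix on a non-lockable opcode raises #UD in the guest.
	 */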

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the FPU is exception-safe, we can
			 * fetch operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instructions can be executed only at CPL 0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction-specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}
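
	/*
	 * The ordering above mirrors the architectural check sequence of an
	 * SVM-capable CPU: intercept checkpoints are taken before the
	 * exception checks (X86_ICPT_PRE_EXCEPT), after them
	 * (X86_ICPT_POST_EXCEPT), and, further down, after the memory
	 * access (X86_ICPT_POST_MEMACCESS).
	 */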

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}
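
	/*
	 * Worked example for the 0x98 case above: with 16-bit operands,
	 * cbw sign-extends AL into AX, so AL = 0x80 yields AX = 0xff80;
	 * the (s8) cast performs the sign extension and the 2-byte
	 * writeback truncates the result.
	 */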

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is exhausted or, if it is not used, after every
			 * 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache. This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}
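
	/*
	 * Example: a "rep movsb" with a large RCX is not emulated in one
	 * go; whenever RCX & 0x3ff reaches 0 (i.e. every 1024 iterations)
	 * the code above falls through to "done" without advancing RIP,
	 * so the guest re-executes the instruction and interrupts can be
	 * delivered in between.
	 */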

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc. */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}
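
	/*
	 * Two details worth spelling out: for cmov with 32-bit operands the
	 * destination is written back even when the condition is false,
	 * because a 32-bit register write zeroes the upper half in long
	 * mode; and for movzx/movsx an 8-bit source of 0x80 yields
	 * 0x00000080 (zero-extended) versus 0xffffff80 (sign-extended).
	 */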

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
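
/*
 * These two wrappers let the caller manage the GPR cache in ctxt->_regs
 * explicitly: invalidating it forces the next reg_read() to refetch from
 * the vcpu, while writing it back flushes dirty registers out. A sketch of
 * the intended pairing (the caller-side comments are assumptions):
 */
#if 0
	emulator_invalidate_register_cache(ctxt);	/* before reuse */
	/* ... decode and emulate, mutating cached registers ... */
	emulator_writeback_register_cache(ctxt);	/* publish results */
#endif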