/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
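/*
 * Illustrative note (not part of the original source): an opcode flagged
 * e.g. DstReg | SrcMem packs OpReg into bits [DstShift, DstShift + OpBits)
 * and OpMem into bits [SrcShift, SrcShift + OpBits) of the 56-bit flags
 * field; each operand type can then be recovered with an expression of the
 * form (flags >> DstShift) & OpMask.
 */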

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)      /* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
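/*
 * Illustrative note: the X<n> macros simply repeat their argument, so
 * X16(x) expands to sixteen comma-separated copies of x (built from two
 * X8(x) expansions).  The opcode tables later in this file use them to
 * fill runs of identical table entries without spelling each one out.
 */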

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
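/*
 * Illustrative sketch (an assumption drawn from the comment above, not a
 * statement of the exact caller): each FASTOP* macro below emits one
 * FASTOP_SIZE-byte stub per operand size, laid out back to back after the
 * em_<op> label, so a caller can select the right stub by adding a multiple
 * of FASTOP_SIZE to em_<op> instead of going through a jump table.
 */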

struct fastop;

struct opcode {
        u64 flags : 56;
        u64 intercept : 8;
        union {
                int (*execute)(struct x86_emulate_ctxt *ctxt);
                const struct opcode *group;
                const struct group_dual *gdual;
                const struct gprefix *gprefix;
                const struct escape *esc;
                void (*fastop)(struct fastop *fake);
        } u;
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
        struct opcode mod012[8];
        struct opcode mod3[8];
};

struct gprefix {
        struct opcode pfx_no;
        struct opcode pfx_66;
        struct opcode pfx_f2;
        struct opcode pfx_f3;
};

struct escape {
        struct opcode op[8];
        struct opcode high[64];
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        if (!(ctxt->regs_valid & (1 << nr))) {
                ctxt->regs_valid |= 1 << nr;
                ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
        }
        return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        ctxt->regs_valid |= 1 << nr;
        ctxt->regs_dirty |= 1 << nr;
        return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        reg_read(ctxt, nr);
        return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
        unsigned reg;

        for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
                ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
        ctxt->regs_dirty = 0;
        ctxt->regs_valid = 0;
}
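/*
 * Illustrative note: reg_read()/reg_write()/reg_rmw() above implement a
 * small write-back cache over the guest general-purpose registers.  A
 * typical emulation pass touches only a few of them; writeback_registers()
 * then pushes just the dirty ones back through ->write_gpr(), and
 * invalidate_registers() clears both bitmaps before the next instruction.
 */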

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
        extern void em_##op(struct fastop *fake); \
        asm(".pushsection .text, \"ax\" \n\t" \
            ".global em_" #op " \n\t" \
            FOP_ALIGN \
            "em_" #op ": \n\t"

#define FOP_END \
            ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
        FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
        FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
        FOP_START(op) \
        FOP1E(op##b, al) \
        FOP1E(op##w, ax) \
        FOP1E(op##l, eax) \
        ON64(FOP1E(op##q, rax)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
        FOP_START(name) \
        FOP1E(op, cl) \
        FOP1E(op, cx) \
        FOP1E(op, ecx) \
        ON64(FOP1E(op, rcx)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
        FOP_START(name) \
        FOP1EEX(op, cl) \
        FOP1EEX(op, cx) \
        FOP1EEX(op, ecx) \
        ON64(FOP1EEX(op, rcx)) \
        FOP_END

#define FOP2E(op, dst, src) \
        FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
        FOP_START(op) \
        FOP2E(op##b, al, dl) \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
        FOP_START(op) \
        FOP2E(op##b, al, cl) \
        FOP2E(op##w, ax, cl) \
        FOP2E(op##l, eax, cl) \
        ON64(FOP2E(op##q, rax, cl)) \
        FOP_END

#define FOP3E(op, dst, src, src2) \
        FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP3E(op##w, ax, dx, cl) \
        FOP3E(op##l, eax, edx, cl) \
        ON64(FOP3E(op##q, rax, rdx, cl)) \
        FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
                                    enum x86_intercept intercept,
                                    enum x86_intercept_stage stage)
{
        struct x86_instruction_info info = {
                .intercept  = intercept,
                .rep_prefix = ctxt->rep_prefix,
                .modrm_mod  = ctxt->modrm_mod,
                .modrm_reg  = ctxt->modrm_reg,
                .modrm_rm   = ctxt->modrm_rm,
                .src_val    = ctxt->src.val64,
                .dst_val    = ctxt->dst.val64,
                .src_bytes  = ctxt->src.bytes,
                .dst_bytes  = ctxt->dst.bytes,
                .ad_bytes   = ctxt->ad_bytes,
                .next_rip   = ctxt->eip,
        };

        return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
        *dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
        return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
        u16 sel;
        struct desc_struct ss;

        if (ctxt->mode == X86EMUL_MODE_PROT64)
                return ~0UL;
        ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
        return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
        return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        if (ctxt->ad_bytes == sizeof(unsigned long))
                return reg;
        else
                return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
        assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
        ulong mask;

        if (ctxt->ad_bytes == sizeof(unsigned long))
                mask = ~0UL;
        else
                mask = ad_mask(ctxt);
        masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
        masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
        register_address_increment(ctxt, &ctxt->_eip, rel);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
        u32 limit = get_desc_limit(desc);

        return desc->g ? (limit << 12) | 0xfff : limit;
}
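/*
 * Illustrative example: with the granularity bit set, a raw limit of
 * 0xfffff scales to (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4 GiB
 * segment; with g clear the raw value is used as-is (here just under 1 MiB).
 */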

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
                return 0;

        return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
                             u32 error, bool valid)
{
        WARN_ON(vec > 0x1f);
        ctxt->exception.vector = vec;
        ctxt->exception.error_code = error;
        ctxt->exception.error_code_valid = valid;
        return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
        u16 selector;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
        return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
                                 unsigned seg)
{
        u16 dummy;
        u32 base3;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
        if (likely(size < 16))
                return false;

        if (ctxt->d & Aligned)
                return true;
        else if (ctxt->d & Unaligned)
                return false;
        else if (ctxt->d & Avx)
                return false;
        else
                return true;
}

static int __linearize(struct x86_emulate_ctxt *ctxt,
                       struct segmented_address addr,
                       unsigned size, bool write, bool fetch,
                       ulong *linear)
{
        struct desc_struct desc;
        bool usable;
        ulong la;
        u32 lim;
        u16 sel;
        unsigned cpl;

        la = seg_base(ctxt, addr.seg) + addr.ea;
        switch (ctxt->mode) {
        case X86EMUL_MODE_PROT64:
                if (((signed long)la << 16) >> 16 != la)
                        return emulate_gp(ctxt, 0);
                break;
        default:
                usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
                                                addr.seg);
                if (!usable)
                        goto bad;
                /* code segment in protected mode or read-only data segment */
                if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
                                        || !(desc.type & 2)) && write)
                        goto bad;
                /* unreadable code segment */
                if (!fetch && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
                if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
                    (ctxt->d & NoBigReal)) {
                        /* la is between zero and 0xffff */
                        if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
                                goto bad;
                } else if ((desc.type & 8) || !(desc.type & 4)) {
                        /* expand-up segment */
                        if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
                                goto bad;
                } else {
                        /* expand-down segment */
                        if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
                                goto bad;
                        lim = desc.d ? 0xffffffff : 0xffff;
                        if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
                                goto bad;
                }
                cpl = ctxt->ops->cpl(ctxt);
                if (!(desc.type & 8)) {
                        /* data segment */
                        if (cpl > desc.dpl)
                                goto bad;
                } else if ((desc.type & 8) && !(desc.type & 4)) {
                        /* nonconforming code segment */
                        if (cpl != desc.dpl)
                                goto bad;
                } else if ((desc.type & 8) && (desc.type & 4)) {
                        /* conforming code segment */
                        if (cpl < desc.dpl)
                                goto bad;
                }
                break;
        }
        if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
                la &= (u32)-1;
        if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
                return emulate_gp(ctxt, 0);
        *linear = la;
        return X86EMUL_CONTINUE;
bad:
        if (addr.seg == VCPU_SREG_SS)
                return emulate_ss(ctxt, sel);
        else
                return emulate_gp(ctxt, sel);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
                     struct segmented_address addr,
                     unsigned size, bool write,
                     ulong *linear)
{
        return __linearize(ctxt, addr, size, write, false, linear);
}


static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
                              unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
        int rc;
        unsigned size;
        unsigned long linear;
        int cur_size = ctxt->fetch.end - ctxt->fetch.data;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = ctxt->eip + cur_size };

        size = 15UL ^ cur_size;
        rc = __linearize(ctxt, addr, size, false, true, &linear);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;

        size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

        /*
         * One instruction can only straddle two pages,
         * and one has been loaded at the beginning of
         * x86_decode_insn.  So, if not enough bytes
         * still, we must have hit the 15-byte boundary.
         */
        if (unlikely(size < op_size))
                return X86EMUL_UNHANDLEABLE;
        rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
                              size, &ctxt->exception);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;
        ctxt->fetch.end += size;
        return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
                                               unsigned size)
{
        if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
                return __do_insn_fetch_bytes(ctxt, size);
        else
                return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)                                        \
({      _type _x;                                                       \
                                                                        \
        rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));                 \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
        ctxt->_eip += sizeof(_type);                                    \
        _x = *(_type __aligned(1) *) ctxt->fetch.ptr;                   \
        ctxt->fetch.ptr += sizeof(_type);                               \
        _x;                                                             \
})

#define insn_fetch_arr(_arr, _size, _ctxt)                              \
({                                                                      \
        rc = do_insn_fetch_bytes(_ctxt, _size);                         \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
        ctxt->_eip += (_size);                                          \
        memcpy(_arr, ctxt->fetch.ptr, _size);                           \
        ctxt->fetch.ptr += (_size);                                     \
})
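/*
 * Illustrative usage (assumes a local 'rc' and a 'done' label in the caller,
 * as in the decode helpers below): fetching a ModRM byte followed by a
 * 32-bit displacement looks like
 *
 *        u8 modrm = insn_fetch(u8, ctxt);
 *        s32 disp = insn_fetch(s32, ctxt);
 *
 * each call tops up the fetch cache if needed, bumps ctxt->_eip and
 * advances ctxt->fetch.ptr by sizeof(type).
 */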

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
                             int byteop)
{
        void *p;
        int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

        if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
                p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
        else
                p = reg_rmw(ctxt, modrm_reg);
        return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           u16 *size, unsigned long *address, int op_bytes)
{
        int rc;

        if (op_bytes == 2)
                op_bytes = 3;
        *address = 0;
        rc = segmented_read_std(ctxt, addr, size, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        addr.ea += 2;
        rc = segmented_read_std(ctxt, addr, address, op_bytes);
        return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
        u8 rc;
        void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

        flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
        asm("push %[flags]; popf; call *%[fastop]"
            : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
        return rc;
}
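/*
 * Illustrative note: FOP_SETCC() aligns every setcc stub above to 4 bytes,
 * so test_cc() indexes the table by condition code; e.g. condition 0x4 (ZF)
 * resolves to em_setcc + 4 * 4, the setz stub, whose result lands in %al
 * and is picked up through the "=a"(rc) output constraint.
 */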

static void fetch_register_operand(struct operand *op)
{
        switch (op->bytes) {
        case 1:
                op->val = *(u8 *)op->addr.reg;
                break;
        case 2:
                op->val = *(u16 *)op->addr.reg;
                break;
        case 4:
                op->val = *(u32 *)op->addr.reg;
                break;
        case 8:
                op->val = *(u64 *)op->addr.reg;
                break;
        }
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
        case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
        case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
        case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
        case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
        case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
        case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
        case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
        case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
        case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
        case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
        case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
        case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
        case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
        case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
                          int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
        case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
        case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
        case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
        case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
        case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
        case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
        case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
        case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
        case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
        case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
        case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
        case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
        case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
        case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
        case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
        case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
        case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
        case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
        case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
        case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
        case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
        case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
        case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
        case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
        case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
        case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
        case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
        case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fninit");
        ctxt->ops->put_fpu(ctxt);
        return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
        u16 fcw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstcw %0": "+m"(fcw));
        ctxt->ops->put_fpu(ctxt);

        /* force 2 byte destination */
        ctxt->dst.bytes = 2;
        ctxt->dst.val = fcw;

        return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
        u16 fsw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstsw %0": "+m"(fsw));
        ctxt->ops->put_fpu(ctxt);

        /* force 2 byte destination */
        ctxt->dst.bytes = 2;
        ctxt->dst.val = fsw;

        return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
                                    struct operand *op)
{
        unsigned reg = ctxt->modrm_reg;

        if (!(ctxt->d & ModRM))
                reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

        if (ctxt->d & Sse) {
                op->type = OP_XMM;
                op->bytes = 16;
                op->addr.xmm = reg;
                read_sse_reg(ctxt, &op->vec_val, reg);
                return;
        }
        if (ctxt->d & Mmx) {
                reg &= 7;
                op->type = OP_MM;
                op->bytes = 8;
                op->addr.mm = reg;
                return;
        }

        op->type = OP_REG;
        op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

        fetch_register_operand(op);
        op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
        if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
                ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                        struct operand *op)
{
        u8 sib;
        int index_reg, base_reg, scale;
        int rc = X86EMUL_CONTINUE;
        ulong modrm_ea = 0;

        ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
        index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
        base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

        ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
        ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
        ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
        ctxt->modrm_seg = VCPU_SREG_DS;

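        /*
         * Illustrative example: a ModRM byte of 0xd1 has mod == 3, reg == 2,
         * rm == 1, so the operand is a register and the branch below takes
         * the register path; mod values 0-2 fall through to the 16-bit or
         * 32/64-bit effective-address decode further down.
         */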
        if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
                                ctxt->d & ByteOp);
                if (ctxt->d & Sse) {
                        op->type = OP_XMM;
                        op->bytes = 16;
                        op->addr.xmm = ctxt->modrm_rm;
                        read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
                        return rc;
                }
                if (ctxt->d & Mmx) {
                        op->type = OP_MM;
                        op->bytes = 8;
                        op->addr.mm = ctxt->modrm_rm & 7;
                        return rc;
                }
                fetch_register_operand(op);
                return rc;
        }

        op->type = OP_MEM;

        if (ctxt->ad_bytes == 2) {
                unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
                unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
                unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
                unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

                /* 16-bit ModR/M decode. */
                switch (ctxt->modrm_mod) {
                case 0:
                        if (ctxt->modrm_rm == 6)
                                modrm_ea += insn_fetch(u16, ctxt);
                        break;
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(u16, ctxt);
                        break;
                }
                switch (ctxt->modrm_rm) {
                case 0:
                        modrm_ea += bx + si;
                        break;
                case 1:
                        modrm_ea += bx + di;
                        break;
                case 2:
                        modrm_ea += bp + si;
                        break;
                case 3:
                        modrm_ea += bp + di;
                        break;
                case 4:
                        modrm_ea += si;
                        break;
                case 5:
                        modrm_ea += di;
                        break;
                case 6:
                        if (ctxt->modrm_mod != 0)
                                modrm_ea += bp;
                        break;
                case 7:
                        modrm_ea += bx;
                        break;
                }
                if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
                    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
                        ctxt->modrm_seg = VCPU_SREG_SS;
                modrm_ea = (u16)modrm_ea;
        } else {
                /* 32/64-bit ModR/M decode. */
                if ((ctxt->modrm_rm & 7) == 4) {
                        sib = insn_fetch(u8, ctxt);
                        index_reg |= (sib >> 3) & 7;
                        base_reg |= sib & 7;
                        scale = sib >> 6;

                        if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
                                modrm_ea += insn_fetch(s32, ctxt);
                        else {
                                modrm_ea += reg_read(ctxt, base_reg);
                                adjust_modrm_seg(ctxt, base_reg);
                        }
                        if (index_reg != 4)
                                modrm_ea += reg_read(ctxt, index_reg) << scale;
                } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
                        if (ctxt->mode == X86EMUL_MODE_PROT64)
                                ctxt->rip_relative = 1;
                } else {
                        base_reg = ctxt->modrm_rm;
                        modrm_ea += reg_read(ctxt, base_reg);
                        adjust_modrm_seg(ctxt, base_reg);
                }
                switch (ctxt->modrm_mod) {
                case 0:
                        if (ctxt->modrm_rm == 5)
                                modrm_ea += insn_fetch(s32, ctxt);
                        break;
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(s32, ctxt);
                        break;
                }
        }
        op->addr.mem.ea = modrm_ea;
        if (ctxt->ad_bytes != 8)
                ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
        return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
                      struct operand *op)
{
        int rc = X86EMUL_CONTINUE;

        op->type = OP_MEM;
        switch (ctxt->ad_bytes) {
        case 2:
                op->addr.mem.ea = insn_fetch(u16, ctxt);
                break;
        case 4:
                op->addr.mem.ea = insn_fetch(u32, ctxt);
                break;
        case 8:
                op->addr.mem.ea = insn_fetch(u64, ctxt);
                break;
        }
done:
        return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
        long sv = 0, mask;

        if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
                mask = ~((long)ctxt->dst.bytes * 8 - 1);

                if (ctxt->src.bytes == 2)
                        sv = (s16)ctxt->src.val & (s16)mask;
                else if (ctxt->src.bytes == 4)
                        sv = (s32)ctxt->src.val & (s32)mask;
                else
                        sv = (s64)ctxt->src.val & (s64)mask;

                ctxt->dst.addr.mem.ea += (sv >> 3);
        }

        /* only subword offset */
        ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
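/*
 * Illustrative example: for a memory-destination BT/BTS with a register bit
 * offset of 100 and a 16-bit operand, mask is ~15, sv becomes 96, the
 * effective address is advanced by sv >> 3 == 12 bytes, and the remaining
 * in-word offset 100 & 15 == 4 is left in ctxt->src.val.
 */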

static int read_emulated(struct x86_emulate_ctxt *ctxt,
                         unsigned long addr, void *dest, unsigned size)
{
        int rc;
        struct read_cache *mc = &ctxt->mem_read;

        if (mc->pos < mc->end)
                goto read_cached;

        WARN_ON((mc->end + size) >= sizeof(mc->data));

        rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
                                      &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        mc->end += size;

read_cached:
        memcpy(dest, mc->data + mc->pos, size);
        mc->pos += size;
        return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
                          struct segmented_address addr,
                          void *data,
                          unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           const void *data,
                           unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_emulated(ctxt, linear, data, size,
                                         &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
                             struct segmented_address addr,
                             const void *orig_data, const void *data,
                             unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
                                           size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
                           unsigned int size, unsigned short port,
                           void *dest)
{
        struct read_cache *rc = &ctxt->io_read;

        if (rc->pos == rc->end) { /* refill pio read ahead */
                unsigned int in_page, n;
                unsigned int count = ctxt->rep_prefix ?
                        address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
                in_page = (ctxt->eflags & EFLG_DF) ?
                        offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1320 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
Mark Rustadb55a8142014-07-25 06:27:05 -07001321 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
Gleb Natapov7b262e92010-03-18 15:20:27 +02001322 if (n == 0)
1323 n = 1;
1324 rc->pos = rc->end = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001325 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
Gleb Natapov7b262e92010-03-18 15:20:27 +02001326 return 0;
1327 rc->end = n * size;
1328 }
1329
Nadav Amite6e39f02014-04-18 03:35:10 +03001330 if (ctxt->rep_prefix && (ctxt->d & String) &&
1331 !(ctxt->eflags & EFLG_DF)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001332 ctxt->dst.data = rc->data + rc->pos;
1333 ctxt->dst.type = OP_MEM_STR;
1334 ctxt->dst.count = (rc->end - rc->pos) / size;
1335 rc->pos = rc->end;
1336 } else {
1337 memcpy(dest, rc->data + rc->pos, size);
1338 rc->pos += size;
1339 }
Gleb Natapov7b262e92010-03-18 15:20:27 +02001340 return 1;
1341}
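/*
 * Illustrative sketch (assumed numbers): for "rep insb" with DF clear,
 * RCX == 300 and RDI pointing at the start of a page, the read-ahead
 * above asks the host for
 *
 *	n = min3(PAGE_SIZE, cache capacity, 300)
 *
 * ports' worth of data, i.e. 300 bytes if the read cache is large enough,
 * in a single pio_in_emulated() call, and then exposes the cached bytes
 * as an OP_MEM_STR destination so the whole string is written back in one
 * go.  Without a rep prefix (or with DF set) each iteration instead
 * copies one element out of rc->data until rc->pos reaches rc->end.
 */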
1342
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01001343static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1344 u16 index, struct desc_struct *desc)
1345{
1346 struct desc_ptr dt;
1347 ulong addr;
1348
1349 ctxt->ops->get_idt(ctxt, &dt);
1350
1351 if (dt.size < index * 8 + 7)
1352 return emulate_gp(ctxt, index << 3 | 0x2);
1353
1354 addr = dt.address + index * 8;
1355 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1356 &ctxt->exception);
1357}
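/*
 * Sketch (illustrative): protected-mode IDT entries are 8 bytes, so for
 * vector 3 the gate lives at idt.address + 24 and the limit check above
 * requires dt.size >= 3*8 + 7 = 31.  On failure the error code pushed is
 * (index << 3) | 2, i.e. the selector index with the IDT bit set.
 */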
1358
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001359static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001360 u16 selector, struct desc_ptr *dt)
1361{
Mathias Krause0225fb52012-08-30 01:30:16 +02001362 const struct x86_emulate_ops *ops = ctxt->ops;
Nadav Amit2eedcac2014-06-02 18:34:05 +03001363 u32 base3 = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001364
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001365 if (selector & 1 << 2) {
1366 struct desc_struct desc;
Avi Kivity1aa36612011-04-27 13:20:30 +03001367 u16 sel;
1368
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001369		memset(dt, 0, sizeof *dt);
Nadav Amit2eedcac2014-06-02 18:34:05 +03001370 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1371 VCPU_SREG_LDTR))
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001372 return;
1373
1374 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
Nadav Amit2eedcac2014-06-02 18:34:05 +03001375 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001376 } else
Avi Kivity4bff1e862011-04-20 13:37:53 +03001377 ops->get_gdt(ctxt, dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001378}
1379
1380/* allowed just for 8 bytes segments */
1381static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Avi Kivitye9194642012-06-13 16:29:39 +03001382 u16 selector, struct desc_struct *desc,
1383 ulong *desc_addr_p)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001384{
1385 struct desc_ptr dt;
1386 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001387 ulong addr;
1388
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001389 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001390
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001391 if (dt.size < index * 8 + 7)
1392 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001393
Avi Kivitye9194642012-06-13 16:29:39 +03001394 *desc_addr_p = addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001395 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1396 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001397}
1398
1399/* allowed just for 8 bytes segments */
1400static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001401 u16 selector, struct desc_struct *desc)
1402{
1403 struct desc_ptr dt;
1404 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001405 ulong addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001406
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001407 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001408
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001409 if (dt.size < index * 8 + 7)
1410 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001411
1412 addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001413 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1414 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001415}
1416
Gleb Natapov5601d052011-03-07 14:55:06 +02001417/* Does not support long mode */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001418static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Paolo Bonzini5045b462014-05-15 18:09:29 +02001419 u16 selector, int seg, u8 cpl, bool in_task_switch)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001420{
Avi Kivity869be992012-06-13 16:30:53 +03001421 struct desc_struct seg_desc, old_desc;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001422 u8 dpl, rpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001423 unsigned err_vec = GP_VECTOR;
1424 u32 err_code = 0;
1425 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
Avi Kivitye9194642012-06-13 16:29:39 +03001426 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001427 int ret;
Avi Kivity03ebebe2012-08-21 17:07:04 +03001428 u16 dummy;
Nadav Amite37a75a2014-06-02 18:34:04 +03001429 u32 base3 = 0;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001430
1431 memset(&seg_desc, 0, sizeof seg_desc);
1432
Kevin Wolff8da94e2013-04-11 14:06:03 +02001433 if (ctxt->mode == X86EMUL_MODE_REAL) {
1434 /* set real mode segment descriptor (keep limit etc. for
1435 * unreal mode) */
Avi Kivity03ebebe2012-08-21 17:07:04 +03001436 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001437 set_desc_base(&seg_desc, selector << 4);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001438 goto load;
Kevin Wolff8da94e2013-04-11 14:06:03 +02001439 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1440 /* VM86 needs a clean new segment descriptor */
1441 set_desc_base(&seg_desc, selector << 4);
1442 set_desc_limit(&seg_desc, 0xffff);
1443 seg_desc.type = 3;
1444 seg_desc.p = 1;
1445 seg_desc.s = 1;
1446 seg_desc.dpl = 3;
1447 goto load;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001448 }
1449
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001450 rpl = selector & 3;
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001451
1452 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1453 if ((seg == VCPU_SREG_CS
1454 || (seg == VCPU_SREG_SS
1455 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1456 || seg == VCPU_SREG_TR)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001457 && null_selector)
1458 goto exception;
1459
1460 /* TR should be in GDT only */
1461 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1462 goto exception;
1463
1464 if (null_selector) /* for NULL selector skip all following checks */
1465 goto load;
1466
Avi Kivitye9194642012-06-13 16:29:39 +03001467 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001468 if (ret != X86EMUL_CONTINUE)
1469 return ret;
1470
1471 err_code = selector & 0xfffc;
Paolo Bonzini15fc0752014-08-18 13:17:00 +02001472 err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001473
Guo Chaofc058682012-06-28 15:19:51 +08001474	/* can't load a system descriptor into an ordinary segment register */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001475 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1476 goto exception;
1477
1478 if (!seg_desc.p) {
1479 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1480 goto exception;
1481 }
1482
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001483 dpl = seg_desc.dpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001484
1485 switch (seg) {
1486 case VCPU_SREG_SS:
1487 /*
1488 * segment is not a writable data segment or segment
1489	 * selector's RPL != CPL or the descriptor's DPL != CPL
1490 */
1491 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1492 goto exception;
1493 break;
1494 case VCPU_SREG_CS:
1495 if (!(seg_desc.type & 8))
1496 goto exception;
1497
1498 if (seg_desc.type & 4) {
1499 /* conforming */
1500 if (dpl > cpl)
1501 goto exception;
1502 } else {
1503 /* nonconforming */
1504 if (rpl > cpl || dpl != cpl)
1505 goto exception;
1506 }
Nadav Amit040c8dc2014-09-18 22:39:43 +03001507 /* in long-mode d/b must be clear if l is set */
1508 if (seg_desc.d && seg_desc.l) {
1509 u64 efer = 0;
1510
1511 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1512 if (efer & EFER_LMA)
1513 goto exception;
1514 }
1515
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001516 /* CS(RPL) <- CPL */
1517 selector = (selector & 0xfffc) | cpl;
1518 break;
1519 case VCPU_SREG_TR:
1520 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1521 goto exception;
Avi Kivity869be992012-06-13 16:30:53 +03001522 old_desc = seg_desc;
1523 seg_desc.type |= 2; /* busy */
1524 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1525 sizeof(seg_desc), &ctxt->exception);
1526 if (ret != X86EMUL_CONTINUE)
1527 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001528 break;
1529 case VCPU_SREG_LDTR:
1530 if (seg_desc.s || seg_desc.type != 2)
1531 goto exception;
1532 break;
1533 default: /* DS, ES, FS, or GS */
1534 /*
1535 * segment is not a data or readable code segment or
1536 * ((segment is a data or nonconforming code segment)
1537 * and (both RPL and CPL > DPL))
1538 */
1539 if ((seg_desc.type & 0xa) == 0x8 ||
1540 (((seg_desc.type & 0xc) != 0xc) &&
1541 (rpl > dpl && cpl > dpl)))
1542 goto exception;
1543 break;
1544 }
1545
1546 if (seg_desc.s) {
1547 /* mark segment as accessed */
1548 seg_desc.type |= 1;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001549 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001550 if (ret != X86EMUL_CONTINUE)
1551 return ret;
Nadav Amite37a75a2014-06-02 18:34:04 +03001552 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1553 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1554 sizeof(base3), &ctxt->exception);
1555 if (ret != X86EMUL_CONTINUE)
1556 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001557 }
1558load:
Nadav Amite37a75a2014-06-02 18:34:04 +03001559 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001560 return X86EMUL_CONTINUE;
1561exception:
Paolo Bonzini592f0852014-08-20 10:05:08 +02001562 return emulate_exception(ctxt, err_vec, err_code, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001563}
1564
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001565static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1566 u16 selector, int seg)
1567{
1568 u8 cpl = ctxt->ops->cpl(ctxt);
Paolo Bonzini5045b462014-05-15 18:09:29 +02001569 return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001570}
1571
Wei Yongjun31be40b2010-08-17 09:17:30 +08001572static void write_register_operand(struct operand *op)
1573{
1574 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1575 switch (op->bytes) {
1576 case 1:
1577 *(u8 *)op->addr.reg = (u8)op->val;
1578 break;
1579 case 2:
1580 *(u16 *)op->addr.reg = (u16)op->val;
1581 break;
1582 case 4:
1583 *op->addr.reg = (u32)op->val;
1584 break; /* 64b: zero-extend */
1585 case 8:
1586 *op->addr.reg = op->val;
1587 break;
1588 }
1589}
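/*
 * Example (illustrative): in 64-bit mode "mov eax, 1" goes through the
 * 4-byte case and therefore clears bits 63:32 of RAX, while "mov ax, 1"
 * takes the 2-byte case and leaves bits 63:16 untouched, matching the
 * architectural rules for sub-register writes.
 */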
1590
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001591static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
Wei Yongjunc37eda12010-06-15 09:03:33 +08001592{
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001593 switch (op->type) {
Wei Yongjunc37eda12010-06-15 09:03:33 +08001594 case OP_REG:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001595 write_register_operand(op);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001596 break;
1597 case OP_MEM:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001598 if (ctxt->lock_prefix)
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001599 return segmented_cmpxchg(ctxt,
1600 op->addr.mem,
1601 &op->orig_val,
1602 &op->val,
1603 op->bytes);
1604 else
1605 return segmented_write(ctxt,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001606 op->addr.mem,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001607 &op->val,
1608 op->bytes);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001609 break;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001610 case OP_MEM_STR:
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001611 return segmented_write(ctxt,
1612 op->addr.mem,
1613 op->data,
1614 op->bytes * op->count);
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001615 break;
Avi Kivity12537912011-03-29 11:41:27 +02001616 case OP_XMM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001617 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
Avi Kivity12537912011-03-29 11:41:27 +02001618 break;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001619 case OP_MM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001620 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001621 break;
Wei Yongjunc37eda12010-06-15 09:03:33 +08001622 case OP_NONE:
1623 /* no writeback */
1624 break;
1625 default:
1626 break;
1627 }
1628 return X86EMUL_CONTINUE;
1629}
1630
Avi Kivity51ddff52012-06-12 20:19:40 +03001631static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001632{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001633 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001634
Avi Kivity5ad105e2012-08-19 14:34:31 +03001635 rsp_increment(ctxt, -bytes);
Avi Kivitydd856ef2012-08-27 23:46:17 +03001636 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001637 addr.seg = VCPU_SREG_SS;
1638
Avi Kivity51ddff52012-06-12 20:19:40 +03001639 return segmented_write(ctxt, addr, data, bytes);
1640}
1641
1642static int em_push(struct x86_emulate_ctxt *ctxt)
1643{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001644 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001645 ctxt->dst.type = OP_NONE;
Avi Kivity51ddff52012-06-12 20:19:40 +03001646 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001647}
1648
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001649static int emulate_pop(struct x86_emulate_ctxt *ctxt,
Avi Kivity350f69d2009-01-05 11:12:40 +02001650 void *dest, int len)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001651{
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001652 int rc;
Avi Kivity90de84f2010-11-17 15:28:21 +02001653 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001654
Avi Kivitydd856ef2012-08-27 23:46:17 +03001655 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Avi Kivity90de84f2010-11-17 15:28:21 +02001656 addr.seg = VCPU_SREG_SS;
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001657 rc = segmented_read(ctxt, addr, dest, len);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09001658 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001659 return rc;
1660
Avi Kivity5ad105e2012-08-19 14:34:31 +03001661 rsp_increment(ctxt, len);
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001662 return rc;
1663}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001664
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001665static int em_pop(struct x86_emulate_ctxt *ctxt)
1666{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001667 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001668}
1669
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001670static int emulate_popf(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001671 void *dest, int len)
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001672{
1673 int rc;
1674 unsigned long val, change_mask;
1675 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001676 int cpl = ctxt->ops->cpl(ctxt);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001677
Takuya Yoshikawa3b9be3b2011-05-02 02:27:55 +09001678 rc = emulate_pop(ctxt, &val, len);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001679 if (rc != X86EMUL_CONTINUE)
1680 return rc;
1681
1682 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
Nadav Amit163b1352014-07-21 14:37:28 +03001683 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001684
1685 switch(ctxt->mode) {
1686 case X86EMUL_MODE_PROT64:
1687 case X86EMUL_MODE_PROT32:
1688 case X86EMUL_MODE_PROT16:
1689 if (cpl == 0)
1690 change_mask |= EFLG_IOPL;
1691 if (cpl <= iopl)
1692 change_mask |= EFLG_IF;
1693 break;
1694 case X86EMUL_MODE_VM86:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001695 if (iopl < 3)
1696 return emulate_gp(ctxt, 0);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001697 change_mask |= EFLG_IF;
1698 break;
1699 default: /* real mode */
1700 change_mask |= (EFLG_IOPL | EFLG_IF);
1701 break;
1702 }
1703
1704 *(unsigned long *)dest =
1705 (ctxt->eflags & ~change_mask) | (val & change_mask);
1706
1707 return rc;
1708}
1709
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001710static int em_popf(struct x86_emulate_ctxt *ctxt)
1711{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001712 ctxt->dst.type = OP_REG;
1713 ctxt->dst.addr.reg = &ctxt->eflags;
1714 ctxt->dst.bytes = ctxt->op_bytes;
1715 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001716}
1717
Avi Kivity612e89f2012-06-12 20:03:23 +03001718static int em_enter(struct x86_emulate_ctxt *ctxt)
1719{
1720 int rc;
1721 unsigned frame_size = ctxt->src.val;
1722 unsigned nesting_level = ctxt->src2.val & 31;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001723 ulong rbp;
Avi Kivity612e89f2012-06-12 20:03:23 +03001724
1725 if (nesting_level)
1726 return X86EMUL_UNHANDLEABLE;
1727
Avi Kivitydd856ef2012-08-27 23:46:17 +03001728 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1729 rc = push(ctxt, &rbp, stack_size(ctxt));
Avi Kivity612e89f2012-06-12 20:03:23 +03001730 if (rc != X86EMUL_CONTINUE)
1731 return rc;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001732 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
Avi Kivity612e89f2012-06-12 20:03:23 +03001733 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001734 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1735 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
Avi Kivity612e89f2012-06-12 20:03:23 +03001736 stack_mask(ctxt));
1737 return X86EMUL_CONTINUE;
1738}
1739
Avi Kivityf47cfa32012-06-07 17:49:24 +03001740static int em_leave(struct x86_emulate_ctxt *ctxt)
1741{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001742 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
Avi Kivityf47cfa32012-06-07 17:49:24 +03001743 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001744 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
Avi Kivityf47cfa32012-06-07 17:49:24 +03001745}
1746
Avi Kivity1cd196e2011-09-13 10:45:51 +03001747static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001748{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001749 int seg = ctxt->src2.val;
1750
Avi Kivity9dac77f2011-06-01 15:34:25 +03001751 ctxt->src.val = get_segment_selector(ctxt, seg);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001752
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001753 return em_push(ctxt);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001754}
1755
Avi Kivity1cd196e2011-09-13 10:45:51 +03001756static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001757{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001758 int seg = ctxt->src2.val;
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001759 unsigned long selector;
1760 int rc;
1761
Avi Kivity9dac77f2011-06-01 15:34:25 +03001762 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001763 if (rc != X86EMUL_CONTINUE)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001764 return rc;
1765
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001766 if (ctxt->modrm_reg == VCPU_SREG_SS)
1767 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1768
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001769 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001770 return rc;
1771}
1772
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001773static int em_pusha(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001774{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001775 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001776 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001777 int reg = VCPU_REGS_RAX;
1778
1779 while (reg <= VCPU_REGS_RDI) {
1780		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001781			old_esp : reg_read(ctxt, reg);
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001782
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001783 rc = em_push(ctxt);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001784 if (rc != X86EMUL_CONTINUE)
1785 return rc;
1786
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001787 ++reg;
1788 }
Wei Yongjunc37eda12010-06-15 09:03:33 +08001789
Wei Yongjunc37eda12010-06-15 09:03:33 +08001790 return rc;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001791}
1792
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001793static int em_pushf(struct x86_emulate_ctxt *ctxt)
1794{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001795 ctxt->src.val = (unsigned long)ctxt->eflags;
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001796 return em_push(ctxt);
1797}
1798
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001799static int em_popa(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001800{
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001801 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001802 int reg = VCPU_REGS_RDI;
1803
1804 while (reg >= VCPU_REGS_RAX) {
1805 if (reg == VCPU_REGS_RSP) {
Avi Kivity5ad105e2012-08-19 14:34:31 +03001806 rsp_increment(ctxt, ctxt->op_bytes);
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001807 --reg;
1808 }
1809
Avi Kivitydd856ef2012-08-27 23:46:17 +03001810 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001811 if (rc != X86EMUL_CONTINUE)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001812 break;
1813 --reg;
1814 }
1815 return rc;
1816}
1817
Avi Kivitydd856ef2012-08-27 23:46:17 +03001818static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001819{
Mathias Krause0225fb52012-08-30 01:30:16 +02001820 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001821 int rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001822 struct desc_ptr dt;
1823 gva_t cs_addr;
1824 gva_t eip_addr;
1825 u16 cs, eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001826
1827 /* TODO: Add limit checks */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001828 ctxt->src.val = ctxt->eflags;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001829 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001830 if (rc != X86EMUL_CONTINUE)
1831 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001832
1833 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1834
Avi Kivity9dac77f2011-06-01 15:34:25 +03001835 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001836 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001837 if (rc != X86EMUL_CONTINUE)
1838 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001839
Avi Kivity9dac77f2011-06-01 15:34:25 +03001840 ctxt->src.val = ctxt->_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001841 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001842 if (rc != X86EMUL_CONTINUE)
1843 return rc;
1844
Avi Kivity4bff1e862011-04-20 13:37:53 +03001845 ops->get_idt(ctxt, &dt);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001846
1847 eip_addr = dt.address + (irq << 2);
1848 cs_addr = dt.address + (irq << 2) + 2;
1849
Avi Kivity0f65dd72011-04-20 13:37:53 +03001850 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001851 if (rc != X86EMUL_CONTINUE)
1852 return rc;
1853
Avi Kivity0f65dd72011-04-20 13:37:53 +03001854 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001855 if (rc != X86EMUL_CONTINUE)
1856 return rc;
1857
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001858 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001859 if (rc != X86EMUL_CONTINUE)
1860 return rc;
1861
Avi Kivity9dac77f2011-06-01 15:34:25 +03001862 ctxt->_eip = eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001863
1864 return rc;
1865}
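/*
 * Illustrative walk-through: a real-mode "int 0x10", assuming the IDT
 * base is 0, pushes FLAGS, CS and IP, clears IF/TF/AC, and then loads the
 * new IP from linear address 0x40 (0x10 << 2) and the new CS from 0x42,
 * i.e. the classic 4-byte-per-vector interrupt vector table layout.
 */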
1866
Avi Kivitydd856ef2012-08-27 23:46:17 +03001867int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1868{
1869 int rc;
1870
1871 invalidate_registers(ctxt);
1872 rc = __emulate_int_real(ctxt, irq);
1873 if (rc == X86EMUL_CONTINUE)
1874 writeback_registers(ctxt);
1875 return rc;
1876}
1877
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001878static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001879{
1880 switch(ctxt->mode) {
1881 case X86EMUL_MODE_REAL:
Avi Kivitydd856ef2012-08-27 23:46:17 +03001882 return __emulate_int_real(ctxt, irq);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001883 case X86EMUL_MODE_VM86:
1884 case X86EMUL_MODE_PROT16:
1885 case X86EMUL_MODE_PROT32:
1886 case X86EMUL_MODE_PROT64:
1887 default:
1888 /* Protected mode interrupts unimplemented yet */
1889 return X86EMUL_UNHANDLEABLE;
1890 }
1891}
1892
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001893static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001894{
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001895 int rc = X86EMUL_CONTINUE;
1896 unsigned long temp_eip = 0;
1897 unsigned long temp_eflags = 0;
1898 unsigned long cs = 0;
1899 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1900 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1901 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1902 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1903
1904 /* TODO: Add stack limit check */
1905
Avi Kivity9dac77f2011-06-01 15:34:25 +03001906 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001907
1908 if (rc != X86EMUL_CONTINUE)
1909 return rc;
1910
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001911 if (temp_eip & ~0xffff)
1912 return emulate_gp(ctxt, 0);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001913
Avi Kivity9dac77f2011-06-01 15:34:25 +03001914 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001915
1916 if (rc != X86EMUL_CONTINUE)
1917 return rc;
1918
Avi Kivity9dac77f2011-06-01 15:34:25 +03001919 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001920
1921 if (rc != X86EMUL_CONTINUE)
1922 return rc;
1923
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001924 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001925
1926 if (rc != X86EMUL_CONTINUE)
1927 return rc;
1928
Avi Kivity9dac77f2011-06-01 15:34:25 +03001929 ctxt->_eip = temp_eip;
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001930
1931
Avi Kivity9dac77f2011-06-01 15:34:25 +03001932 if (ctxt->op_bytes == 4)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001933 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
Avi Kivity9dac77f2011-06-01 15:34:25 +03001934 else if (ctxt->op_bytes == 2) {
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001935 ctxt->eflags &= ~0xffff;
1936 ctxt->eflags |= temp_eflags;
1937 }
1938
1939 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1940 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1941
1942 return rc;
1943}
1944
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09001945static int em_iret(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001946{
1947 switch(ctxt->mode) {
1948 case X86EMUL_MODE_REAL:
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001949 return emulate_iret_real(ctxt);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001950 case X86EMUL_MODE_VM86:
1951 case X86EMUL_MODE_PROT16:
1952 case X86EMUL_MODE_PROT32:
1953 case X86EMUL_MODE_PROT64:
1954 default:
1955 /* iret from protected mode unimplemented yet */
1956 return X86EMUL_UNHANDLEABLE;
1957 }
1958}
1959
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001960static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1961{
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001962 int rc;
1963 unsigned short sel;
1964
Avi Kivity9dac77f2011-06-01 15:34:25 +03001965 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001966
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001967 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001968 if (rc != X86EMUL_CONTINUE)
1969 return rc;
1970
Avi Kivity9dac77f2011-06-01 15:34:25 +03001971 ctxt->_eip = 0;
1972 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001973 return X86EMUL_CONTINUE;
1974}
1975
Takuya Yoshikawa51187682011-05-02 02:29:17 +09001976static int em_grp45(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001977{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001978 int rc = X86EMUL_CONTINUE;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001979
Avi Kivity9dac77f2011-06-01 15:34:25 +03001980 switch (ctxt->modrm_reg) {
Mohammed Gamald19292e2008-09-08 21:47:19 +03001981 case 2: /* call near abs */ {
1982 long int old_eip;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001983 old_eip = ctxt->_eip;
1984 ctxt->_eip = ctxt->src.val;
1985 ctxt->src.val = old_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001986 rc = em_push(ctxt);
Mohammed Gamald19292e2008-09-08 21:47:19 +03001987 break;
1988 }
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001989 case 4: /* jmp abs */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001990 ctxt->_eip = ctxt->src.val;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001991 break;
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001992 case 5: /* jmp far */
1993 rc = em_jmp_far(ctxt);
1994 break;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001995 case 6: /* push */
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001996 rc = em_push(ctxt);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001997 break;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001998 }
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001999 return rc;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002000}
2001
Takuya Yoshikawae0dac402011-12-06 18:07:27 +09002002static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002003{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002004 u64 old = ctxt->dst.orig_val64;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002005
Nadav Amitaaa05f22014-06-02 18:34:10 +03002006 if (ctxt->dst.bytes == 16)
2007 return X86EMUL_UNHANDLEABLE;
2008
Avi Kivitydd856ef2012-08-27 23:46:17 +03002009 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2010 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2011 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2012 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
Laurent Vivier05f086f2007-09-24 11:10:55 +02002013 ctxt->eflags &= ~EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002014 } else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03002015 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2016 (u32) reg_read(ctxt, VCPU_REGS_RBX);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002017
Laurent Vivier05f086f2007-09-24 11:10:55 +02002018 ctxt->eflags |= EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002019 }
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002020 return X86EMUL_CONTINUE;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002021}
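/*
 * Semantics sketch (illustrative): CMPXCHG8B compares EDX:EAX with the
 * 64-bit destination.  If they differ, ZF is cleared and the old value is
 * loaded into EDX:EAX; if they match, ZF is set and ECX:EBX is written to
 * memory via the normal writeback of ctxt->dst.  A 16-byte destination
 * would be CMPXCHG16B, which this helper declines as unhandleable.
 */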
2022
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002023static int em_ret(struct x86_emulate_ctxt *ctxt)
2024{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002025 ctxt->dst.type = OP_REG;
2026 ctxt->dst.addr.reg = &ctxt->_eip;
2027 ctxt->dst.bytes = ctxt->op_bytes;
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002028 return em_pop(ctxt);
2029}
2030
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002031static int em_ret_far(struct x86_emulate_ctxt *ctxt)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002032{
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002033 int rc;
2034 unsigned long cs;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002035 int cpl = ctxt->ops->cpl(ctxt);
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002036
Avi Kivity9dac77f2011-06-01 15:34:25 +03002037 rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002038 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002039 return rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002040 if (ctxt->op_bytes == 4)
2041 ctxt->_eip = (u32)ctxt->_eip;
2042 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002043 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002044 return rc;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002045 /* Outer-privilege level return is not implemented */
2046 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2047 return X86EMUL_UNHANDLEABLE;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002048 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002049 return rc;
2050}
2051
Bruce Rogers32611072013-09-09 09:40:20 -06002052static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2053{
2054 int rc;
2055
2056 rc = em_ret_far(ctxt);
2057 if (rc != X86EMUL_CONTINUE)
2058 return rc;
2059 rsp_increment(ctxt, ctxt->src.val);
2060 return X86EMUL_CONTINUE;
2061}
2062
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002063static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2064{
2065 /* Save real source value, then compare EAX against destination. */
Nadav Amit37c564f2014-06-02 18:34:07 +03002066 ctxt->dst.orig_val = ctxt->dst.val;
2067 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002068 ctxt->src.orig_val = ctxt->src.val;
Nadav Amit37c564f2014-06-02 18:34:07 +03002069 ctxt->src.val = ctxt->dst.orig_val;
Avi Kivity158de572013-01-19 19:51:57 +02002070 fastop(ctxt, em_cmp);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002071
2072 if (ctxt->eflags & EFLG_ZF) {
2073 /* Success: write back to memory. */
2074 ctxt->dst.val = ctxt->src.orig_val;
2075 } else {
2076 /* Failure: write the value we saw to EAX. */
2077 ctxt->dst.type = OP_REG;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002078 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Nadav Amit37c564f2014-06-02 18:34:07 +03002079 ctxt->dst.val = ctxt->dst.orig_val;
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002080 }
2081 return X86EMUL_CONTINUE;
2082}
2083
Avi Kivityd4b43252011-09-13 10:45:50 +03002084static int em_lseg(struct x86_emulate_ctxt *ctxt)
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002085{
Avi Kivityd4b43252011-09-13 10:45:50 +03002086 int seg = ctxt->src2.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002087 unsigned short sel;
2088 int rc;
2089
Avi Kivity9dac77f2011-06-01 15:34:25 +03002090 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002091
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002092 rc = load_segment_descriptor(ctxt, sel, seg);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002093 if (rc != X86EMUL_CONTINUE)
2094 return rc;
2095
Avi Kivity9dac77f2011-06-01 15:34:25 +03002096 ctxt->dst.val = ctxt->src.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002097 return rc;
2098}
2099
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002100static void
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002101setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002102 struct desc_struct *cs, struct desc_struct *ss)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002103{
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002104 cs->l = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002105 set_desc_base(cs, 0); /* flat segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002106 cs->g = 1; /* 4kb granularity */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002107 set_desc_limit(cs, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002108 cs->type = 0x0b; /* Read, Execute, Accessed */
2109 cs->s = 1;
2110 cs->dpl = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002111 cs->p = 1;
2112 cs->d = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002113 cs->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002114
Gleb Natapov79168fd2010-04-28 19:15:30 +03002115 set_desc_base(ss, 0); /* flat segment */
2116 set_desc_limit(ss, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002117 ss->g = 1; /* 4kb granularity */
2118 ss->s = 1;
2119 ss->type = 0x03; /* Read/Write, Accessed */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002120 ss->d = 1; /* 32bit stack segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002121 ss->dpl = 0;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002122 ss->p = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002123 ss->l = 0;
2124 ss->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002125}
2126
Avi Kivity1a18a692012-02-01 12:23:21 +02002127static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2128{
2129 u32 eax, ebx, ecx, edx;
2130
2131 eax = ecx = 0;
Avi Kivity0017f932012-06-07 14:10:16 +03002132 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2133 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
Avi Kivity1a18a692012-02-01 12:23:21 +02002134 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2135 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2136}
2137
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002138static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2139{
Mathias Krause0225fb52012-08-30 01:30:16 +02002140 const struct x86_emulate_ops *ops = ctxt->ops;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002141 u32 eax, ebx, ecx, edx;
2142
2143 /*
2144	 * syscall should always be enabled in longmode - so the check only
2145	 * becomes vendor specific (via cpuid) when other modes are active...
2146 */
2147 if (ctxt->mode == X86EMUL_MODE_PROT64)
2148 return true;
2149
2150 eax = 0x00000000;
2151 ecx = 0x00000000;
Avi Kivity0017f932012-06-07 14:10:16 +03002152 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2153 /*
2154 * Intel ("GenuineIntel")
2155 * remark: Intel CPUs only support "syscall" in 64bit
2156	 * longmode. Also, a 64bit guest with a
2157	 * 32bit compat-app running will #UD!  While this
2158	 * behaviour could be fixed here by emulating the AMD
2159	 * response, AMD CPUs cannot be made to behave like Intel.
2160 */
2161 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2162 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2163 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2164 return false;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002165
Avi Kivity0017f932012-06-07 14:10:16 +03002166 /* AMD ("AuthenticAMD") */
2167 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2168 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2169 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2170 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002171
Avi Kivity0017f932012-06-07 14:10:16 +03002172 /* AMD ("AMDisbetter!") */
2173 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2174 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2175 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2176 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002177
2178 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2179 return false;
2180}
2181
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002182static int em_syscall(struct x86_emulate_ctxt *ctxt)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002183{
Mathias Krause0225fb52012-08-30 01:30:16 +02002184 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002185 struct desc_struct cs, ss;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002186 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002187 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002188 u64 efer = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002189
2190 /* syscall is not available in real mode */
Gleb Natapov2e901c42010-03-18 15:20:12 +02002191 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002192 ctxt->mode == X86EMUL_MODE_VM86)
2193 return emulate_ud(ctxt);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002194
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002195 if (!(em_syscall_is_enabled(ctxt)))
2196 return emulate_ud(ctxt);
2197
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002198 ops->get_msr(ctxt, MSR_EFER, &efer);
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002199 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002200
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002201 if (!(efer & EFER_SCE))
2202 return emulate_ud(ctxt);
2203
Avi Kivity717746e2011-04-20 13:37:53 +03002204 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002205 msr_data >>= 32;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002206 cs_sel = (u16)(msr_data & 0xfffc);
2207 ss_sel = (u16)(msr_data + 8);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002208
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002209 if (efer & EFER_LMA) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002210 cs.d = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002211 cs.l = 1;
2212 }
Avi Kivity1aa36612011-04-27 13:20:30 +03002213 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2214 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002215
Avi Kivitydd856ef2012-08-27 23:46:17 +03002216 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002217 if (efer & EFER_LMA) {
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002218#ifdef CONFIG_X86_64
Nadav Amit6c6cb692014-07-21 14:37:30 +03002219 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002220
Avi Kivity717746e2011-04-20 13:37:53 +03002221 ops->get_msr(ctxt,
Gleb Natapov3fb1b5d2010-04-28 19:15:28 +03002222 ctxt->mode == X86EMUL_MODE_PROT64 ?
2223 MSR_LSTAR : MSR_CSTAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002224 ctxt->_eip = msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002225
Avi Kivity717746e2011-04-20 13:37:53 +03002226 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
Nadav Amit6c6cb692014-07-21 14:37:30 +03002227 ctxt->eflags &= ~msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002228#endif
2229 } else {
2230 /* legacy mode */
Avi Kivity717746e2011-04-20 13:37:53 +03002231 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002232 ctxt->_eip = (u32)msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002233
Nadav Amit6c6cb692014-07-21 14:37:30 +03002234 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002235 }
2236
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002237 return X86EMUL_CONTINUE;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002238}
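/*
 * Selector math sketch (illustrative value): with MSR_STAR programmed to
 * 0x0023001000000000, bits 47:32 give 0x0010, so SYSCALL loads
 * CS = 0x0010 and SS = 0x0018.  In long mode it additionally saves RIP
 * into RCX and RFLAGS into R11, clears the SFMASK bits in RFLAGS, and
 * continues at LSTAR (or CSTAR for a 32-bit compat-mode caller).
 */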
2239
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002240static int em_sysenter(struct x86_emulate_ctxt *ctxt)
Andre Przywara8c604352009-06-18 12:56:01 +02002241{
Mathias Krause0225fb52012-08-30 01:30:16 +02002242 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002243 struct desc_struct cs, ss;
Andre Przywara8c604352009-06-18 12:56:01 +02002244 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002245 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002246 u64 efer = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002247
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002248 ops->get_msr(ctxt, MSR_EFER, &efer);
Gleb Natapova0044752010-02-10 14:21:31 +02002249 /* inject #GP if in real mode */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002250 if (ctxt->mode == X86EMUL_MODE_REAL)
2251 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002252
Avi Kivity1a18a692012-02-01 12:23:21 +02002253 /*
2254 * Not recognized on AMD in compat mode (but is recognized in legacy
2255 * mode).
2256 */
2257 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2258 && !vendor_intel(ctxt))
2259 return emulate_ud(ctxt);
2260
Andre Przywara8c604352009-06-18 12:56:01 +02002261 /* XXX sysenter/sysexit have not been tested in 64bit mode.
2262 * Therefore, we inject an #UD.
2263 */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002264 if (ctxt->mode == X86EMUL_MODE_PROT64)
2265 return emulate_ud(ctxt);
Andre Przywara8c604352009-06-18 12:56:01 +02002266
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002267 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara8c604352009-06-18 12:56:01 +02002268
Avi Kivity717746e2011-04-20 13:37:53 +03002269 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara8c604352009-06-18 12:56:01 +02002270 switch (ctxt->mode) {
2271 case X86EMUL_MODE_PROT32:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002272 if ((msr_data & 0xfffc) == 0x0)
2273 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002274 break;
2275 case X86EMUL_MODE_PROT64:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002276 if (msr_data == 0x0)
2277 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002278 break;
Gleb Natapov9d1b39a2012-09-03 15:24:27 +03002279 default:
2280 break;
Andre Przywara8c604352009-06-18 12:56:01 +02002281 }
2282
Nadav Amit6c6cb692014-07-21 14:37:30 +03002283 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002284 cs_sel = (u16)msr_data;
2285 cs_sel &= ~SELECTOR_RPL_MASK;
2286 ss_sel = cs_sel + 8;
2287 ss_sel &= ~SELECTOR_RPL_MASK;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002288 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002289 cs.d = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002290 cs.l = 1;
2291 }
2292
Avi Kivity1aa36612011-04-27 13:20:30 +03002293 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2294 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara8c604352009-06-18 12:56:01 +02002295
Avi Kivity717746e2011-04-20 13:37:53 +03002296 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002297 ctxt->_eip = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002298
Avi Kivity717746e2011-04-20 13:37:53 +03002299 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002300 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002301
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002302 return X86EMUL_CONTINUE;
Andre Przywara8c604352009-06-18 12:56:01 +02002303}
2304
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002305static int em_sysexit(struct x86_emulate_ctxt *ctxt)
Andre Przywara4668f052009-06-18 12:56:02 +02002306{
Mathias Krause0225fb52012-08-30 01:30:16 +02002307 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002308 struct desc_struct cs, ss;
Andre Przywara4668f052009-06-18 12:56:02 +02002309 u64 msr_data;
2310 int usermode;
Xiao Guangrong1249b962011-05-15 23:25:10 +08002311 u16 cs_sel = 0, ss_sel = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002312
Gleb Natapova0044752010-02-10 14:21:31 +02002313 /* inject #GP if in real mode or Virtual 8086 mode */
2314 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002315 ctxt->mode == X86EMUL_MODE_VM86)
2316 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002317
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002318 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara4668f052009-06-18 12:56:02 +02002319
Avi Kivity9dac77f2011-06-01 15:34:25 +03002320 if ((ctxt->rex_prefix & 0x8) != 0x0)
Andre Przywara4668f052009-06-18 12:56:02 +02002321 usermode = X86EMUL_MODE_PROT64;
2322 else
2323 usermode = X86EMUL_MODE_PROT32;
2324
2325 cs.dpl = 3;
2326 ss.dpl = 3;
Avi Kivity717746e2011-04-20 13:37:53 +03002327 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara4668f052009-06-18 12:56:02 +02002328 switch (usermode) {
2329 case X86EMUL_MODE_PROT32:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002330 cs_sel = (u16)(msr_data + 16);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002331 if ((msr_data & 0xfffc) == 0x0)
2332 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002333 ss_sel = (u16)(msr_data + 24);
Andre Przywara4668f052009-06-18 12:56:02 +02002334 break;
2335 case X86EMUL_MODE_PROT64:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002336 cs_sel = (u16)(msr_data + 32);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002337 if (msr_data == 0x0)
2338 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002339 ss_sel = cs_sel + 8;
2340 cs.d = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002341 cs.l = 1;
2342 break;
2343 }
Gleb Natapov79168fd2010-04-28 19:15:30 +03002344 cs_sel |= SELECTOR_RPL_MASK;
2345 ss_sel |= SELECTOR_RPL_MASK;
Andre Przywara4668f052009-06-18 12:56:02 +02002346
Avi Kivity1aa36612011-04-27 13:20:30 +03002347 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2348 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara4668f052009-06-18 12:56:02 +02002349
Avi Kivitydd856ef2012-08-27 23:46:17 +03002350 ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
2351 *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
Andre Przywara4668f052009-06-18 12:56:02 +02002352
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002353 return X86EMUL_CONTINUE;
Andre Przywara4668f052009-06-18 12:56:02 +02002354}
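/*
 * Selector math sketch (illustrative): SYSEXIT derives everything from
 * MSR_IA32_SYSENTER_CS.  Returning to 32-bit user mode loads
 * CS = SYSENTER_CS + 16 and SS = SYSENTER_CS + 24; the 64-bit return
 * (REX.W form) uses CS = SYSENTER_CS + 32 and SS = CS + 8.  The new RIP
 * comes from RDX and the new RSP from RCX, as coded above.
 */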
2355
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002356static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002357{
2358 int iopl;
2359 if (ctxt->mode == X86EMUL_MODE_REAL)
2360 return false;
2361 if (ctxt->mode == X86EMUL_MODE_VM86)
2362 return true;
2363 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002364 return ctxt->ops->cpl(ctxt) > iopl;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002365}
2366
2367static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002368 u16 port, u16 len)
2369{
Mathias Krause0225fb52012-08-30 01:30:16 +02002370 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002371 struct desc_struct tr_seg;
Gleb Natapov5601d052011-03-07 14:55:06 +02002372 u32 base3;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002373 int r;
Avi Kivity1aa36612011-04-27 13:20:30 +03002374 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002375 unsigned mask = (1 << len) - 1;
Gleb Natapov5601d052011-03-07 14:55:06 +02002376 unsigned long base;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002377
Avi Kivity1aa36612011-04-27 13:20:30 +03002378 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002379 if (!tr_seg.p)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002380 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002381 if (desc_limit_scaled(&tr_seg) < 103)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002382 return false;
Gleb Natapov5601d052011-03-07 14:55:06 +02002383 base = get_desc_base(&tr_seg);
2384#ifdef CONFIG_X86_64
2385 base |= ((u64)base3) << 32;
2386#endif
Avi Kivity0f65dd72011-04-20 13:37:53 +03002387 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002388 if (r != X86EMUL_CONTINUE)
2389 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002390 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002391 return false;
Avi Kivity0f65dd72011-04-20 13:37:53 +03002392 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002393 if (r != X86EMUL_CONTINUE)
2394 return false;
2395 if ((perm >> bit_idx) & mask)
2396 return false;
2397 return true;
2398}
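/*
 * Worked example (illustrative): the 16-bit word at TSS offset 102 is the
 * I/O bitmap base.  For "in al, 0x3f8" (port 0x3f8, len 1) the code reads
 * two bytes at tr_base + io_bitmap_ptr + 0x3f8/8 = tr_base +
 * io_bitmap_ptr + 0x7f and checks bit 0 (0x3f8 & 7); any set bit under
 * the mask means the access is denied and the caller injects #GP.
 */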
2399
2400static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002401 u16 port, u16 len)
2402{
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002403 if (ctxt->perm_ok)
2404 return true;
2405
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002406 if (emulator_bad_iopl(ctxt))
2407 if (!emulator_io_port_access_allowed(ctxt, port, len))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002408 return false;
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002409
2410 ctxt->perm_ok = true;
2411
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002412 return true;
2413}
2414
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002415static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002416 struct tss_segment_16 *tss)
2417{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002418 tss->ip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002419 tss->flag = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002420 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2421 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2422 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2423 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2424 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2425 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2426 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2427 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002428
Avi Kivity1aa36612011-04-27 13:20:30 +03002429 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2430 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2431 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2432 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2433 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002434}
2435
2436static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002437 struct tss_segment_16 *tss)
2438{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002439 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002440 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002441
Avi Kivity9dac77f2011-06-01 15:34:25 +03002442 ctxt->_eip = tss->ip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002443 ctxt->eflags = tss->flag | 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002444 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2445 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2446 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2447 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2448 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2449 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2450 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2451 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002452
2453 /*
2454 * SDM says that segment selectors are loaded before segment
2455 * descriptors
2456 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002457 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2458 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2459 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2460 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2461 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002462
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002463 cpl = tss->cs & 3;
2464
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002465 /*
Guo Chaofc058682012-06-28 15:19:51 +08002466	 * Now load segment descriptors. If a fault happens at this stage,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002467	 * it is handled in the context of the new task
2468 */
Paolo Bonzini5045b462014-05-15 18:09:29 +02002469 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002470 if (ret != X86EMUL_CONTINUE)
2471 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002472 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002473 if (ret != X86EMUL_CONTINUE)
2474 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002475 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002476 if (ret != X86EMUL_CONTINUE)
2477 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002478 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002479 if (ret != X86EMUL_CONTINUE)
2480 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002481 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002482 if (ret != X86EMUL_CONTINUE)
2483 return ret;
2484
2485 return X86EMUL_CONTINUE;
2486}
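/*
 * Note the shape shared with the 32-bit path further down: every selector
 * cache is written first, and only then are the descriptors loaded with
 * the incoming task's CPL, so a fault raised during those loads is taken
 * with the new task's state already in place.
 */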
2487
2488static int task_switch_16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002489 u16 tss_selector, u16 old_tss_sel,
2490 ulong old_tss_base, struct desc_struct *new_desc)
2491{
Mathias Krause0225fb52012-08-30 01:30:16 +02002492 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002493 struct tss_segment_16 tss_seg;
2494 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002495 u32 new_tss_base = get_desc_base(new_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002496
Avi Kivity0f65dd72011-04-20 13:37:53 +03002497 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002498 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002499 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002500 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002501 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002502
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002503 save_state_to_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002504
Avi Kivity0f65dd72011-04-20 13:37:53 +03002505 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002506 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002507 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002508 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002509 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002510
Avi Kivity0f65dd72011-04-20 13:37:53 +03002511 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002512 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002513 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002514 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002515 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002516
2517 if (old_tss_sel != 0xffff) {
2518 tss_seg.prev_task_link = old_tss_sel;
2519
Avi Kivity0f65dd72011-04-20 13:37:53 +03002520 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002521 &tss_seg.prev_task_link,
2522 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002523 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002524 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002525 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002526 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002527 }
2528
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002529 return load_state_from_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002530}
2531
2532static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002533 struct tss_segment_32 *tss)
2534{
Nadav Amit5c7411e2014-04-07 18:37:47 +03002535	/* CR3 and the LDT selector are intentionally not saved */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002536 tss->eip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002537 tss->eflags = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002538 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2539 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2540 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2541 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2542 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2543 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2544 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2545 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002546
Avi Kivity1aa36612011-04-27 13:20:30 +03002547 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2548 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2549 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2550 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2551 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2552 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002553}
2554
2555static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002556 struct tss_segment_32 *tss)
2557{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002558 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002559 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002560
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002561 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002562 return emulate_gp(ctxt, 0);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002563 ctxt->_eip = tss->eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002564 ctxt->eflags = tss->eflags | 2;
Kevin Wolf4cee4792012-02-08 14:34:41 +01002565
2566 /* General purpose registers */
Avi Kivitydd856ef2012-08-27 23:46:17 +03002567 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2568 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2569 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2570 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2571 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2572 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2573 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2574 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002575
2576 /*
2577 * SDM says that segment selectors are loaded before segment
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002578 * descriptors. This is important because CPL checks will
2579 * use CS.RPL.
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002580 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002581 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2582 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2583 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2584 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2585 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2586 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2587 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002588
2589 /*
Kevin Wolf4cee4792012-02-08 14:34:41 +01002590 * If we're switching between Protected Mode and VM86, we need to make
2591 * sure to update the mode before loading the segment descriptors so
2592 * that the selectors are interpreted correctly.
Kevin Wolf4cee4792012-02-08 14:34:41 +01002593 */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002594 if (ctxt->eflags & X86_EFLAGS_VM) {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002595 ctxt->mode = X86EMUL_MODE_VM86;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002596 cpl = 3;
2597 } else {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002598 ctxt->mode = X86EMUL_MODE_PROT32;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002599 cpl = tss->cs & 3;
2600 }
Kevin Wolf4cee4792012-02-08 14:34:41 +01002601
2602 /*
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002603	 * Now load segment descriptors. If a fault happens at this stage,
                2604	 * it is handled in the context of the new task
2605 */
Paolo Bonzini5045b462014-05-15 18:09:29 +02002606 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002607 if (ret != X86EMUL_CONTINUE)
2608 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002609 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002610 if (ret != X86EMUL_CONTINUE)
2611 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002612 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002613 if (ret != X86EMUL_CONTINUE)
2614 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002615 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002616 if (ret != X86EMUL_CONTINUE)
2617 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002618 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002619 if (ret != X86EMUL_CONTINUE)
2620 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002621 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002622 if (ret != X86EMUL_CONTINUE)
2623 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002624 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002625 if (ret != X86EMUL_CONTINUE)
2626 return ret;
2627
2628 return X86EMUL_CONTINUE;
2629}
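/*
 * The CPL used for the descriptor loads above is derived from the
 * incoming state itself: 3 when the new task runs in VM86 mode, otherwise
 * the RPL of the new CS selector; ops->cpl() at this point presumably
 * still reflects the outgoing task.
 */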
2630
2631static int task_switch_32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002632 u16 tss_selector, u16 old_tss_sel,
2633 ulong old_tss_base, struct desc_struct *new_desc)
2634{
Mathias Krause0225fb52012-08-30 01:30:16 +02002635 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002636 struct tss_segment_32 tss_seg;
2637 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002638 u32 new_tss_base = get_desc_base(new_desc);
Nadav Amit5c7411e2014-04-07 18:37:47 +03002639 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2640 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002641
Avi Kivity0f65dd72011-04-20 13:37:53 +03002642 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002643 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002644 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002645 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002646 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002647
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002648 save_state_to_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002649
Nadav Amit5c7411e2014-04-07 18:37:47 +03002650 /* Only GP registers and segment selectors are saved */
2651 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2652 ldt_sel_offset - eip_offset, &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002653 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002654 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002655 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002656
Avi Kivity0f65dd72011-04-20 13:37:53 +03002657 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002658 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002659 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002660 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002661 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002662
2663 if (old_tss_sel != 0xffff) {
2664 tss_seg.prev_task_link = old_tss_sel;
2665
Avi Kivity0f65dd72011-04-20 13:37:53 +03002666 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002667 &tss_seg.prev_task_link,
2668 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002669 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002670 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002671 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002672 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002673 }
2674
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002675 return load_state_from_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002676}
2677
2678static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002679 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002680 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002681{
Mathias Krause0225fb52012-08-30 01:30:16 +02002682 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002683 struct desc_struct curr_tss_desc, next_tss_desc;
2684 int ret;
Avi Kivity1aa36612011-04-27 13:20:30 +03002685 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002686 ulong old_tss_base =
Avi Kivity4bff1e862011-04-20 13:37:53 +03002687 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
Gleb Natapovceffb452010-03-18 15:20:19 +02002688 u32 desc_limit;
Avi Kivitye9194642012-06-13 16:29:39 +03002689 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002690
2691 /* FIXME: old_tss_base == ~0 ? */
2692
Avi Kivitye9194642012-06-13 16:29:39 +03002693 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002694 if (ret != X86EMUL_CONTINUE)
2695 return ret;
Avi Kivitye9194642012-06-13 16:29:39 +03002696 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002697 if (ret != X86EMUL_CONTINUE)
2698 return ret;
2699
2700 /* FIXME: check that next_tss_desc is tss */
2701
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002702 /*
2703 * Check privileges. The three cases are task switch caused by...
2704 *
2705 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2706 * 2. Exception/IRQ/iret: No check is performed
Guo Chaofc058682012-06-28 15:19:51 +08002707 * 3. jmp/call to TSS: Check against DPL of the TSS
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002708 */
2709 if (reason == TASK_SWITCH_GATE) {
2710 if (idt_index != -1) {
2711 /* Software interrupts */
2712 struct desc_struct task_gate_desc;
2713 int dpl;
2714
2715 ret = read_interrupt_descriptor(ctxt, idt_index,
2716 &task_gate_desc);
2717 if (ret != X86EMUL_CONTINUE)
2718 return ret;
2719
2720 dpl = task_gate_desc.dpl;
2721 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2722 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2723 }
2724 } else if (reason != TASK_SWITCH_IRET) {
2725 int dpl = next_tss_desc.dpl;
2726 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2727 return emulate_gp(ctxt, tss_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002728 }
2729
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002730
Gleb Natapovceffb452010-03-18 15:20:19 +02002731 desc_limit = desc_limit_scaled(&next_tss_desc);
2732 if (!next_tss_desc.p ||
2733 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2734 desc_limit < 0x2b)) {
Paolo Bonzini592f0852014-08-20 10:05:08 +02002735 return emulate_ts(ctxt, tss_selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002736 }
2737
2738 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2739 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002740 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002741 }
2742
2743 if (reason == TASK_SWITCH_IRET)
2744 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2745
                2746	/* set back link to prev task only if the NT bit is set in eflags;
Guo Chaofc058682012-06-28 15:19:51 +08002747	   note that old_tss_sel is not used after this point */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002748 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2749 old_tss_sel = 0xffff;
2750
2751 if (next_tss_desc.type & 8)
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002752 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002753 old_tss_base, &next_tss_desc);
2754 else
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002755 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002756 old_tss_base, &next_tss_desc);
Jan Kiszka0760d442010-04-14 15:50:57 +02002757 if (ret != X86EMUL_CONTINUE)
2758 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002759
2760 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2761 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2762
2763 if (reason != TASK_SWITCH_IRET) {
2764 next_tss_desc.type |= (1 << 1); /* set busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002765 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002766 }
2767
Avi Kivity717746e2011-04-20 13:37:53 +03002768 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
Avi Kivity1aa36612011-04-27 13:20:30 +03002769 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002770
Jan Kiszkae269fb22010-04-14 15:51:09 +02002771 if (has_error_code) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002772 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2773 ctxt->lock_prefix = 0;
2774 ctxt->src.val = (unsigned long) error_code;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002775 ret = em_push(ctxt);
Jan Kiszkae269fb22010-04-14 15:51:09 +02002776 }
2777
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002778 return ret;
2779}
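/*
 * Rough order of operations in emulator_do_task_switch(), for reference:
 * read both TSS descriptors; privilege check against the task gate or TSS
 * DPL (skipped for IRET); presence/limit check (at least 0x67 bytes for a
 * 32-bit TSS, 0x2b for a 16-bit one); for IRET/JMP clear the busy bit in
 * the old descriptor; for IRET clear EFLAGS.NT; save to the old TSS and
 * load from the new one (task_switch_16/32 also write the back link when
 * nesting); for CALL/gate set EFLAGS.NT; for non-IRET set the busy bit in
 * the new descriptor; finally set CR0.TS, load TR and, if needed, push
 * the error code.
 */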
2780
2781int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002782 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002783 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002784{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002785 int rc;
2786
Avi Kivitydd856ef2012-08-27 23:46:17 +03002787 invalidate_registers(ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002788 ctxt->_eip = ctxt->eip;
2789 ctxt->dst.type = OP_NONE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002790
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002791 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002792 has_error_code, error_code);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002793
Avi Kivitydd856ef2012-08-27 23:46:17 +03002794 if (rc == X86EMUL_CONTINUE) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002795 ctxt->eip = ctxt->_eip;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002796 writeback_registers(ctxt);
2797 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002798
Gleb Natapova0c0ab22011-03-28 16:57:49 +02002799 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002800}
2801
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03002802static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2803 struct operand *op)
Gleb Natapova682e352010-03-18 15:20:21 +02002804{
Gleb Natapovb3356bf2012-09-03 15:24:29 +03002805 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
Gleb Natapova682e352010-03-18 15:20:21 +02002806
Avi Kivitydd856ef2012-08-27 23:46:17 +03002807 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2808 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
Gleb Natapova682e352010-03-18 15:20:21 +02002809}
2810
Avi Kivity7af04fc2010-08-18 14:16:35 +03002811static int em_das(struct x86_emulate_ctxt *ctxt)
2812{
Avi Kivity7af04fc2010-08-18 14:16:35 +03002813 u8 al, old_al;
2814 bool af, cf, old_cf;
2815
2816 cf = ctxt->eflags & X86_EFLAGS_CF;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002817 al = ctxt->dst.val;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002818
2819 old_al = al;
2820 old_cf = cf;
2821 cf = false;
2822 af = ctxt->eflags & X86_EFLAGS_AF;
2823 if ((al & 0x0f) > 9 || af) {
2824 al -= 6;
2825 cf = old_cf | (al >= 250);
2826 af = true;
2827 } else {
2828 af = false;
2829 }
2830 if (old_al > 0x99 || old_cf) {
2831 al -= 0x60;
2832 cf = true;
2833 }
2834
Avi Kivity9dac77f2011-06-01 15:34:25 +03002835 ctxt->dst.val = al;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002836 /* Set PF, ZF, SF */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002837 ctxt->src.type = OP_IMM;
2838 ctxt->src.val = 0;
2839 ctxt->src.bytes = 1;
Avi Kivity158de572013-01-19 19:51:57 +02002840 fastop(ctxt, em_or);
Avi Kivity7af04fc2010-08-18 14:16:35 +03002841 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2842 if (cf)
2843 ctxt->eflags |= X86_EFLAGS_CF;
2844 if (af)
2845 ctxt->eflags |= X86_EFLAGS_AF;
2846 return X86EMUL_CONTINUE;
2847}
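/*
 * Worked example (illustrative only): the packed-BCD subtraction
 * 0x31 - 0x07 leaves AL = 0x2a with AF set.  DAS sees (AL & 0x0f) > 9,
 * subtracts 6 and yields AL = 0x24, the correct BCD result of 31 - 07.
 */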
2848
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02002849static int em_aam(struct x86_emulate_ctxt *ctxt)
2850{
2851 u8 al, ah;
2852
2853 if (ctxt->src.val == 0)
2854 return emulate_de(ctxt);
2855
2856 al = ctxt->dst.val & 0xff;
2857 ah = al / ctxt->src.val;
2858 al %= ctxt->src.val;
2859
2860 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2861
2862 /* Set PF, ZF, SF */
2863 ctxt->src.type = OP_IMM;
2864 ctxt->src.val = 0;
2865 ctxt->src.bytes = 1;
2866 fastop(ctxt, em_or);
2867
2868 return X86EMUL_CONTINUE;
2869}
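/*
 * Example (illustrative only): with AL = 79 (0x4f) and the usual imm8 of
 * 10, AAM produces AH = 7, AL = 9, i.e. the unpacked-BCD digits of 79.
 * An imm8 of 0 takes the emulate_de() path above.
 */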
2870
Gleb Natapov7f662272012-12-10 11:42:30 +02002871static int em_aad(struct x86_emulate_ctxt *ctxt)
2872{
2873 u8 al = ctxt->dst.val & 0xff;
2874 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2875
2876 al = (al + (ah * ctxt->src.val)) & 0xff;
2877
2878 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2879
Gleb Natapovf583c292013-02-13 17:50:39 +02002880 /* Set PF, ZF, SF */
2881 ctxt->src.type = OP_IMM;
2882 ctxt->src.val = 0;
2883 ctxt->src.bytes = 1;
2884 fastop(ctxt, em_or);
Gleb Natapov7f662272012-12-10 11:42:30 +02002885
2886 return X86EMUL_CONTINUE;
2887}
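/*
 * Example (illustrative only): with AH = 7, AL = 9 and the usual imm8 of
 * 10, AAD computes AL = 9 + 7 * 10 = 79 (0x4f) and clears AH, undoing the
 * AAM example above.
 */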
2888
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09002889static int em_call(struct x86_emulate_ctxt *ctxt)
2890{
2891 long rel = ctxt->src.val;
2892
2893 ctxt->src.val = (unsigned long)ctxt->_eip;
2894 jmp_rel(ctxt, rel);
2895 return em_push(ctxt);
2896}
2897
Avi Kivity0ef753b2010-08-18 14:51:45 +03002898static int em_call_far(struct x86_emulate_ctxt *ctxt)
2899{
Avi Kivity0ef753b2010-08-18 14:51:45 +03002900 u16 sel, old_cs;
2901 ulong old_eip;
2902 int rc;
2903
Avi Kivity1aa36612011-04-27 13:20:30 +03002904 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002905 old_eip = ctxt->_eip;
Avi Kivity0ef753b2010-08-18 14:51:45 +03002906
Avi Kivity9dac77f2011-06-01 15:34:25 +03002907 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002908 if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
Avi Kivity0ef753b2010-08-18 14:51:45 +03002909 return X86EMUL_CONTINUE;
2910
Avi Kivity9dac77f2011-06-01 15:34:25 +03002911 ctxt->_eip = 0;
2912 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
Avi Kivity0ef753b2010-08-18 14:51:45 +03002913
Avi Kivity9dac77f2011-06-01 15:34:25 +03002914 ctxt->src.val = old_cs;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002915 rc = em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03002916 if (rc != X86EMUL_CONTINUE)
2917 return rc;
2918
Avi Kivity9dac77f2011-06-01 15:34:25 +03002919 ctxt->src.val = old_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002920 return em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03002921}
2922
Avi Kivity40ece7c2010-08-18 15:12:09 +03002923static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2924{
Avi Kivity40ece7c2010-08-18 15:12:09 +03002925 int rc;
2926
Avi Kivity9dac77f2011-06-01 15:34:25 +03002927 ctxt->dst.type = OP_REG;
2928 ctxt->dst.addr.reg = &ctxt->_eip;
2929 ctxt->dst.bytes = ctxt->op_bytes;
2930 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Avi Kivity40ece7c2010-08-18 15:12:09 +03002931 if (rc != X86EMUL_CONTINUE)
2932 return rc;
Avi Kivity5ad105e2012-08-19 14:34:31 +03002933 rsp_increment(ctxt, ctxt->src.val);
Avi Kivity40ece7c2010-08-18 15:12:09 +03002934 return X86EMUL_CONTINUE;
2935}
2936
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09002937static int em_xchg(struct x86_emulate_ctxt *ctxt)
2938{
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09002939 /* Write back the register source. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002940 ctxt->src.val = ctxt->dst.val;
2941 write_register_operand(&ctxt->src);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09002942
2943 /* Write back the memory destination with implicit LOCK prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002944 ctxt->dst.val = ctxt->src.orig_val;
2945 ctxt->lock_prefix = 1;
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09002946 return X86EMUL_CONTINUE;
2947}
2948
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03002949static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2950{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002951 ctxt->dst.val = ctxt->src2.val;
Avi Kivity4d758342013-01-19 19:51:55 +02002952 return fastop(ctxt, em_imul);
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03002953}
2954
Avi Kivity61429142010-08-19 15:13:00 +03002955static int em_cwd(struct x86_emulate_ctxt *ctxt)
2956{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002957 ctxt->dst.type = OP_REG;
2958 ctxt->dst.bytes = ctxt->src.bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002959 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002960 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
Avi Kivity61429142010-08-19 15:13:00 +03002961
2962 return X86EMUL_CONTINUE;
2963}
2964
Avi Kivity48bb5d32010-08-18 18:54:34 +03002965static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2966{
Avi Kivity48bb5d32010-08-18 18:54:34 +03002967 u64 tsc = 0;
2968
Avi Kivity717746e2011-04-20 13:37:53 +03002969 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002970 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
2971 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
Avi Kivity48bb5d32010-08-18 18:54:34 +03002972 return X86EMUL_CONTINUE;
2973}
2974
Avi Kivity222d21a2011-11-10 14:57:30 +02002975static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
2976{
2977 u64 pmc;
2978
Avi Kivitydd856ef2012-08-27 23:46:17 +03002979 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
Avi Kivity222d21a2011-11-10 14:57:30 +02002980 return emulate_gp(ctxt, 0);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002981 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
2982 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
Avi Kivity222d21a2011-11-10 14:57:30 +02002983 return X86EMUL_CONTINUE;
2984}
2985
Avi Kivityb9eac5f2010-08-03 14:46:56 +03002986static int em_mov(struct x86_emulate_ctxt *ctxt)
2987{
Paolo Bonzini54cfdb32014-03-27 11:36:25 +01002988 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
Avi Kivityb9eac5f2010-08-03 14:46:56 +03002989 return X86EMUL_CONTINUE;
2990}
2991
Borislav Petkov84cffe42013-10-29 12:54:56 +01002992#define FFL(x) bit(X86_FEATURE_##x)
2993
2994static int em_movbe(struct x86_emulate_ctxt *ctxt)
2995{
2996 u32 ebx, ecx, edx, eax = 1;
2997 u16 tmp;
2998
2999 /*
                3000	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3001 */
3002 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3003 if (!(ecx & FFL(MOVBE)))
3004 return emulate_ud(ctxt);
3005
3006 switch (ctxt->op_bytes) {
3007 case 2:
3008 /*
3009 * From MOVBE definition: "...When the operand size is 16 bits,
3010 * the upper word of the destination register remains unchanged
3011 * ..."
3012 *
                3013	 * Casting either ->valptr or ->val to u16 would break strict-aliasing
                3014	 * rules, so we have to do the operation almost by hand.
3015 */
3016 tmp = (u16)ctxt->src.val;
3017 ctxt->dst.val &= ~0xffffUL;
3018 ctxt->dst.val |= (unsigned long)swab16(tmp);
3019 break;
3020 case 4:
3021 ctxt->dst.val = swab32((u32)ctxt->src.val);
3022 break;
3023 case 8:
3024 ctxt->dst.val = swab64(ctxt->src.val);
3025 break;
3026 default:
Paolo Bonzini592f0852014-08-20 10:05:08 +02003027 BUG();
Borislav Petkov84cffe42013-10-29 12:54:56 +01003028 }
3029 return X86EMUL_CONTINUE;
3030}
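/*
 * Example (illustrative only): a 32-bit MOVBE of 0x12345678 stores
 * 0x78563412.  In the 16-bit case swab16(0x1234) = 0x3412 is merged into
 * the low word only, so the upper word of the destination register is
 * preserved exactly as the SDM quote above requires.
 */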
3031
Takuya Yoshikawabc00f8d2011-11-22 15:19:19 +09003032static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3033{
3034 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3035 return emulate_gp(ctxt, 0);
3036
3037 /* Disable writeback. */
3038 ctxt->dst.type = OP_NONE;
3039 return X86EMUL_CONTINUE;
3040}
3041
3042static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3043{
3044 unsigned long val;
3045
3046 if (ctxt->mode == X86EMUL_MODE_PROT64)
3047 val = ctxt->src.val & ~0ULL;
3048 else
3049 val = ctxt->src.val & ~0U;
3050
3051 /* #UD condition is already handled. */
3052 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3053 return emulate_gp(ctxt, 0);
3054
3055 /* Disable writeback. */
3056 ctxt->dst.type = OP_NONE;
3057 return X86EMUL_CONTINUE;
3058}
3059
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003060static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3061{
3062 u64 msr_data;
3063
Avi Kivitydd856ef2012-08-27 23:46:17 +03003064 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3065 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3066 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003067 return emulate_gp(ctxt, 0);
3068
3069 return X86EMUL_CONTINUE;
3070}
3071
3072static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3073{
3074 u64 msr_data;
3075
Avi Kivitydd856ef2012-08-27 23:46:17 +03003076 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003077 return emulate_gp(ctxt, 0);
3078
Avi Kivitydd856ef2012-08-27 23:46:17 +03003079 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3080 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003081 return X86EMUL_CONTINUE;
3082}
3083
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003084static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3085{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003086 if (ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003087 return emulate_ud(ctxt);
3088
Avi Kivity9dac77f2011-06-01 15:34:25 +03003089 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003090 return X86EMUL_CONTINUE;
3091}
3092
3093static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3094{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003095 u16 sel = ctxt->src.val;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003096
Avi Kivity9dac77f2011-06-01 15:34:25 +03003097 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003098 return emulate_ud(ctxt);
3099
Avi Kivity9dac77f2011-06-01 15:34:25 +03003100 if (ctxt->modrm_reg == VCPU_SREG_SS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003101 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3102
3103 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003104 ctxt->dst.type = OP_NONE;
3105 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003106}
3107
Avi Kivitya14e5792012-06-13 12:28:33 +03003108static int em_lldt(struct x86_emulate_ctxt *ctxt)
3109{
3110 u16 sel = ctxt->src.val;
3111
3112 /* Disable writeback. */
3113 ctxt->dst.type = OP_NONE;
3114 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3115}
3116
Avi Kivity80890002012-06-13 16:33:29 +03003117static int em_ltr(struct x86_emulate_ctxt *ctxt)
3118{
3119 u16 sel = ctxt->src.val;
3120
3121 /* Disable writeback. */
3122 ctxt->dst.type = OP_NONE;
3123 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3124}
3125
Avi Kivity38503912011-03-31 18:48:09 +02003126static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3127{
Avi Kivity9fa088f2011-03-31 18:54:30 +02003128 int rc;
3129 ulong linear;
3130
Avi Kivity9dac77f2011-06-01 15:34:25 +03003131 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02003132 if (rc == X86EMUL_CONTINUE)
Avi Kivity3cb16fe2011-04-20 15:38:44 +03003133 ctxt->ops->invlpg(ctxt, linear);
Avi Kivity38503912011-03-31 18:48:09 +02003134 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003135 ctxt->dst.type = OP_NONE;
Avi Kivity38503912011-03-31 18:48:09 +02003136 return X86EMUL_CONTINUE;
3137}
3138
Avi Kivity2d04a052011-04-20 15:32:49 +03003139static int em_clts(struct x86_emulate_ctxt *ctxt)
3140{
3141 ulong cr0;
3142
3143 cr0 = ctxt->ops->get_cr(ctxt, 0);
3144 cr0 &= ~X86_CR0_TS;
3145 ctxt->ops->set_cr(ctxt, 0, cr0);
3146 return X86EMUL_CONTINUE;
3147}
3148
Avi Kivity26d05cc2011-04-21 12:07:59 +03003149static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3150{
Nadav Amit0f54a322014-08-29 11:26:55 +03003151 int rc = ctxt->ops->fix_hypercall(ctxt);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003152
Avi Kivity26d05cc2011-04-21 12:07:59 +03003153 if (rc != X86EMUL_CONTINUE)
3154 return rc;
3155
3156 /* Let the processor re-execute the fixed hypercall */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003157 ctxt->_eip = ctxt->eip;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003158 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003159 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003160 return X86EMUL_CONTINUE;
3161}
3162
Avi Kivity96051572012-06-10 17:21:18 +03003163static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3164 void (*get)(struct x86_emulate_ctxt *ctxt,
3165 struct desc_ptr *ptr))
3166{
3167 struct desc_ptr desc_ptr;
3168
3169 if (ctxt->mode == X86EMUL_MODE_PROT64)
3170 ctxt->op_bytes = 8;
3171 get(ctxt, &desc_ptr);
3172 if (ctxt->op_bytes == 2) {
3173 ctxt->op_bytes = 4;
3174 desc_ptr.address &= 0x00ffffff;
3175 }
3176 /* Disable writeback. */
3177 ctxt->dst.type = OP_NONE;
3178 return segmented_write(ctxt, ctxt->dst.addr.mem,
3179 &desc_ptr, 2 + ctxt->op_bytes);
3180}
3181
3182static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3183{
3184 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3185}
3186
3187static int em_sidt(struct x86_emulate_ctxt *ctxt)
3188{
3189 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3190}
3191
Avi Kivity26d05cc2011-04-21 12:07:59 +03003192static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3193{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003194 struct desc_ptr desc_ptr;
3195 int rc;
3196
Avi Kivity510425f2012-06-07 17:04:36 +03003197 if (ctxt->mode == X86EMUL_MODE_PROT64)
3198 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003199 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Avi Kivity26d05cc2011-04-21 12:07:59 +03003200 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003201 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003202 if (rc != X86EMUL_CONTINUE)
3203 return rc;
3204 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3205 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003206 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003207 return X86EMUL_CONTINUE;
3208}
3209
Avi Kivity5ef39c72011-04-21 12:21:50 +03003210static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003211{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003212 int rc;
3213
Avi Kivity5ef39c72011-04-21 12:21:50 +03003214 rc = ctxt->ops->fix_hypercall(ctxt);
3215
Avi Kivity26d05cc2011-04-21 12:07:59 +03003216 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003217 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003218 return rc;
3219}
3220
3221static int em_lidt(struct x86_emulate_ctxt *ctxt)
3222{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003223 struct desc_ptr desc_ptr;
3224 int rc;
3225
Avi Kivity510425f2012-06-07 17:04:36 +03003226 if (ctxt->mode == X86EMUL_MODE_PROT64)
3227 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003228 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Takuya Yoshikawa509cf9f2011-05-02 02:25:07 +09003229 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003230 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003231 if (rc != X86EMUL_CONTINUE)
3232 return rc;
3233 ctxt->ops->set_idt(ctxt, &desc_ptr);
3234 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003235 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003236 return X86EMUL_CONTINUE;
3237}
3238
3239static int em_smsw(struct x86_emulate_ctxt *ctxt)
3240{
Nadav Amit32e94d02014-06-02 18:34:11 +03003241 if (ctxt->dst.type == OP_MEM)
3242 ctxt->dst.bytes = 2;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003243 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003244 return X86EMUL_CONTINUE;
3245}
3246
3247static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3248{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003249 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003250 | (ctxt->src.val & 0x0f));
3251 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003252 return X86EMUL_CONTINUE;
3253}
3254
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003255static int em_loop(struct x86_emulate_ctxt *ctxt)
3256{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003257 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3258 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
Avi Kivity9dac77f2011-06-01 15:34:25 +03003259 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3260 jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003261
3262 return X86EMUL_CONTINUE;
3263}
3264
3265static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3266{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003267 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003268 jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003269
3270 return X86EMUL_CONTINUE;
3271}
3272
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003273static int em_in(struct x86_emulate_ctxt *ctxt)
3274{
3275 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3276 &ctxt->dst.val))
3277 return X86EMUL_IO_NEEDED;
3278
3279 return X86EMUL_CONTINUE;
3280}
3281
3282static int em_out(struct x86_emulate_ctxt *ctxt)
3283{
3284 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3285 &ctxt->src.val, 1);
3286 /* Disable writeback. */
3287 ctxt->dst.type = OP_NONE;
3288 return X86EMUL_CONTINUE;
3289}
3290
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003291static int em_cli(struct x86_emulate_ctxt *ctxt)
3292{
3293 if (emulator_bad_iopl(ctxt))
3294 return emulate_gp(ctxt, 0);
3295
3296 ctxt->eflags &= ~X86_EFLAGS_IF;
3297 return X86EMUL_CONTINUE;
3298}
3299
3300static int em_sti(struct x86_emulate_ctxt *ctxt)
3301{
3302 if (emulator_bad_iopl(ctxt))
3303 return emulate_gp(ctxt, 0);
3304
3305 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3306 ctxt->eflags |= X86_EFLAGS_IF;
3307 return X86EMUL_CONTINUE;
3308}
3309
Avi Kivity6d6eede2012-06-07 14:11:36 +03003310static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3311{
3312 u32 eax, ebx, ecx, edx;
3313
Avi Kivitydd856ef2012-08-27 23:46:17 +03003314 eax = reg_read(ctxt, VCPU_REGS_RAX);
3315 ecx = reg_read(ctxt, VCPU_REGS_RCX);
Avi Kivity6d6eede2012-06-07 14:11:36 +03003316 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003317 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3318 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3319 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3320 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
Avi Kivity6d6eede2012-06-07 14:11:36 +03003321 return X86EMUL_CONTINUE;
3322}
3323
Paolo Bonzini98f73632013-10-31 11:19:42 +01003324static int em_sahf(struct x86_emulate_ctxt *ctxt)
3325{
3326 u32 flags;
3327
3328 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3329 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3330
3331 ctxt->eflags &= ~0xffUL;
3332 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3333 return X86EMUL_CONTINUE;
3334}
3335
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003336static int em_lahf(struct x86_emulate_ctxt *ctxt)
3337{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003338 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3339 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003340 return X86EMUL_CONTINUE;
3341}
3342
Avi Kivity92998362012-06-13 12:25:06 +03003343static int em_bswap(struct x86_emulate_ctxt *ctxt)
3344{
3345 switch (ctxt->op_bytes) {
3346#ifdef CONFIG_X86_64
3347 case 8:
3348 asm("bswap %0" : "+r"(ctxt->dst.val));
3349 break;
3350#endif
3351 default:
3352 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3353 break;
3354 }
3355 return X86EMUL_CONTINUE;
3356}
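/*
 * Example (illustrative only): BSWAP turns 0x12345678 into 0x78563412;
 * the eight-byte variant is only reachable when CONFIG_X86_64 is set,
 * matching the #ifdef above.
 */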
3357
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003358static bool valid_cr(int nr)
3359{
3360 switch (nr) {
3361 case 0:
3362 case 2 ... 4:
3363 case 8:
3364 return true;
3365 default:
3366 return false;
3367 }
3368}
3369
3370static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3371{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003372 if (!valid_cr(ctxt->modrm_reg))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003373 return emulate_ud(ctxt);
3374
3375 return X86EMUL_CONTINUE;
3376}
3377
3378static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3379{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003380 u64 new_val = ctxt->src.val64;
3381 int cr = ctxt->modrm_reg;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003382 u64 efer = 0;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003383
3384 static u64 cr_reserved_bits[] = {
3385 0xffffffff00000000ULL,
3386 0, 0, 0, /* CR3 checked later */
3387 CR4_RESERVED_BITS,
3388 0, 0, 0,
3389 CR8_RESERVED_BITS,
3390 };
3391
3392 if (!valid_cr(cr))
3393 return emulate_ud(ctxt);
3394
3395 if (new_val & cr_reserved_bits[cr])
3396 return emulate_gp(ctxt, 0);
3397
3398 switch (cr) {
3399 case 0: {
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003400 u64 cr4;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003401 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3402 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3403 return emulate_gp(ctxt, 0);
3404
Avi Kivity717746e2011-04-20 13:37:53 +03003405 cr4 = ctxt->ops->get_cr(ctxt, 4);
3406 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003407
3408 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3409 !(cr4 & X86_CR4_PAE))
3410 return emulate_gp(ctxt, 0);
3411
3412 break;
3413 }
3414 case 3: {
3415 u64 rsvd = 0;
3416
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003417 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3418 if (efer & EFER_LMA)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003419 rsvd = CR3_L_MODE_RESERVED_BITS;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003420
3421 if (new_val & rsvd)
3422 return emulate_gp(ctxt, 0);
3423
3424 break;
3425 }
3426 case 4: {
Avi Kivity717746e2011-04-20 13:37:53 +03003427 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003428
3429 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3430 return emulate_gp(ctxt, 0);
3431
3432 break;
3433 }
3434 }
3435
3436 return X86EMUL_CONTINUE;
3437}
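/*
 * Example (illustrative only): a guest with EFER.LME set that tries to
 * enable CR0.PG while CR4.PAE is clear takes #GP(0) from the CR0 case
 * above, and setting any bit from cr_reserved_bits[] for the written
 * control register fails the same way.
 */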
3438
Joerg Roedel3b88e412011-04-04 12:39:29 +02003439static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3440{
3441 unsigned long dr7;
3442
Avi Kivity717746e2011-04-20 13:37:53 +03003443 ctxt->ops->get_dr(ctxt, 7, &dr7);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003444
                3445	/* Check if DR7.GD (general detect enable, bit 13) is set */
3446 return dr7 & (1 << 13);
3447}
3448
3449static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3450{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003451 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003452 u64 cr4;
3453
3454 if (dr > 7)
3455 return emulate_ud(ctxt);
3456
Avi Kivity717746e2011-04-20 13:37:53 +03003457 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003458 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3459 return emulate_ud(ctxt);
3460
3461 if (check_dr7_gd(ctxt))
3462 return emulate_db(ctxt);
3463
3464 return X86EMUL_CONTINUE;
3465}
3466
3467static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3468{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003469 u64 new_val = ctxt->src.val64;
3470 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003471
3472 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3473 return emulate_gp(ctxt, 0);
3474
3475 return check_dr_read(ctxt);
3476}
3477
Joerg Roedel01de8b02011-04-04 12:39:31 +02003478static int check_svme(struct x86_emulate_ctxt *ctxt)
3479{
3480 u64 efer;
3481
Avi Kivity717746e2011-04-20 13:37:53 +03003482 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003483
3484 if (!(efer & EFER_SVME))
3485 return emulate_ud(ctxt);
3486
3487 return X86EMUL_CONTINUE;
3488}
3489
3490static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3491{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003492 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003493
3494 /* Valid physical address? */
Randy Dunlapd4224442011-04-21 09:09:22 -07003495 if (rax & 0xffff000000000000ULL)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003496 return emulate_gp(ctxt, 0);
3497
3498 return check_svme(ctxt);
3499}
3500
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003501static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3502{
Avi Kivity717746e2011-04-20 13:37:53 +03003503 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003504
Avi Kivity717746e2011-04-20 13:37:53 +03003505 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003506 return emulate_ud(ctxt);
3507
3508 return X86EMUL_CONTINUE;
3509}
3510
Joerg Roedel80612522011-04-04 12:39:33 +02003511static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3512{
Avi Kivity717746e2011-04-20 13:37:53 +03003513 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003514 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
Joerg Roedel80612522011-04-04 12:39:33 +02003515
Avi Kivity717746e2011-04-20 13:37:53 +03003516 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
Nadav Amit67f4d422014-06-02 18:34:09 +03003517 ctxt->ops->check_pmc(ctxt, rcx))
Joerg Roedel80612522011-04-04 12:39:33 +02003518 return emulate_gp(ctxt, 0);
3519
3520 return X86EMUL_CONTINUE;
3521}
3522
Joerg Roedelf6511932011-04-04 12:39:35 +02003523static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3524{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003525 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3526 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003527 return emulate_gp(ctxt, 0);
3528
3529 return X86EMUL_CONTINUE;
3530}
3531
3532static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3533{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003534 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3535 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003536 return emulate_gp(ctxt, 0);
3537
3538 return X86EMUL_CONTINUE;
3539}
3540
Avi Kivity73fba5f2010-07-29 15:11:53 +03003541#define D(_y) { .flags = (_y) }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003542#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3543#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3544 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Gleb Natapov0b789ee2013-04-11 11:59:55 +03003545#define N D(NotImpl)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003546#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003547#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3548#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
Gleb Natapov045a2822012-12-20 16:57:43 +02003549#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003550#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
Avi Kivitye28bbd42013-01-04 16:18:48 +02003551#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
Avi Kivityc4f035c2011-04-04 12:39:22 +02003552#define II(_f, _e, _i) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003553 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
Joerg Roedeld09beab2011-04-04 12:39:25 +02003554#define IIP(_f, _e, _i, _p) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003555 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3556 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Avi Kivityaa97bb42010-01-20 18:09:23 +02003557#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003558
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003559#define D2bv(_f) D((_f) | ByteOp), D(_f)
Joerg Roedelf6511932011-04-04 12:39:35 +02003560#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003561#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
Avi Kivityf7857f32013-01-04 16:18:53 +02003562#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003563#define I2bvIP(_f, _e, _i, _p) \
3564 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003565
Avi Kivityfb864fb2013-01-04 16:18:54 +02003566#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3567 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3568 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
Avi Kivity6230f7f2010-08-26 18:34:55 +03003569
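/*
 * How the helpers above expand (a sketch, not generated code): a table
 * entry such as F6ALU(Lock, em_add) covers the six classic ALU encodings
 * of an instruction, roughly
 *
 *	F(Lock | ByteOp | DstMem | SrcReg | ModRM, em_add)	00 add r/m8, r8
 *	F(Lock |          DstMem | SrcReg | ModRM, em_add)	01 add r/m, r
 *	F(       ByteOp | DstReg | SrcMem | ModRM, em_add)	02 add r8, r/m8
 *	F(                DstReg | SrcMem | ModRM, em_add)	03 add r, r/m
 *	F(       ByteOp | DstAcc | SrcImm,         em_add)	04 add al, imm8
 *	F(                DstAcc | SrcImm,         em_add)	05 add ax/eax, imm
 *
 * with Lock dropped from the forms that cannot take a LOCK prefix.
 */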
Nadav Amit0f54a322014-08-29 11:26:55 +03003570static const struct opcode group7_rm0[] = {
3571 N,
3572 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3573 N, N, N, N, N, N,
3574};
3575
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003576static const struct opcode group7_rm1[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003577 DI(SrcNone | Priv, monitor),
3578 DI(SrcNone | Priv, mwait),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003579 N, N, N, N, N, N,
3580};
3581
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003582static const struct opcode group7_rm3[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003583 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
Borislav Petkovb51e9742013-09-22 16:44:52 +02003584 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003585 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3586 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3587 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3588 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3589 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3590 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003591};
Avi Kivity6230f7f2010-08-26 18:34:55 +03003592
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003593static const struct opcode group7_rm7[] = {
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003594 N,
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003595 DIP(SrcNone, rdtscp, check_rdtsc),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003596 N, N, N, N, N, N,
3597};
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09003598
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003599static const struct opcode group1[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003600 F(Lock, em_add),
3601 F(Lock | PageTable, em_or),
3602 F(Lock, em_adc),
3603 F(Lock, em_sbb),
3604 F(Lock | PageTable, em_and),
3605 F(Lock, em_sub),
3606 F(Lock, em_xor),
3607 F(NoWrite, em_cmp),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003608};
3609
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003610static const struct opcode group1A[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003611 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003612};
3613
Avi Kivity007a3b52013-01-19 19:51:51 +02003614static const struct opcode group2[] = {
3615 F(DstMem | ModRM, em_rol),
3616 F(DstMem | ModRM, em_ror),
3617 F(DstMem | ModRM, em_rcl),
3618 F(DstMem | ModRM, em_rcr),
3619 F(DstMem | ModRM, em_shl),
3620 F(DstMem | ModRM, em_shr),
3621 F(DstMem | ModRM, em_shl),
3622 F(DstMem | ModRM, em_sar),
3623};
3624
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003625static const struct opcode group3[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003626 F(DstMem | SrcImm | NoWrite, em_test),
3627 F(DstMem | SrcImm | NoWrite, em_test),
Avi Kivity45a14672013-01-04 16:18:52 +02003628 F(DstMem | SrcNone | Lock, em_not),
3629 F(DstMem | SrcNone | Lock, em_neg),
Avi Kivityb9fa4092013-02-09 11:31:48 +02003630 F(DstXacc | Src2Mem, em_mul_ex),
3631 F(DstXacc | Src2Mem, em_imul_ex),
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02003632 F(DstXacc | Src2Mem, em_div_ex),
3633 F(DstXacc | Src2Mem, em_idiv_ex),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003634};
3635
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003636static const struct opcode group4[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003637 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3638 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003639 N, N, N, N, N, N,
3640};
3641
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003642static const struct opcode group5[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003643 F(DstMem | SrcNone | Lock, em_inc),
3644 F(DstMem | SrcNone | Lock, em_dec),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003645 I(SrcMem | Stack, em_grp45),
3646 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3647 I(SrcMem | Stack, em_grp45),
3648 I(SrcMemFAddr | ImplicitOps, em_grp45),
Gleb Natapov188424b2013-04-11 12:32:14 +03003649 I(SrcMem | Stack, em_grp45), D(Undefined),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003650};
3651
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003652static const struct opcode group6[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003653 DI(Prot, sldt),
3654 DI(Prot, str),
Avi Kivitya14e5792012-06-13 12:28:33 +03003655 II(Prot | Priv | SrcMem16, em_lldt, lldt),
Avi Kivity80890002012-06-13 16:33:29 +03003656 II(Prot | Priv | SrcMem16, em_ltr, ltr),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02003657 N, N, N, N,
3658};
3659
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003660static const struct group_dual group7 = { {
Nadav Amit606b1c32014-06-02 18:34:06 +03003661 II(Mov | DstMem, em_sgdt, sgdt),
3662 II(Mov | DstMem, em_sidt, sidt),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003663 II(SrcMem | Priv, em_lgdt, lgdt),
3664 II(SrcMem | Priv, em_lidt, lidt),
3665 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3666 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3667 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003668}, {
Nadav Amit0f54a322014-08-29 11:26:55 +03003669 EXT(0, group7_rm0),
Avi Kivity5ef39c72011-04-21 12:21:50 +03003670 EXT(0, group7_rm1),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003671 N, EXT(0, group7_rm3),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003672 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3673 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3674 EXT(0, group7_rm7),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003675} };
3676
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003677static const struct opcode group8[] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003678 N, N, N, N,
Avi Kivity11c363b2013-01-19 19:51:54 +02003679 F(DstMem | SrcImmByte | NoWrite, em_bt),
3680 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3681 F(DstMem | SrcImmByte | Lock, em_btr),
3682 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003683};
3684
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003685static const struct group_dual group9 = { {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003686 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003687}, {
3688 N, N, N, N, N, N, N, N,
3689} };
3690
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003691static const struct opcode group11[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003692 I(DstMem | SrcImm | Mov | PageTable, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003693 X7(D(Undefined)),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03003694};
3695
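/*
 * gprefix entries are selected by the mandatory SIMD prefix, in the order
 * none / 0x66 / 0xF2 / 0xF3 (see the Prefix case in x86_decode_insn).
 * For 0f 6f/7f this gives MMX movq with no prefix, movdqa with 0x66 and
 * movdqu with 0xF3.
 */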
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003696static const struct gprefix pfx_0f_6f_0f_7f = {
Avi Kivitye5971752012-04-09 18:40:03 +03003697 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
Avi Kivityaa97bb42010-01-20 18:09:23 +02003698};
3699
Paolo Bonzinid5b77062014-07-14 12:54:48 +02003700static const struct gprefix pfx_0f_2b = {
3701 I(0, em_mov), I(0, em_mov), N, N,
Avi Kivity3e114eb2012-04-09 18:40:01 +03003702};
3703
Igor Mammedov27ce8252014-03-15 21:01:59 +01003704static const struct gprefix pfx_0f_28_0f_29 = {
Igor Mammedov6fec27d2014-03-15 21:02:00 +01003705 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01003706};
3707
Alex Williamson0a370272014-07-11 11:56:31 -06003708static const struct gprefix pfx_0f_e7 = {
3709 N, I(Sse, em_mov), N, N,
3710};
3711
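/*
 * Escape tables cover the x87 escape opcodes the emulator cares about.
 * The first array holds the memory forms (ModRM.mod != 3), indexed by
 * ModRM.reg; the second holds the register forms for ModRM bytes
 * 0xC0-0xFF.  Only fnstcw, fninit and fnstsw are emulated here.
 */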
Gleb Natapov045a2822012-12-20 16:57:43 +02003712static const struct escape escape_d9 = { {
3713 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3714}, {
3715 /* 0xC0 - 0xC7 */
3716 N, N, N, N, N, N, N, N,
3717 /* 0xC8 - 0xCF */
3718 N, N, N, N, N, N, N, N,
3719	/* 0xD0 - 0xD7 */
3720 N, N, N, N, N, N, N, N,
3721 /* 0xD8 - 0xDF */
3722 N, N, N, N, N, N, N, N,
3723 /* 0xE0 - 0xE7 */
3724 N, N, N, N, N, N, N, N,
3725 /* 0xE8 - 0xEF */
3726 N, N, N, N, N, N, N, N,
3727 /* 0xF0 - 0xF7 */
3728 N, N, N, N, N, N, N, N,
3729 /* 0xF8 - 0xFF */
3730 N, N, N, N, N, N, N, N,
3731} };
3732
3733static const struct escape escape_db = { {
3734 N, N, N, N, N, N, N, N,
3735}, {
3736 /* 0xC0 - 0xC7 */
3737 N, N, N, N, N, N, N, N,
3738 /* 0xC8 - 0xCF */
3739 N, N, N, N, N, N, N, N,
3740	/* 0xD0 - 0xD7 */
3741 N, N, N, N, N, N, N, N,
3742 /* 0xD8 - 0xDF */
3743 N, N, N, N, N, N, N, N,
3744 /* 0xE0 - 0xE7 */
3745 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3746 /* 0xE8 - 0xEF */
3747 N, N, N, N, N, N, N, N,
3748 /* 0xF0 - 0xF7 */
3749 N, N, N, N, N, N, N, N,
3750 /* 0xF8 - 0xFF */
3751 N, N, N, N, N, N, N, N,
3752} };
3753
3754static const struct escape escape_dd = { {
3755 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3756}, {
3757 /* 0xC0 - 0xC7 */
3758 N, N, N, N, N, N, N, N,
3759 /* 0xC8 - 0xCF */
3760 N, N, N, N, N, N, N, N,
3761	/* 0xD0 - 0xD7 */
3762 N, N, N, N, N, N, N, N,
3763 /* 0xD8 - 0xDF */
3764 N, N, N, N, N, N, N, N,
3765 /* 0xE0 - 0xE7 */
3766 N, N, N, N, N, N, N, N,
3767 /* 0xE8 - 0xEF */
3768 N, N, N, N, N, N, N, N,
3769 /* 0xF0 - 0xF7 */
3770 N, N, N, N, N, N, N, N,
3771 /* 0xF8 - 0xFF */
3772 N, N, N, N, N, N, N, N,
3773} };
3774
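/*
 * Main decode table, indexed by the first opcode byte.  Roughly: D()
 * declares decode flags only, I()/II() add an emulation callback (and an
 * intercept id), F() points at a fastop, DI()/DIP()/IIP() add intercept
 * and permission checks, and G()/GD()/GP()/E() indirect through group,
 * group-dual, mandatory-prefix and x87-escape tables.
 */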
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003775static const struct opcode opcode_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003776 /* 0x00 - 0x07 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003777 F6ALU(Lock, em_add),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003778 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3779 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003780 /* 0x08 - 0x0F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003781 F6ALU(Lock | PageTable, em_or),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003782 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3783 N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003784 /* 0x10 - 0x17 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003785 F6ALU(Lock, em_adc),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003786 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3787 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003788 /* 0x18 - 0x1F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003789 F6ALU(Lock, em_sbb),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003790 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3791 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003792 /* 0x20 - 0x27 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003793 F6ALU(Lock | PageTable, em_and), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003794 /* 0x28 - 0x2F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003795 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003796 /* 0x30 - 0x37 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003797 F6ALU(Lock, em_xor), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003798 /* 0x38 - 0x3F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003799 F6ALU(NoWrite, em_cmp), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003800 /* 0x40 - 0x4F */
Avi Kivity95413dc2013-01-19 19:51:53 +02003801 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003802 /* 0x50 - 0x57 */
Avi Kivity63540382010-07-29 15:11:55 +03003803 X8(I(SrcReg | Stack, em_push)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003804 /* 0x58 - 0x5F */
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09003805 X8(I(DstReg | Stack, em_pop)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003806 /* 0x60 - 0x67 */
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09003807 I(ImplicitOps | Stack | No64, em_pusha),
3808 I(ImplicitOps | Stack | No64, em_popa),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003809 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3810 N, N, N, N,
3811 /* 0x68 - 0x6F */
Avi Kivityd46164d2010-08-18 19:29:33 +03003812 I(SrcImm | Mov | Stack, em_push),
3813 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003814 I(SrcImmByte | Mov | Stack, em_push),
3815 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
Gleb Natapovb3356bf2012-09-03 15:24:29 +03003816 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
Takuya Yoshikawa2b5e97e2011-11-23 12:27:39 +09003817 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
Avi Kivity73fba5f2010-07-29 15:11:53 +03003818 /* 0x70 - 0x7F */
3819 X16(D(SrcImmByte)),
3820 /* 0x80 - 0x87 */
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003821 G(ByteOp | DstMem | SrcImm, group1),
3822 G(DstMem | SrcImm, group1),
3823 G(ByteOp | DstMem | SrcImm | No64, group1),
3824 G(DstMem | SrcImmByte, group1),
Avi Kivityfb864fb2013-01-04 16:18:54 +02003825 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003826 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003827 /* 0x88 - 0x8F */
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003828 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003829 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003830 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003831 D(ModRM | SrcMem | NoAccess | DstReg),
3832 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3833 G(0, group1A),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003834 /* 0x90 - 0x97 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02003835 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003836 /* 0x98 - 0x9F */
Avi Kivity61429142010-08-19 15:13:00 +03003837 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
Wei Yongjuncc4feed2010-08-25 14:10:53 +08003838 I(SrcImmFAddr | No64, em_call_far), N,
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09003839 II(ImplicitOps | Stack, em_pushf, pushf),
Paolo Bonzini98f73632013-10-31 11:19:42 +01003840 II(ImplicitOps | Stack, em_popf, popf),
3841 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003842 /* 0xA0 - 0xA7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003843 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003844 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003845 I2bv(SrcSI | DstDI | Mov | String, em_mov),
Avi Kivityfb864fb2013-01-04 16:18:54 +02003846 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003847 /* 0xA8 - 0xAF */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003848 F2bv(DstAcc | SrcImm | NoWrite, em_test),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003849 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3850 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
Avi Kivityfb864fb2013-01-04 16:18:54 +02003851 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003852 /* 0xB0 - 0xB7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003853 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003854 /* 0xB8 - 0xBF */
Nadav Amit5e2c6882012-12-06 21:55:10 -02003855 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003856 /* 0xC0 - 0xC7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02003857 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
Avi Kivity40ece7c2010-08-18 15:12:09 +03003858 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09003859 I(ImplicitOps | Stack, em_ret),
Avi Kivityd4b43252011-09-13 10:45:50 +03003860 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3861 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03003862 G(ByteOp, group11), G(0, group11),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003863 /* 0xC8 - 0xCF */
Avi Kivity612e89f2012-06-12 20:03:23 +03003864 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
Bruce Rogers32611072013-09-09 09:40:20 -06003865 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
3866 I(ImplicitOps | Stack, em_ret_far),
Avi Kivity3c6e2762011-04-04 12:39:23 +02003867 D(ImplicitOps), DI(SrcImmByte, intn),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09003868 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003869 /* 0xD0 - 0xD7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02003870 G(Src2One | ByteOp, group2), G(Src2One, group2),
3871 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02003872 I(DstAcc | SrcImmUByte | No64, em_aam),
Paolo Bonzini326f5782013-05-09 11:32:51 +02003873 I(DstAcc | SrcImmUByte | No64, em_aad),
3874 F(DstAcc | ByteOp | No64, em_salc),
Paolo Bonzini7fa57952013-05-09 11:32:50 +02003875 I(DstAcc | SrcXLat | ByteOp, em_mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003876 /* 0xD8 - 0xDF */
Gleb Natapov045a2822012-12-20 16:57:43 +02003877 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003878 /* 0xE0 - 0xE7 */
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003879 X3(I(SrcImmByte, em_loop)),
3880 I(SrcImmByte, em_jcxz),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003881 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
3882 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003883 /* 0xE8 - 0xEF */
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003884 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09003885 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003886 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
3887 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003888 /* 0xF0 - 0xF7 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02003889 N, DI(ImplicitOps, icebp), N, N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02003890 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3891 G(ByteOp, group3), G(0, group3),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003892 /* 0xF8 - 0xFF */
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003893 D(ImplicitOps), D(ImplicitOps),
3894 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003895 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3896};
3897
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003898static const struct opcode twobyte_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003899 /* 0x00 - 0x0F */
Joerg Roedeldee6bb72011-04-04 12:39:30 +02003900 G(0, group6), GD(0, &group7), N, N,
Borislav Petkovb51e9742013-09-22 16:44:52 +02003901 N, I(ImplicitOps | EmulateOnUD, em_syscall),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09003902 II(ImplicitOps | Priv, em_clts, clts), N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02003903 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003904 N, D(ImplicitOps | ModRM), N, N,
3905 /* 0x10 - 0x1F */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02003906 N, N, N, N, N, N, N, N,
3907 D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003908 /* 0x20 - 0x2F */
Nadav Amit9b88ae92014-05-25 23:05:21 +03003909 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
3910 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
3911 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
3912 check_cr_write),
3913 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
3914 check_dr_write),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003915 N, N, N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01003916 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
3917 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
Paolo Bonzinid5b77062014-07-14 12:54:48 +02003918 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
Avi Kivity3e114eb2012-04-09 18:40:01 +03003919 N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003920 /* 0x30 - 0x3F */
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003921 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
Joerg Roedel80612522011-04-04 12:39:33 +02003922 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003923 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
Avi Kivity222d21a2011-11-10 14:57:30 +02003924 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
Borislav Petkovb51e9742013-09-22 16:44:52 +02003925 I(ImplicitOps | EmulateOnUD, em_sysenter),
3926 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
Avi Kivityd8671622011-02-01 16:32:03 +02003927 N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003928 N, N, N, N, N, N, N, N,
3929 /* 0x40 - 0x4F */
Nadav Amit140bad82014-06-15 16:13:00 +03003930 X16(D(DstReg | SrcMem | ModRM)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003931 /* 0x50 - 0x5F */
3932 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3933 /* 0x60 - 0x6F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02003934 N, N, N, N,
3935 N, N, N, N,
3936 N, N, N, N,
3937 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003938 /* 0x70 - 0x7F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02003939 N, N, N, N,
3940 N, N, N, N,
3941 N, N, N, N,
3942 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003943 /* 0x80 - 0x8F */
3944 X16(D(SrcImm)),
3945 /* 0x90 - 0x9F */
Wei Yongjunee45b582010-08-06 17:10:07 +08003946	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003947 /* 0xA0 - 0xA7 */
Avi Kivity1cd196e2011-09-13 10:45:51 +03003948 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
Avi Kivity11c363b2013-01-19 19:51:54 +02003949 II(ImplicitOps, em_cpuid, cpuid),
3950 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
Avi Kivity0bdea062013-01-19 19:51:50 +02003951 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
3952 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003953 /* 0xA8 - 0xAF */
Avi Kivity1cd196e2011-09-13 10:45:51 +03003954 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003955 DI(ImplicitOps, rsm),
Avi Kivity11c363b2013-01-19 19:51:54 +02003956 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
Avi Kivity0bdea062013-01-19 19:51:50 +02003957 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
3958 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
Avi Kivity4d758342013-01-19 19:51:55 +02003959 D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003960 /* 0xB0 - 0xB7 */
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09003961 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
Avi Kivityd4b43252011-09-13 10:45:50 +03003962 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
Avi Kivity11c363b2013-01-19 19:51:54 +02003963 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
Avi Kivityd4b43252011-09-13 10:45:50 +03003964 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3965 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02003966 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003967 /* 0xB8 - 0xBF */
3968 N, N,
Takuya Yoshikawace7faab2011-11-22 15:17:48 +09003969 G(BitOp, group8),
Avi Kivity11c363b2013-01-19 19:51:54 +02003970 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
3971 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02003972 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity92998362012-06-13 12:25:06 +03003973 /* 0xC0 - 0xC7 */
Avi Kivitye47a5f52013-02-09 11:31:51 +02003974 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
Wei Yongjun92f738a2010-08-17 09:19:34 +08003975 N, D(DstMem | SrcReg | ModRM | Mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003976 N, N, N, GD(0, &group9),
Avi Kivity92998362012-06-13 12:25:06 +03003977 /* 0xC8 - 0xCF */
3978 X8(I(DstReg, em_bswap)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003979 /* 0xD0 - 0xDF */
3980 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3981 /* 0xE0 - 0xEF */
Alex Williamson0a370272014-07-11 11:56:31 -06003982 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
3983 N, N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003984 /* 0xF0 - 0xFF */
3985 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3986};
3987
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01003988static const struct gprefix three_byte_0f_38_f0 = {
Borislav Petkov84cffe42013-10-29 12:54:56 +01003989 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01003990};
3991
3992static const struct gprefix three_byte_0f_38_f1 = {
Borislav Petkov84cffe42013-10-29 12:54:56 +01003993 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01003994};
3995
3996/*
3997 * Insns below are indexed by the third opcode byte and then selected by
3998 * the mandatory prefix.
3999 */
4000static const struct opcode opcode_map_0f_38[256] = {
4001 /* 0x00 - 0x7f */
4002 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
Borislav Petkov84cffe42013-10-29 12:54:56 +01004003 /* 0x80 - 0xef */
4004 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4005 /* 0xf0 - 0xf1 */
4006 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4007 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4008 /* 0xf2 - 0xff */
4009 N, N, X4(N), X8(N)
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004010};
4011
Avi Kivity73fba5f2010-07-29 15:11:53 +03004012#undef D
4013#undef N
4014#undef G
4015#undef GD
4016#undef I
Avi Kivityaa97bb42010-01-20 18:09:23 +02004017#undef GP
Joerg Roedel01de8b02011-04-04 12:39:31 +02004018#undef EXT
Avi Kivity73fba5f2010-07-29 15:11:53 +03004019
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004020#undef D2bv
Joerg Roedelf6511932011-04-04 12:39:35 +02004021#undef D2bvIP
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004022#undef I2bv
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004023#undef I2bvIP
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09004024#undef I6ALU
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004025
Avi Kivity9dac77f2011-06-01 15:34:25 +03004026static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
Avi Kivity39f21ee2010-08-18 19:20:21 +03004027{
4028 unsigned size;
4029
Avi Kivity9dac77f2011-06-01 15:34:25 +03004030 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004031 if (size == 8)
4032 size = 4;
4033 return size;
4034}
4035
4036static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4037 unsigned size, bool sign_extension)
4038{
Avi Kivity39f21ee2010-08-18 19:20:21 +03004039 int rc = X86EMUL_CONTINUE;
4040
4041 op->type = OP_IMM;
4042 op->bytes = size;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004043 op->addr.mem.ea = ctxt->_eip;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004044 /* NB. Immediates are sign-extended as necessary. */
4045 switch (op->bytes) {
4046 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004047 op->val = insn_fetch(s8, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004048 break;
4049 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004050 op->val = insn_fetch(s16, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004051 break;
4052 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004053 op->val = insn_fetch(s32, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004054 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004055 case 8:
4056 op->val = insn_fetch(s64, ctxt);
4057 break;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004058 }
4059 if (!sign_extension) {
4060 switch (op->bytes) {
4061 case 1:
4062 op->val &= 0xff;
4063 break;
4064 case 2:
4065 op->val &= 0xffff;
4066 break;
4067 case 4:
4068 op->val &= 0xffffffff;
4069 break;
4070 }
4071 }
4072done:
4073 return rc;
4074}
4075
Avi Kivitya9945542011-09-13 10:45:41 +03004076static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4077 unsigned d)
4078{
4079 int rc = X86EMUL_CONTINUE;
4080
4081 switch (d) {
4082 case OpReg:
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004083 decode_register_operand(ctxt, op);
Avi Kivitya9945542011-09-13 10:45:41 +03004084 break;
4085 case OpImmUByte:
Avi Kivity608aabe2011-09-13 10:45:45 +03004086 rc = decode_imm(ctxt, op, 1, false);
Avi Kivitya9945542011-09-13 10:45:41 +03004087 break;
4088 case OpMem:
Avi Kivity41ddf972011-09-13 10:45:48 +03004089 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity0fe59122011-09-13 10:45:47 +03004090 mem_common:
Avi Kivitya9945542011-09-13 10:45:41 +03004091 *op = ctxt->memop;
4092 ctxt->memopp = op;
Paolo Bonzini96888972014-04-01 14:54:19 +02004093 if (ctxt->d & BitOp)
Avi Kivitya9945542011-09-13 10:45:41 +03004094 fetch_bit_operand(ctxt);
4095 op->orig_val = op->val;
4096 break;
Avi Kivity41ddf972011-09-13 10:45:48 +03004097 case OpMem64:
Nadav Amitaaa05f22014-06-02 18:34:10 +03004098 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
Avi Kivity41ddf972011-09-13 10:45:48 +03004099 goto mem_common;
Avi Kivitya9945542011-09-13 10:45:41 +03004100 case OpAcc:
4101 op->type = OP_REG;
4102 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004103 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Avi Kivitya9945542011-09-13 10:45:41 +03004104 fetch_register_operand(op);
4105 op->orig_val = op->val;
4106 break;
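	/*
	 * OpAccLo/OpAccHi are the two halves of the extended accumulator
	 * (DX:AX ... RDX:RAX) used by the widening mul/div fastops: the low
	 * half is RAX (AX even for byte ops, since a byte multiply produces
	 * a 16-bit result), the high half is RDX and does not exist for
	 * byte-sized operations.
	 */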
Avi Kivity820207c2013-02-09 11:31:45 +02004107 case OpAccLo:
4108 op->type = OP_REG;
4109 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4110 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4111 fetch_register_operand(op);
4112 op->orig_val = op->val;
4113 break;
4114 case OpAccHi:
4115 if (ctxt->d & ByteOp) {
4116 op->type = OP_NONE;
4117 break;
4118 }
4119 op->type = OP_REG;
4120 op->bytes = ctxt->op_bytes;
4121 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4122 fetch_register_operand(op);
4123 op->orig_val = op->val;
4124 break;
Avi Kivitya9945542011-09-13 10:45:41 +03004125 case OpDI:
4126 op->type = OP_MEM;
4127 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4128 op->addr.mem.ea =
Avi Kivitydd856ef2012-08-27 23:46:17 +03004129 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
Avi Kivitya9945542011-09-13 10:45:41 +03004130 op->addr.mem.seg = VCPU_SREG_ES;
4131 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004132 op->count = 1;
Avi Kivitya9945542011-09-13 10:45:41 +03004133 break;
4134 case OpDX:
4135 op->type = OP_REG;
4136 op->bytes = 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004137 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivitya9945542011-09-13 10:45:41 +03004138 fetch_register_operand(op);
4139 break;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004140 case OpCL:
4141 op->bytes = 1;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004142 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004143 break;
4144 case OpImmByte:
4145 rc = decode_imm(ctxt, op, 1, true);
4146 break;
4147 case OpOne:
4148 op->bytes = 1;
4149 op->val = 1;
4150 break;
4151 case OpImm:
4152 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4153 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004154 case OpImm64:
4155 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4156 break;
Avi Kivity28867ce2012-01-16 15:08:44 +02004157 case OpMem8:
4158 ctxt->memop.bytes = 1;
Gleb Natapov660696d2013-04-24 13:38:36 +03004159 if (ctxt->memop.type == OP_REG) {
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02004160 ctxt->memop.addr.reg = decode_register(ctxt,
4161 ctxt->modrm_rm, true);
Gleb Natapov660696d2013-04-24 13:38:36 +03004162 fetch_register_operand(&ctxt->memop);
4163 }
Avi Kivity28867ce2012-01-16 15:08:44 +02004164 goto mem_common;
Avi Kivity0fe59122011-09-13 10:45:47 +03004165 case OpMem16:
4166 ctxt->memop.bytes = 2;
4167 goto mem_common;
4168 case OpMem32:
4169 ctxt->memop.bytes = 4;
4170 goto mem_common;
4171 case OpImmU16:
4172 rc = decode_imm(ctxt, op, 2, false);
4173 break;
4174 case OpImmU:
4175 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4176 break;
4177 case OpSI:
4178 op->type = OP_MEM;
4179 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4180 op->addr.mem.ea =
Avi Kivitydd856ef2012-08-27 23:46:17 +03004181 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
Bandan Das573e80f2014-04-16 12:46:13 -04004182 op->addr.mem.seg = ctxt->seg_override;
Avi Kivity0fe59122011-09-13 10:45:47 +03004183 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004184 op->count = 1;
Avi Kivity0fe59122011-09-13 10:45:47 +03004185 break;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004186 case OpXLat:
4187 op->type = OP_MEM;
4188 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4189 op->addr.mem.ea =
4190 register_address(ctxt,
4191 reg_read(ctxt, VCPU_REGS_RBX) +
4192 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
Bandan Das573e80f2014-04-16 12:46:13 -04004193 op->addr.mem.seg = ctxt->seg_override;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004194 op->val = 0;
4195 break;
Avi Kivity0fe59122011-09-13 10:45:47 +03004196 case OpImmFAddr:
4197 op->type = OP_IMM;
4198 op->addr.mem.ea = ctxt->_eip;
4199 op->bytes = ctxt->op_bytes + 2;
4200 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4201 break;
4202 case OpMemFAddr:
4203 ctxt->memop.bytes = ctxt->op_bytes + 2;
4204 goto mem_common;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004205 case OpES:
4206 op->val = VCPU_SREG_ES;
4207 break;
4208 case OpCS:
4209 op->val = VCPU_SREG_CS;
4210 break;
4211 case OpSS:
4212 op->val = VCPU_SREG_SS;
4213 break;
4214 case OpDS:
4215 op->val = VCPU_SREG_DS;
4216 break;
4217 case OpFS:
4218 op->val = VCPU_SREG_FS;
4219 break;
4220 case OpGS:
4221 op->val = VCPU_SREG_GS;
4222 break;
Avi Kivitya9945542011-09-13 10:45:41 +03004223 case OpImplicit:
4224 /* Special instructions do their own operand decoding. */
4225 default:
4226 op->type = OP_NONE; /* Disable writeback. */
4227 break;
4228 }
4229
4230done:
4231 return rc;
4232}
4233
Takuya Yoshikawaef5d75c2011-05-15 00:57:43 +09004234int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004235{
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004236 int rc = X86EMUL_CONTINUE;
4237 int mode = ctxt->mode;
Avi Kivity46561642011-04-24 14:09:59 +03004238 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004239 bool op_prefix = false;
Bandan Das573e80f2014-04-16 12:46:13 -04004240 bool has_seg_override = false;
Avi Kivity46561642011-04-24 14:09:59 +03004241 struct opcode opcode;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004242
Avi Kivityf09ed832011-09-13 10:45:40 +03004243 ctxt->memop.type = OP_NONE;
4244 ctxt->memopp = NULL;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004245 ctxt->_eip = ctxt->eip;
Paolo Bonzini17052f12014-05-06 16:33:01 +02004246 ctxt->fetch.ptr = ctxt->fetch.data;
4247 ctxt->fetch.end = ctxt->fetch.data + insn_len;
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004248 ctxt->opcode_len = 1;
Andre Przywaradc25e892010-12-21 11:12:07 +01004249 if (insn_len > 0)
Avi Kivity9dac77f2011-06-01 15:34:25 +03004250 memcpy(ctxt->fetch.data, insn, insn_len);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004251 else {
Paolo Bonzini9506d572014-05-06 13:05:25 +02004252 rc = __do_insn_fetch_bytes(ctxt, 1);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004253 if (rc != X86EMUL_CONTINUE)
4254 return rc;
4255 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004256
4257 switch (mode) {
4258 case X86EMUL_MODE_REAL:
4259 case X86EMUL_MODE_VM86:
4260 case X86EMUL_MODE_PROT16:
4261 def_op_bytes = def_ad_bytes = 2;
4262 break;
4263 case X86EMUL_MODE_PROT32:
4264 def_op_bytes = def_ad_bytes = 4;
4265 break;
4266#ifdef CONFIG_X86_64
4267 case X86EMUL_MODE_PROT64:
4268 def_op_bytes = 4;
4269 def_ad_bytes = 8;
4270 break;
4271#endif
4272 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004273 return EMULATION_FAILED;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004274 }
4275
Avi Kivity9dac77f2011-06-01 15:34:25 +03004276 ctxt->op_bytes = def_op_bytes;
4277 ctxt->ad_bytes = def_ad_bytes;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004278
4279 /* Legacy prefixes. */
4280 for (;;) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004281 switch (ctxt->b = insn_fetch(u8, ctxt)) {
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004282 case 0x66: /* operand-size override */
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004283 op_prefix = true;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004284 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004285 ctxt->op_bytes = def_op_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004286 break;
4287 case 0x67: /* address-size override */
4288 if (mode == X86EMUL_MODE_PROT64)
4289 /* switch between 4/8 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004290 ctxt->ad_bytes = def_ad_bytes ^ 12;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004291 else
4292 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004293 ctxt->ad_bytes = def_ad_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004294 break;
4295 case 0x26: /* ES override */
4296 case 0x2e: /* CS override */
4297 case 0x36: /* SS override */
4298 case 0x3e: /* DS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004299 has_seg_override = true;
4300 ctxt->seg_override = (ctxt->b >> 3) & 3;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004301 break;
4302 case 0x64: /* FS override */
4303 case 0x65: /* GS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004304 has_seg_override = true;
4305 ctxt->seg_override = ctxt->b & 7;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004306 break;
4307 case 0x40 ... 0x4f: /* REX */
4308 if (mode != X86EMUL_MODE_PROT64)
4309 goto done_prefixes;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004310 ctxt->rex_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004311 continue;
4312 case 0xf0: /* LOCK */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004313 ctxt->lock_prefix = 1;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004314 break;
4315 case 0xf2: /* REPNE/REPNZ */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004316 case 0xf3: /* REP/REPE/REPZ */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004317 ctxt->rep_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004318 break;
4319 default:
4320 goto done_prefixes;
4321 }
4322
4323 /* Any legacy prefix after a REX prefix nullifies its effect. */
4324
Avi Kivity9dac77f2011-06-01 15:34:25 +03004325 ctxt->rex_prefix = 0;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004326 }
4327
4328done_prefixes:
4329
4330 /* REX prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004331 if (ctxt->rex_prefix & 8)
4332 ctxt->op_bytes = 8; /* REX.W */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004333
4334 /* Opcode byte(s). */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004335 opcode = opcode_table[ctxt->b];
Wei Yongjund3ad6242010-08-05 16:34:39 +08004336 /* Two-byte opcode? */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004337 if (ctxt->b == 0x0f) {
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004338 ctxt->opcode_len = 2;
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004339 ctxt->b = insn_fetch(u8, ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004340 opcode = twobyte_table[ctxt->b];
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004341
4342 /* 0F_38 opcode map */
4343 if (ctxt->b == 0x38) {
4344 ctxt->opcode_len = 3;
4345 ctxt->b = insn_fetch(u8, ctxt);
4346 opcode = opcode_map_0f_38[ctxt->b];
4347 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004348 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004349 ctxt->d = opcode.flags;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004350
Takuya Yoshikawa9f4260e2012-04-30 17:48:25 +09004351 if (ctxt->d & ModRM)
4352 ctxt->modrm = insn_fetch(u8, ctxt);
4353
Nadav Amit7fe864d2014-06-02 18:34:03 +03004354 /* vex-prefix instructions are not implemented */
4355 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4356 (mode == X86EMUL_MODE_PROT64 ||
4357 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4358 ctxt->d = NotImpl;
4359 }
4360
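	/*
	 * The table entry may be an indirection rather than a final opcode:
	 * Group indexes a sub-table by ModRM.reg, GroupDual additionally
	 * separates register (mod == 3) from memory forms, RMExt indexes by
	 * ModRM.rm, Prefix selects by the mandatory SIMD prefix and Escape
	 * handles the x87 tables.  Keep resolving until a concrete entry is
	 * found, accumulating its flags into ctxt->d.
	 */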
Avi Kivity9dac77f2011-06-01 15:34:25 +03004361 while (ctxt->d & GroupMask) {
4362 switch (ctxt->d & GroupMask) {
Avi Kivity46561642011-04-24 14:09:59 +03004363 case Group:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004364 goffset = (ctxt->modrm >> 3) & 7;
Avi Kivity46561642011-04-24 14:09:59 +03004365 opcode = opcode.u.group[goffset];
4366 break;
4367 case GroupDual:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004368 goffset = (ctxt->modrm >> 3) & 7;
4369 if ((ctxt->modrm >> 6) == 3)
Avi Kivity46561642011-04-24 14:09:59 +03004370 opcode = opcode.u.gdual->mod3[goffset];
4371 else
4372 opcode = opcode.u.gdual->mod012[goffset];
4373 break;
4374 case RMExt:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004375 goffset = ctxt->modrm & 7;
Joerg Roedel01de8b02011-04-04 12:39:31 +02004376 opcode = opcode.u.group[goffset];
Avi Kivity46561642011-04-24 14:09:59 +03004377 break;
4378 case Prefix:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004379 if (ctxt->rep_prefix && op_prefix)
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004380 return EMULATION_FAILED;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004381 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
Avi Kivity46561642011-04-24 14:09:59 +03004382 switch (simd_prefix) {
4383 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4384 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4385 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4386 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4387 }
4388 break;
Gleb Natapov045a2822012-12-20 16:57:43 +02004389 case Escape:
4390 if (ctxt->modrm > 0xbf)
4391 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4392 else
4393 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4394 break;
Avi Kivity46561642011-04-24 14:09:59 +03004395 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004396 return EMULATION_FAILED;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004397 }
Avi Kivity46561642011-04-24 14:09:59 +03004398
Avi Kivityb1ea50b2011-09-13 10:45:42 +03004399 ctxt->d &= ~(u64)GroupMask;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004400 ctxt->d |= opcode.flags;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004401 }
4402
Paolo Bonzinie24186e2014-03-27 12:00:57 +01004403 /* Unrecognised? */
4404 if (ctxt->d == 0)
4405 return EMULATION_FAILED;
4406
Avi Kivity9dac77f2011-06-01 15:34:25 +03004407 ctxt->execute = opcode.u.execute;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004408
Nadav Amit3a6095a2014-08-13 16:50:13 +03004409 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4410 return EMULATION_FAILED;
4411
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004412 if (unlikely(ctxt->d &
Nadav Amit3a6095a2014-08-13 16:50:13 +03004413 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004414 /*
4415 * These are copied unconditionally here, and checked unconditionally
4416 * in x86_emulate_insn.
4417 */
4418 ctxt->check_perm = opcode.check_perm;
4419 ctxt->intercept = opcode.intercept;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004420
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004421 if (ctxt->d & NotImpl)
4422 return EMULATION_FAILED;
Avi Kivityd8671622011-02-01 16:32:03 +02004423
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004424 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
Avi Kivity9dac77f2011-06-01 15:34:25 +03004425 ctxt->op_bytes = 8;
Avi Kivity7f9b4b72010-08-01 14:46:54 +03004426
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004427 if (ctxt->d & Op3264) {
4428 if (mode == X86EMUL_MODE_PROT64)
4429 ctxt->op_bytes = 8;
4430 else
4431 ctxt->op_bytes = 4;
4432 }
4433
4434 if (ctxt->d & Sse)
4435 ctxt->op_bytes = 16;
4436 else if (ctxt->d & Mmx)
4437 ctxt->op_bytes = 8;
4438 }
Avi Kivity12537912011-03-29 11:41:27 +02004439
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004440 /* ModRM and SIB bytes. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004441 if (ctxt->d & ModRM) {
Avi Kivityf09ed832011-09-13 10:45:40 +03004442 rc = decode_modrm(ctxt, &ctxt->memop);
Bandan Das573e80f2014-04-16 12:46:13 -04004443 if (!has_seg_override) {
4444 has_seg_override = true;
4445 ctxt->seg_override = ctxt->modrm_seg;
4446 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004447 } else if (ctxt->d & MemAbs)
Avi Kivityf09ed832011-09-13 10:45:40 +03004448 rc = decode_abs(ctxt, &ctxt->memop);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004449 if (rc != X86EMUL_CONTINUE)
4450 goto done;
4451
Bandan Das573e80f2014-04-16 12:46:13 -04004452 if (!has_seg_override)
4453 ctxt->seg_override = VCPU_SREG_DS;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004454
Bandan Das573e80f2014-04-16 12:46:13 -04004455 ctxt->memop.addr.mem.seg = ctxt->seg_override;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004456
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004457 /*
4458 * Decode and fetch the source operand: register, memory
4459 * or immediate.
4460 */
Avi Kivity0fe59122011-09-13 10:45:47 +03004461 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004462 if (rc != X86EMUL_CONTINUE)
4463 goto done;
4464
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004465 /*
4466 * Decode and fetch the second source operand: register, memory
4467 * or immediate.
4468 */
Avi Kivity4dd6a572011-09-13 10:45:43 +03004469 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004470 if (rc != X86EMUL_CONTINUE)
4471 goto done;
4472
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004473 /* Decode and fetch the destination operand: register or memory. */
Avi Kivitya9945542011-09-13 10:45:41 +03004474 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004475
4476done:
Bandan Das41061cd2014-04-16 12:46:14 -04004477 if (ctxt->rip_relative)
Avi Kivityf09ed832011-09-13 10:45:40 +03004478 ctxt->memopp->addr.mem.ea += ctxt->_eip;
Avi Kivitycb16c342011-06-19 19:21:11 +03004479
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004480 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004481}
4482
Xiao Guangrong1cb3f3a2011-09-22 17:02:48 +08004483bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4484{
4485 return ctxt->d & PageTable;
4486}
4487
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004488static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4489{
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004490 /* The second termination condition only applies for REPE
4491 * and REPNE. Test if the repeat string operation prefix is
4492	 * REPE/REPZ or REPNE/REPNZ and, if so, check the
4493	 * corresponding termination condition:
4494 * - if REPE/REPZ and ZF = 0 then done
4495 * - if REPNE/REPNZ and ZF = 1 then done
4496 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004497 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4498 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4499 && (((ctxt->rep_prefix == REPE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004500 ((ctxt->eflags & EFLG_ZF) == 0))
Avi Kivity9dac77f2011-06-01 15:34:25 +03004501 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004502 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4503 return true;
4504
4505 return false;
4506}
4507
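/*
 * Execute FWAIT to force delivery of any pending x87 exception.  If it
 * faults, the exception-table fixup at 3: sets 'fault' and resumes after
 * the fwait, so the condition can be re-injected into the guest as #MF
 * instead of being silently dropped.
 */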
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004508static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4509{
4510 bool fault = false;
4511
4512 ctxt->ops->get_fpu(ctxt);
4513 asm volatile("1: fwait \n\t"
4514 "2: \n\t"
4515 ".pushsection .fixup,\"ax\" \n\t"
4516 "3: \n\t"
4517 "movb $1, %[fault] \n\t"
4518 "jmp 2b \n\t"
4519 ".popsection \n\t"
4520 _ASM_EXTABLE(1b, 3b)
Avi Kivity38e8a2d2012-04-22 15:12:50 +03004521 : [fault]"+qm"(fault));
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004522 ctxt->ops->put_fpu(ctxt);
4523
4524 if (unlikely(fault))
4525 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4526
4527 return X86EMUL_CONTINUE;
4528}
4529
4530static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4531 struct operand *op)
4532{
4533 if (op->type == OP_MM)
4534 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4535}
4536
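/*
 * Dispatch a fastop: dst.val is passed in RAX, src.val in RDX, src2.val
 * in RCX, and the guest's arithmetic flags are installed in EFLAGS around
 * the call (push/popf before, pushf/pop after).  For non-byte operations
 * the size variant is selected by adding __ffs(bytes) * FASTOP_SIZE,
 * which assumes the byte/word/dword/qword stubs are laid out that far
 * apart.  A stub that faults (e.g. a division) is expected to clear the
 * fop pointer via its exception fixup, which is turned into #DE below.
 */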
Avi Kivitye28bbd42013-01-04 16:18:48 +02004537static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4538{
4539 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
Avi Kivityb9fa4092013-02-09 11:31:48 +02004540 if (!(ctxt->d & ByteOp))
4541 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
Avi Kivitye28bbd42013-01-04 16:18:48 +02004542 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004543 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4544 [fastop]"+S"(fop)
4545 : "c"(ctxt->src2.val));
Avi Kivitye28bbd42013-01-04 16:18:48 +02004546 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004547 if (!fop) /* exception is returned in fop variable */
4548 return emulate_de(ctxt);
Avi Kivitye28bbd42013-01-04 16:18:48 +02004549 return X86EMUL_CONTINUE;
4550}
Avi Kivitydd856ef2012-08-27 23:46:17 +03004551
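/*
 * Reset the per-instruction decode state.  The memset below assumes that
 * everything from rip_relative up to (but not including) modrm in
 * struct x86_emulate_ctxt is decode-time state that may simply be zeroed.
 */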
Bandan Das14985072014-04-16 12:46:09 -04004552void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4553{
Bandan Das573e80f2014-04-16 12:46:13 -04004554 memset(&ctxt->rip_relative, 0,
4555 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
Bandan Das14985072014-04-16 12:46:09 -04004556
Bandan Das14985072014-04-16 12:46:09 -04004557 ctxt->io_read.pos = 0;
4558 ctxt->io_read.end = 0;
Bandan Das14985072014-04-16 12:46:09 -04004559 ctxt->mem_read.end = 0;
4560}
4561
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09004562int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004563{
Mathias Krause0225fb52012-08-30 01:30:16 +02004564 const struct x86_emulate_ops *ops = ctxt->ops;
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09004565 int rc = X86EMUL_CONTINUE;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004566 int saved_dst_type = ctxt->dst.type;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004567
Avi Kivity9dac77f2011-06-01 15:34:25 +03004568 ctxt->mem_read.pos = 0;
Glauber Costa310b5d32009-05-12 16:21:06 -04004569
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004570 /* LOCK prefix is allowed only with some instructions */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004571 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004572 rc = emulate_ud(ctxt);
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004573 goto done;
4574 }
4575
Avi Kivity9dac77f2011-06-01 15:34:25 +03004576 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004577 rc = emulate_ud(ctxt);
Avi Kivity081bca02010-08-26 11:06:15 +03004578 goto done;
4579 }
4580
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004581 if (unlikely(ctxt->d &
4582 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4583 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4584 (ctxt->d & Undefined)) {
4585 rc = emulate_ud(ctxt);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004586 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004587 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004588
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004589 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4590 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4591 rc = emulate_ud(ctxt);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004592 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004593 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004594
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004595 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4596 rc = emulate_nm(ctxt);
Joerg Roedeld09beab2011-04-04 12:39:25 +02004597 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004598 }
Joerg Roedeld09beab2011-04-04 12:39:25 +02004599
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004600 if (ctxt->d & Mmx) {
4601 rc = flush_pending_x87_faults(ctxt);
4602 if (rc != X86EMUL_CONTINUE)
4603 goto done;
4604 /*
4605 * Now that we know the fpu is exception safe, we can fetch
4606 * operands from it.
4607 */
4608 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4609 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4610 if (!(ctxt->d & Mov))
4611 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4612 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004613
Bandan Das685bbf42014-04-16 12:46:10 -04004614 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004615 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4616 X86_ICPT_PRE_EXCEPT);
4617 if (rc != X86EMUL_CONTINUE)
4618 goto done;
4619 }
4620
4621 /* Privileged instruction can be executed only in CPL=0 */
4622 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
Nadav Amit68efa762014-06-18 17:19:35 +03004623 if (ctxt->d & PrivUD)
4624 rc = emulate_ud(ctxt);
4625 else
4626 rc = emulate_gp(ctxt, 0);
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004627 goto done;
4628 }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004629
4630 /* Instruction can only be executed in protected mode */
4631 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4632 rc = emulate_ud(ctxt);
4633 goto done;
4634 }
4635
4636 /* Do instruction specific permission checks */
Bandan Das685bbf42014-04-16 12:46:10 -04004637 if (ctxt->d & CheckPerm) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004638 rc = ctxt->check_perm(ctxt);
4639 if (rc != X86EMUL_CONTINUE)
4640 goto done;
4641 }
4642
Bandan Das685bbf42014-04-16 12:46:10 -04004643 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004644 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4645 X86_ICPT_POST_EXCEPT);
4646 if (rc != X86EMUL_CONTINUE)
4647 goto done;
4648 }
4649
4650 if (ctxt->rep_prefix && (ctxt->d & String)) {
4651 /* All REP prefixes have the same first termination condition */
4652 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4653 ctxt->eip = ctxt->_eip;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004654 ctxt->eflags &= ~EFLG_RF;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004655 goto done;
4656 }
4657 }
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004658 }
4659
Avi Kivity9dac77f2011-06-01 15:34:25 +03004660 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4661 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4662 ctxt->src.valptr, ctxt->src.bytes);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09004663 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004664 goto done;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004665 ctxt->src.orig_val64 = ctxt->src.val64;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004666 }
4667
Avi Kivity9dac77f2011-06-01 15:34:25 +03004668 if (ctxt->src2.type == OP_MEM) {
4669 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4670 &ctxt->src2.val, ctxt->src2.bytes);
Gleb Natapove35b7b92010-02-25 16:36:42 +02004671 if (rc != X86EMUL_CONTINUE)
4672 goto done;
4673 }
4674
Avi Kivity9dac77f2011-06-01 15:34:25 +03004675 if ((ctxt->d & DstMask) == ImplicitOps)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004676 goto special_insn;
4677
4678
Avi Kivity9dac77f2011-06-01 15:34:25 +03004679 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004680 /* optimisation - avoid slow emulated read if Mov */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004681 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4682 &ctxt->dst.val, ctxt->dst.bytes);
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004683 if (rc != X86EMUL_CONTINUE)
4684 goto done;
Avi Kivity038e51d2007-01-22 20:40:40 -08004685 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004686 ctxt->dst.orig_val = ctxt->dst.val;
Avi Kivity038e51d2007-01-22 20:40:40 -08004687
Avi Kivity018a98d2007-11-27 19:30:56 +02004688special_insn:
4689
Bandan Das685bbf42014-04-16 12:46:10 -04004690 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03004691 rc = emulator_check_intercept(ctxt, ctxt->intercept,
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004692 X86_ICPT_POST_MEMACCESS);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004693 if (rc != X86EMUL_CONTINUE)
4694 goto done;
4695 }
4696
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03004697 if (ctxt->rep_prefix && (ctxt->d & String))
4698 ctxt->eflags |= EFLG_RF;
4699 else
4700 ctxt->eflags &= ~EFLG_RF;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004701
Avi Kivity9dac77f2011-06-01 15:34:25 +03004702 if (ctxt->execute) {
Avi Kivitye28bbd42013-01-04 16:18:48 +02004703 if (ctxt->d & Fastop) {
4704 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4705 rc = fastop(ctxt, fop);
4706 if (rc != X86EMUL_CONTINUE)
4707 goto done;
4708 goto writeback;
4709 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004710 rc = ctxt->execute(ctxt);
Avi Kivityef65c882010-07-29 15:11:51 +03004711 if (rc != X86EMUL_CONTINUE)
4712 goto done;
4713 goto writeback;
4714 }
4715
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004716 if (ctxt->opcode_len == 2)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004717 goto twobyte_insn;
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004718 else if (ctxt->opcode_len == 3)
4719 goto threebyte_insn;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004720
Avi Kivity9dac77f2011-06-01 15:34:25 +03004721 switch (ctxt->b) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08004722 case 0x63: /* movsxd */
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004723 if (ctxt->mode != X86EMUL_MODE_PROT64)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004724 goto cannot_emulate;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004725 ctxt->dst.val = (s32) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004726 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03004727 case 0x70 ... 0x7f: /* jcc (short) */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004728 if (test_cc(ctxt->b, ctxt->eflags))
4729 jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02004730 break;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004731 case 0x8d: /* lea r16/r32, m */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004732 ctxt->dst.val = ctxt->src.addr.mem.ea;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004733 break;
Avi Kivity3d9e77d2010-08-01 12:41:59 +03004734 case 0x90 ... 0x97: /* nop / xchg reg, rax */
Avi Kivitydd856ef2012-08-27 23:46:17 +03004735 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
Nadav Amita825f5c2014-06-15 16:13:01 +03004736 ctxt->dst.type = OP_NONE;
4737 else
4738 rc = em_xchg(ctxt);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09004739 break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004740 case 0x98: /* cbw/cwde/cdqe */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004741 switch (ctxt->op_bytes) {
4742 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4743 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4744 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004745 }
4746 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004747 case 0xcc: /* int3 */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004748 rc = emulate_int(ctxt, 3);
4749 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004750 case 0xcd: /* int n */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004751 rc = emulate_int(ctxt, ctxt->src.val);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004752 break;
4753 case 0xce: /* into */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004754 if (ctxt->eflags & EFLG_OF)
4755 rc = emulate_int(ctxt, 4);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004756 break;
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004757 case 0xe9: /* jmp rel */
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004758 case 0xeb: /* jmp rel short */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004759 jmp_rel(ctxt, ctxt->src.val);
4760 ctxt->dst.type = OP_NONE; /* Disable writeback. */
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004761 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02004762 case 0xf4: /* hlt */
Avi Kivity6c3287f2011-04-20 15:43:05 +03004763 ctxt->ops->halt(ctxt);
Mohammed Gamal19fdfa02008-07-06 16:51:26 +03004764 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02004765 case 0xf5: /* cmc */
4766 /* complement carry flag from eflags reg */
4767 ctxt->eflags ^= EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02004768 break;
4769 case 0xf8: /* clc */
4770 ctxt->eflags &= ~EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02004771 break;
Mohammed Gamal8744aa92010-08-05 15:42:49 +03004772 case 0xf9: /* stc */
4773 ctxt->eflags |= EFLG_CF;
4774 break;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004775 case 0xfc: /* cld */
4776 ctxt->eflags &= ~EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004777 break;
4778 case 0xfd: /* std */
4779 ctxt->eflags |= EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004780 break;
Avi Kivity91269b82010-07-25 14:51:16 +03004781 default:
4782 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004783 }
Avi Kivity018a98d2007-11-27 19:30:56 +02004784
Avi Kivity7d9ddae2010-08-30 17:12:28 +03004785 if (rc != X86EMUL_CONTINUE)
4786 goto done;
4787
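/*
 * Writeback: a few instructions also modify their source operand (SrcWrite);
 * the destination is written back unless the opcode is flagged NoWrite.
 */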
Avi Kivity018a98d2007-11-27 19:30:56 +02004788writeback:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02004789 if (ctxt->d & SrcWrite) {
4790 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
4791 rc = writeback(ctxt, &ctxt->src);
4792 if (rc != X86EMUL_CONTINUE)
4793 goto done;
4794 }
Nadav Amitee212292014-06-15 16:12:58 +03004795 if (!(ctxt->d & NoWrite)) {
4796 rc = writeback(ctxt, &ctxt->dst);
4797 if (rc != X86EMUL_CONTINUE)
4798 goto done;
4799 }
Avi Kivity018a98d2007-11-27 19:30:56 +02004800
Gleb Natapov5cd21912010-03-18 15:20:26 +02004801 /*
4802 * restore dst type in case the decoding will be reused
 4803 * (happens for string instructions)
4804 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004805 ctxt->dst.type = saved_dst_type;
Gleb Natapov5cd21912010-03-18 15:20:26 +02004806
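	/* String instructions step RSI/RDI by the operand size, in the direction given by EFLAGS.DF. */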
Avi Kivity9dac77f2011-06-01 15:34:25 +03004807 if ((ctxt->d & SrcMask) == SrcSI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03004808 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
Gleb Natapova682e352010-03-18 15:20:21 +02004809
Avi Kivity9dac77f2011-06-01 15:34:25 +03004810 if ((ctxt->d & DstMask) == DstDI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03004811 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
Gleb Natapovd9271122010-03-18 15:20:22 +02004812
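	/*
	 * With a rep prefix, RCX is decremented by the number of iterations
	 * completed in this pass and the instruction is restarted until the
	 * count is exhausted.
	 */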
Avi Kivity9dac77f2011-06-01 15:34:25 +03004813 if (ctxt->rep_prefix && (ctxt->d & String)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004814 unsigned int count;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004815 struct read_cache *r = &ctxt->io_read;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004816 if ((ctxt->d & SrcMask) == SrcSI)
4817 count = ctxt->src.count;
4818 else
4819 count = ctxt->dst.count;
4820 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
4821 -count);
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004822
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004823 if (!string_insn_completed(ctxt)) {
4824 /*
4825 * Re-enter guest when pio read ahead buffer is empty
 4826 * or, if it is not used, after every 1024 iterations.
4827 */
Avi Kivitydd856ef2012-08-27 23:46:17 +03004828 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004829 (r->end == 0 || r->end != r->pos)) {
4830 /*
4831 * Reset read cache. Usually happens before
 4832 * decode, but since the instruction is restarted
4833 * we have to do it here.
4834 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004835 ctxt->mem_read.end = 0;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004836 writeback_registers(ctxt);
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004837 return EMULATION_RESTART;
4838 }
4839 goto done; /* skip rip writeback */
Avi Kivity0fa6ccb2010-08-17 11:22:17 +03004840 }
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03004841 ctxt->eflags &= ~EFLG_RF;
Gleb Natapov5cd21912010-03-18 15:20:26 +02004842 }
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004843
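	/* Emulation finished without a fault or restart: commit the tentative _eip as the new RIP. */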
Avi Kivity9dac77f2011-06-01 15:34:25 +03004844 ctxt->eip = ctxt->_eip;
Avi Kivity018a98d2007-11-27 19:30:56 +02004845
4846done:
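	/* Exceptions injected by the emulator must be architectural vectors 0..0x1f; anything larger indicates an emulator bug. */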
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02004847 if (rc == X86EMUL_PROPAGATE_FAULT) {
4848 WARN_ON(ctxt->exception.vector > 0x1f);
Avi Kivityda9cb572010-11-22 17:53:21 +02004849 ctxt->have_exception = true;
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02004850 }
Joerg Roedel775fde82011-04-04 12:39:24 +02004851 if (rc == X86EMUL_INTERCEPTED)
4852 return EMULATION_INTERCEPTED;
4853
Avi Kivitydd856ef2012-08-27 23:46:17 +03004854 if (rc == X86EMUL_CONTINUE)
4855 writeback_registers(ctxt);
4856
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004857 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004858
4859twobyte_insn:
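	/* Two-byte (0x0f-prefixed) opcodes without an ->execute callback. */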
Avi Kivity9dac77f2011-06-01 15:34:25 +03004860 switch (ctxt->b) {
Avi Kivity018a98d2007-11-27 19:30:56 +02004861 case 0x09: /* wbinvd */
Clemens Nosscfb22372011-04-21 21:16:05 +02004862 (ctxt->ops->wbinvd)(ctxt);
Sheng Yangf5f48ee2010-06-30 12:25:15 +08004863 break;
4864 case 0x08: /* invd */
Avi Kivity018a98d2007-11-27 19:30:56 +02004865 case 0x0d: /* GrpP (prefetch) */
4866 case 0x18: /* Grp16 (prefetch/nop) */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02004867 case 0x1f: /* nop */
Avi Kivity018a98d2007-11-27 19:30:56 +02004868 break;
4869 case 0x20: /* mov cr, reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004870 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
Avi Kivity018a98d2007-11-27 19:30:56 +02004871 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004872 case 0x21: /* mov from dr to reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004873 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004874 break;
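	/*
	 * In 64-bit mode a cmov with a 32-bit operand writes (and zero-extends)
	 * the destination even when the condition is false, so writeback is
	 * suppressed only in the remaining cases.
	 */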
Avi Kivity6aa8b732006-12-10 02:21:36 -08004875 case 0x40 ... 0x4f: /* cmov */
Nadav Amit140bad82014-06-15 16:13:00 +03004876 if (test_cc(ctxt->b, ctxt->eflags))
4877 ctxt->dst.val = ctxt->src.val;
4878 else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
4879 ctxt->op_bytes != 4)
Avi Kivity9dac77f2011-06-01 15:34:25 +03004880 ctxt->dst.type = OP_NONE; /* no writeback */
Avi Kivity6aa8b732006-12-10 02:21:36 -08004881 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03004882 case 0x80 ... 0x8f: /* jcc rel (near) */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004883 if (test_cc(ctxt->b, ctxt->eflags))
4884 jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02004885 break;
Wei Yongjunee45b582010-08-06 17:10:07 +08004886 case 0x90 ... 0x9f: /* setcc r/m8 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004887 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
Wei Yongjunee45b582010-08-06 17:10:07 +08004888 break;
Glauber Costa2a7c5b82008-07-10 17:08:15 -03004889 case 0xae: /* clflush */
4890 break;
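	/* movzx/movsx widen an 8- or 16-bit source to the operand size, zero- or sign-extending respectively. */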
Avi Kivity6aa8b732006-12-10 02:21:36 -08004891 case 0xb6 ... 0xb7: /* movzx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004892 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03004893 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
Avi Kivity9dac77f2011-06-01 15:34:25 +03004894 : (u16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004895 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004896 case 0xbe ... 0xbf: /* movsx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004897 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03004898 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
Avi Kivity9dac77f2011-06-01 15:34:25 +03004899 (s16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004900 break;
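	/* movnti stores a 32- or 64-bit register to memory; the non-temporal hint has no observable effect under emulation. */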
Sheng Yanga012e652007-10-15 14:24:20 +08004901 case 0xc3: /* movnti */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004902 ctxt->dst.bytes = ctxt->op_bytes;
Nadav Amit3b320042014-06-02 18:34:08 +03004903 ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
4904 (u32) ctxt->src.val;
Sheng Yanga012e652007-10-15 14:24:20 +08004905 break;
Avi Kivity91269b82010-07-25 14:51:16 +03004906 default:
4907 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004908 }
Avi Kivity7d9ddae2010-08-30 17:12:28 +03004909
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004910threebyte_insn:
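/* No three-byte opcodes are emulated inline; they are expected to have been handled via their ->execute callbacks, so just fall through to writeback. */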
4911
Avi Kivity7d9ddae2010-08-30 17:12:28 +03004912 if (rc != X86EMUL_CONTINUE)
4913 goto done;
4914
Avi Kivity6aa8b732006-12-10 02:21:36 -08004915 goto writeback;
4916
4917cannot_emulate:
Gleb Natapova0c0ab22011-03-28 16:57:49 +02004918 return EMULATION_FAILED;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004919}
Avi Kivitydd856ef2012-08-27 23:46:17 +03004920
4921void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
4922{
4923 invalidate_registers(ctxt);
4924}
4925
4926void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
4927{
4928 writeback_registers(ctxt);
4929}