/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

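/*
 * Each operand type above is a 5-bit code (OpBits wide) packed into the
 * 56-bit opcode flags word at a fixed shift: the destination at DstShift (1),
 * the source at SrcShift (6) and the second source at Src2Shift (31), all
 * defined below.  For example, an opcode table entry flagged
 * DstReg | SrcMem encodes (OpReg << DstShift) | (OpMem << SrcShift), and the
 * decoder recovers each operand type with (flags >> shift) & OpMask.
 */
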
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

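/*
 * The opcode tables are built from struct opcode entries.  When an entry is
 * marked Group/GroupDual/Prefix/RMExt/Escape in its flags, the u union points
 * at one of the helper structures above instead of a handler: group_dual
 * selects between mod012[] and mod3[] on the ModRM mod field, gprefix selects
 * on the 66/F2/F3 prefix byte, and escape indexes coprocessor opcodes.
 * Otherwise u.execute (or u.fastop, when the Fastop flag is set) is invoked
 * directly.
 */
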
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

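/*
 * General-purpose register accessors.  Guest registers are pulled in via
 * ctxt->ops->read_gpr() lazily, on first use, and cached in ctxt->_regs[];
 * reg_write()/reg_rmw() additionally mark the register dirty so that
 * writeback_registers() later pushes only the modified values back to the
 * vcpu.
 */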
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

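/*
 * For illustration: FASTOP2(add) emits a single assembly symbol em_add made
 * of four FASTOP_SIZE-byte slots, "addb %dl, %al", "addw %dx, %ax",
 * "addl %edx, %eax" and (on 64-bit) "addq %rdx, %rax", each followed by a
 * ret.  The fastop() dispatcher picks the slot matching the current operand
 * size by adding a computed offset to em_add instead of going through a jump
 * table.
 */
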
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

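/*
 * Each SETcc thunk is aligned to 4 bytes and fits in 4 bytes (setcc %al plus
 * ret), so the em_setcc table built below can be indexed as
 * em_setcc + 4 * condition_code; test_cc() relies on that stride.
 */
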
asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	register_address_increment(ctxt, &ctxt->_eip, rel);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
	ctxt->has_seg_override = true;
	ctxt->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->has_seg_override)
		return 0;

	return ctxt->seg_override;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

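/*
 * Translate a segment:offset (struct segmented_address) into a linear
 * address, raising #GP or #SS as appropriate.  Outside 64-bit mode this
 * enforces the segment limit (expand-up vs. expand-down), write and read
 * permissions and the CPL/DPL privilege rules; in 64-bit mode only a
 * canonical-address style check applies.
 */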
static int __linearize(struct x86_emulate_ctxt *ctxt,
		       struct segmented_address addr,
		       unsigned size, bool write, bool fetch,
		       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, sel);
	else
		return emulate_gp(ctxt, sel);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}


static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Fetch the next byte of the instruction being emulated which is pointed to
 * by ctxt->_eip, then increment ctxt->_eip.
 *
 * Also prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->fetch;
	int rc;
	int size, cur_size;

	if (ctxt->_eip == fc->end) {
		unsigned long linear;
		struct segmented_address addr = { .seg = VCPU_SREG_CS,
						  .ea = ctxt->_eip };
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size,
			   PAGE_SIZE - offset_in_page(ctxt->_eip));
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
				      size, &ctxt->exception);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		fc->end += size;
	}
	*dest = fc->data[ctxt->_eip - fc->start];
	ctxt->_eip++;
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_insn_fetch_byte(ctxt, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	unsigned long _x; \
	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	(_type)_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({	rc = do_insn_fetch(_ctxt, _arr, (_size)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
})

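/*
 * insn_fetch() is meant for use inside the decode path, e.g.
 * "ctxt->b = insn_fetch(u8, ctxt)".  Note that both macros expect a local
 * variable 'rc' and a 'done' label to exist in the caller, and jump to
 * 'done' when a fetch fails.
 */
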
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (ctxt->rex_prefix) {
		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (ctxt->rex_prefix & 2) << 2;	/* REX.X */
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
	}

	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

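/*
 * For BT/BTS/BTR/BTC with a register bit index and a memory destination, the
 * bit index may address memory beyond the decoded operand: the effective
 * address is advanced so that it points at the operand-sized word containing
 * the selected bit, and the index is then reduced modulo the operand width.
 * E.g. "bt %eax, (%rbx)" with eax == 100 ends up testing bit 4 of the dword
 * at 12(%rbx).
 */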
Avi Kivity9dac77f2011-06-01 15:34:25 +03001221static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
Wei Yongjun35c843c2010-08-09 11:34:56 +08001222{
Sheng Yang7129eec2010-09-28 16:33:32 +08001223 long sv = 0, mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001224
Avi Kivity9dac77f2011-06-01 15:34:25 +03001225 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
Nadav Amit7dec5602014-06-15 16:12:57 +03001226 mask = ~((long)ctxt->dst.bytes * 8 - 1);
Wei Yongjun35c843c2010-08-09 11:34:56 +08001227
Avi Kivity9dac77f2011-06-01 15:34:25 +03001228 if (ctxt->src.bytes == 2)
1229 sv = (s16)ctxt->src.val & (s16)mask;
1230 else if (ctxt->src.bytes == 4)
1231 sv = (s32)ctxt->src.val & (s32)mask;
Nadav Amit7dec5602014-06-15 16:12:57 +03001232 else
1233 sv = (s64)ctxt->src.val & (s64)mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001234
Avi Kivity9dac77f2011-06-01 15:34:25 +03001235 ctxt->dst.addr.mem.ea += (sv >> 3);
Wei Yongjun35c843c2010-08-09 11:34:56 +08001236 }
Wei Yongjunba7ff2b2010-08-09 11:39:14 +08001237
1238 /* only subword offset */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001239 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001240}
1241
Gleb Natapov9de41572010-04-28 19:15:22 +03001242static int read_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov9de41572010-04-28 19:15:22 +03001243 unsigned long addr, void *dest, unsigned size)
1244{
1245 int rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001246 struct read_cache *mc = &ctxt->mem_read;
Gleb Natapov9de41572010-04-28 19:15:22 +03001247
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001248 if (mc->pos < mc->end)
1249 goto read_cached;
Gleb Natapov9de41572010-04-28 19:15:22 +03001250
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001251 WARN_ON((mc->end + size) >= sizeof(mc->data));
Gleb Natapov9de41572010-04-28 19:15:22 +03001252
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001253 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1254 &ctxt->exception);
1255 if (rc != X86EMUL_CONTINUE)
1256 return rc;
1257
1258 mc->end += size;
1259
1260read_cached:
1261 memcpy(dest, mc->data + mc->pos, size);
1262 mc->pos += size;
Gleb Natapov9de41572010-04-28 19:15:22 +03001263 return X86EMUL_CONTINUE;
1264}
1265
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001266static int segmented_read(struct x86_emulate_ctxt *ctxt,
1267 struct segmented_address addr,
1268 void *data,
1269 unsigned size)
1270{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001271 int rc;
1272 ulong linear;
1273
Avi Kivity83b87952011-04-03 11:31:19 +03001274 rc = linearize(ctxt, addr, size, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001275 if (rc != X86EMUL_CONTINUE)
1276 return rc;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001277 return read_emulated(ctxt, linear, data, size);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001278}
1279
1280static int segmented_write(struct x86_emulate_ctxt *ctxt,
1281 struct segmented_address addr,
1282 const void *data,
1283 unsigned size)
1284{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001285 int rc;
1286 ulong linear;
1287
Avi Kivity83b87952011-04-03 11:31:19 +03001288 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001289 if (rc != X86EMUL_CONTINUE)
1290 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001291 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1292 &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001293}
1294
1295static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1296 struct segmented_address addr,
1297 const void *orig_data, const void *data,
1298 unsigned size)
1299{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001300 int rc;
1301 ulong linear;
1302
Avi Kivity83b87952011-04-03 11:31:19 +03001303 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001304 if (rc != X86EMUL_CONTINUE)
1305 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001306 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1307 size, &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001308}
1309
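/*
 * IN/INS emulation reads ahead: up to a page worth of port data (bounded by
 * the rep count and the size of the cache) is fetched in one call and later
 * iterations are served from ctxt->io_read.
 */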
Gleb Natapov7b262e92010-03-18 15:20:27 +02001310static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov7b262e92010-03-18 15:20:27 +02001311 unsigned int size, unsigned short port,
1312 void *dest)
1313{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001314 struct read_cache *rc = &ctxt->io_read;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001315
1316 if (rc->pos == rc->end) { /* refill pio read ahead */
Gleb Natapov7b262e92010-03-18 15:20:27 +02001317 unsigned int in_page, n;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001318 unsigned int count = ctxt->rep_prefix ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001319 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001320 in_page = (ctxt->eflags & EFLG_DF) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001321 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1322 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
Gleb Natapov7b262e92010-03-18 15:20:27 +02001323 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1324 count);
1325 if (n == 0)
1326 n = 1;
1327 rc->pos = rc->end = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001328 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
Gleb Natapov7b262e92010-03-18 15:20:27 +02001329 return 0;
1330 rc->end = n * size;
1331 }
1332
Nadav Amite6e39f02014-04-18 03:35:10 +03001333 if (ctxt->rep_prefix && (ctxt->d & String) &&
1334 !(ctxt->eflags & EFLG_DF)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001335 ctxt->dst.data = rc->data + rc->pos;
1336 ctxt->dst.type = OP_MEM_STR;
1337 ctxt->dst.count = (rc->end - rc->pos) / size;
1338 rc->pos = rc->end;
1339 } else {
1340 memcpy(dest, rc->data + rc->pos, size);
1341 rc->pos += size;
1342 }
Gleb Natapov7b262e92010-03-18 15:20:27 +02001343 return 1;
1344}
1345
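/*
 * Fetch a gate descriptor from the IDT; on a limit violation the #GP error
 * code carries the vector index with the IDT bit (bit 1) set.
 */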
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01001346static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1347 u16 index, struct desc_struct *desc)
1348{
1349 struct desc_ptr dt;
1350 ulong addr;
1351
1352 ctxt->ops->get_idt(ctxt, &dt);
1353
1354 if (dt.size < index * 8 + 7)
1355 return emulate_gp(ctxt, index << 3 | 0x2);
1356
1357 addr = dt.address + index * 8;
1358 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1359 &ctxt->exception);
1360}
1361
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001362static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001363 u16 selector, struct desc_ptr *dt)
1364{
Mathias Krause0225fb52012-08-30 01:30:16 +02001365 const struct x86_emulate_ops *ops = ctxt->ops;
Nadav Amit2eedcac2014-06-02 18:34:05 +03001366 u32 base3 = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001367
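	/* Bit 2 of the selector (the TI bit) selects the LDT over the GDT. */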
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001368 if (selector & 1 << 2) {
1369 struct desc_struct desc;
Avi Kivity1aa36612011-04-27 13:20:30 +03001370 u16 sel;
1371
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001372 memset (dt, 0, sizeof *dt);
Nadav Amit2eedcac2014-06-02 18:34:05 +03001373 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1374 VCPU_SREG_LDTR))
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001375 return;
1376
1377 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
Nadav Amit2eedcac2014-06-02 18:34:05 +03001378 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001379 } else
Avi Kivity4bff1e862011-04-20 13:37:53 +03001380 ops->get_gdt(ctxt, dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001381}
1382
 1383/* allowed just for 8-byte segments */
1384static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Avi Kivitye9194642012-06-13 16:29:39 +03001385 u16 selector, struct desc_struct *desc,
1386 ulong *desc_addr_p)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001387{
1388 struct desc_ptr dt;
1389 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001390 ulong addr;
1391
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001392 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001393
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001394 if (dt.size < index * 8 + 7)
1395 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001396
Avi Kivitye9194642012-06-13 16:29:39 +03001397 *desc_addr_p = addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001398 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1399 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001400}
1401
 1402/* allowed just for 8-byte segments */
1403static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001404 u16 selector, struct desc_struct *desc)
1405{
1406 struct desc_ptr dt;
1407 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001408 ulong addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001409
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001410 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001411
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001412 if (dt.size < index * 8 + 7)
1413 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001414
1415 addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001416 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1417 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001418}
1419
Gleb Natapov5601d052011-03-07 14:55:06 +02001420/* Does not support long mode */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001421static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Paolo Bonzini5045b462014-05-15 18:09:29 +02001422 u16 selector, int seg, u8 cpl, bool in_task_switch)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001423{
Avi Kivity869be992012-06-13 16:30:53 +03001424 struct desc_struct seg_desc, old_desc;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001425 u8 dpl, rpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001426 unsigned err_vec = GP_VECTOR;
1427 u32 err_code = 0;
1428 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
Avi Kivitye9194642012-06-13 16:29:39 +03001429 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001430 int ret;
Avi Kivity03ebebe2012-08-21 17:07:04 +03001431 u16 dummy;
Nadav Amite37a75a2014-06-02 18:34:04 +03001432 u32 base3 = 0;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001433
1434 memset(&seg_desc, 0, sizeof seg_desc);
1435
Kevin Wolff8da94e2013-04-11 14:06:03 +02001436 if (ctxt->mode == X86EMUL_MODE_REAL) {
1437 /* set real mode segment descriptor (keep limit etc. for
1438 * unreal mode) */
Avi Kivity03ebebe2012-08-21 17:07:04 +03001439 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001440 set_desc_base(&seg_desc, selector << 4);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001441 goto load;
Kevin Wolff8da94e2013-04-11 14:06:03 +02001442 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1443 /* VM86 needs a clean new segment descriptor */
1444 set_desc_base(&seg_desc, selector << 4);
1445 set_desc_limit(&seg_desc, 0xffff);
1446 seg_desc.type = 3;
1447 seg_desc.p = 1;
1448 seg_desc.s = 1;
1449 seg_desc.dpl = 3;
1450 goto load;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001451 }
1452
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001453 rpl = selector & 3;
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001454
1455 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1456 if ((seg == VCPU_SREG_CS
1457 || (seg == VCPU_SREG_SS
1458 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1459 || seg == VCPU_SREG_TR)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001460 && null_selector)
1461 goto exception;
1462
1463 /* TR should be in GDT only */
1464 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1465 goto exception;
1466
1467 if (null_selector) /* for NULL selector skip all following checks */
1468 goto load;
1469
Avi Kivitye9194642012-06-13 16:29:39 +03001470 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001471 if (ret != X86EMUL_CONTINUE)
1472 return ret;
1473
1474 err_code = selector & 0xfffc;
1475 err_vec = GP_VECTOR;
1476
Guo Chaofc058682012-06-28 15:19:51 +08001477	/* can't load a system descriptor into an ordinary segment register */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001478 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1479 goto exception;
1480
1481 if (!seg_desc.p) {
1482 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1483 goto exception;
1484 }
1485
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001486 dpl = seg_desc.dpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001487
1488 switch (seg) {
1489 case VCPU_SREG_SS:
1490 /*
1491 * segment is not a writable data segment or segment
 1492		 * selector's RPL != CPL or segment descriptor's DPL != CPL
1493 */
1494 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1495 goto exception;
1496 break;
1497 case VCPU_SREG_CS:
Paolo Bonzini5045b462014-05-15 18:09:29 +02001498 if (in_task_switch && rpl != dpl)
1499 goto exception;
1500
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001501 if (!(seg_desc.type & 8))
1502 goto exception;
1503
1504 if (seg_desc.type & 4) {
1505 /* conforming */
1506 if (dpl > cpl)
1507 goto exception;
1508 } else {
1509 /* nonconforming */
1510 if (rpl > cpl || dpl != cpl)
1511 goto exception;
1512 }
1513 /* CS(RPL) <- CPL */
1514 selector = (selector & 0xfffc) | cpl;
1515 break;
1516 case VCPU_SREG_TR:
1517 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1518 goto exception;
Avi Kivity869be992012-06-13 16:30:53 +03001519 old_desc = seg_desc;
1520 seg_desc.type |= 2; /* busy */
1521 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1522 sizeof(seg_desc), &ctxt->exception);
1523 if (ret != X86EMUL_CONTINUE)
1524 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001525 break;
1526 case VCPU_SREG_LDTR:
1527 if (seg_desc.s || seg_desc.type != 2)
1528 goto exception;
1529 break;
1530 default: /* DS, ES, FS, or GS */
1531 /*
1532 * segment is not a data or readable code segment or
1533 * ((segment is a data or nonconforming code segment)
1534 * and (both RPL and CPL > DPL))
1535 */
1536 if ((seg_desc.type & 0xa) == 0x8 ||
1537 (((seg_desc.type & 0xc) != 0xc) &&
1538 (rpl > dpl && cpl > dpl)))
1539 goto exception;
1540 break;
1541 }
1542
1543 if (seg_desc.s) {
1544 /* mark segment as accessed */
1545 seg_desc.type |= 1;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001546 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001547 if (ret != X86EMUL_CONTINUE)
1548 return ret;
Nadav Amite37a75a2014-06-02 18:34:04 +03001549 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1550 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1551 sizeof(base3), &ctxt->exception);
1552 if (ret != X86EMUL_CONTINUE)
1553 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001554 }
1555load:
Nadav Amite37a75a2014-06-02 18:34:04 +03001556 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001557 return X86EMUL_CONTINUE;
1558exception:
Gleb Natapov54b84862010-04-28 19:15:44 +03001559 emulate_exception(ctxt, err_vec, err_code, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001560 return X86EMUL_PROPAGATE_FAULT;
1561}
1562
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001563static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1564 u16 selector, int seg)
1565{
1566 u8 cpl = ctxt->ops->cpl(ctxt);
Paolo Bonzini5045b462014-05-15 18:09:29 +02001567 return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001568}
1569
Wei Yongjun31be40b2010-08-17 09:17:30 +08001570static void write_register_operand(struct operand *op)
1571{
1572 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1573 switch (op->bytes) {
1574 case 1:
1575 *(u8 *)op->addr.reg = (u8)op->val;
1576 break;
1577 case 2:
1578 *(u16 *)op->addr.reg = (u16)op->val;
1579 break;
1580 case 4:
1581 *op->addr.reg = (u32)op->val;
1582 break; /* 64b: zero-extend */
1583 case 8:
1584 *op->addr.reg = op->val;
1585 break;
1586 }
1587}
1588
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001589static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
Wei Yongjunc37eda12010-06-15 09:03:33 +08001590{
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001591 switch (op->type) {
Wei Yongjunc37eda12010-06-15 09:03:33 +08001592 case OP_REG:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001593 write_register_operand(op);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001594 break;
1595 case OP_MEM:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001596 if (ctxt->lock_prefix)
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001597 return segmented_cmpxchg(ctxt,
1598 op->addr.mem,
1599 &op->orig_val,
1600 &op->val,
1601 op->bytes);
1602 else
1603 return segmented_write(ctxt,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001604 op->addr.mem,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001605 &op->val,
1606 op->bytes);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001607 break;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001608 case OP_MEM_STR:
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001609 return segmented_write(ctxt,
1610 op->addr.mem,
1611 op->data,
1612 op->bytes * op->count);
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001613 break;
Avi Kivity12537912011-03-29 11:41:27 +02001614 case OP_XMM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001615 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
Avi Kivity12537912011-03-29 11:41:27 +02001616 break;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001617 case OP_MM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001618 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001619 break;
Wei Yongjunc37eda12010-06-15 09:03:33 +08001620 case OP_NONE:
1621 /* no writeback */
1622 break;
1623 default:
1624 break;
1625 }
1626 return X86EMUL_CONTINUE;
1627}
1628
Avi Kivity51ddff52012-06-12 20:19:40 +03001629static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001630{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001631 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001632
Avi Kivity5ad105e2012-08-19 14:34:31 +03001633 rsp_increment(ctxt, -bytes);
Avi Kivitydd856ef2012-08-27 23:46:17 +03001634 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001635 addr.seg = VCPU_SREG_SS;
1636
Avi Kivity51ddff52012-06-12 20:19:40 +03001637 return segmented_write(ctxt, addr, data, bytes);
1638}
1639
1640static int em_push(struct x86_emulate_ctxt *ctxt)
1641{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001642 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001643 ctxt->dst.type = OP_NONE;
Avi Kivity51ddff52012-06-12 20:19:40 +03001644 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001645}
1646
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001647static int emulate_pop(struct x86_emulate_ctxt *ctxt,
Avi Kivity350f69d2009-01-05 11:12:40 +02001648 void *dest, int len)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001649{
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001650 int rc;
Avi Kivity90de84f2010-11-17 15:28:21 +02001651 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001652
Avi Kivitydd856ef2012-08-27 23:46:17 +03001653 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Avi Kivity90de84f2010-11-17 15:28:21 +02001654 addr.seg = VCPU_SREG_SS;
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001655 rc = segmented_read(ctxt, addr, dest, len);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09001656 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001657 return rc;
1658
Avi Kivity5ad105e2012-08-19 14:34:31 +03001659 rsp_increment(ctxt, len);
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001660 return rc;
1661}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001662
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001663static int em_pop(struct x86_emulate_ctxt *ctxt)
1664{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001665 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001666}
1667
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001668static int emulate_popf(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001669 void *dest, int len)
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001670{
1671 int rc;
1672 unsigned long val, change_mask;
1673 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001674 int cpl = ctxt->ops->cpl(ctxt);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001675
Takuya Yoshikawa3b9be3b2011-05-02 02:27:55 +09001676 rc = emulate_pop(ctxt, &val, len);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001677 if (rc != X86EMUL_CONTINUE)
1678 return rc;
1679
1680 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1681 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1682
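	/*
	 * POPF may always change the arithmetic flags; IOPL may change only
	 * at CPL 0, IF only when CPL <= IOPL, and popping flags in VM86 mode
	 * with IOPL < 3 raises #GP.
	 */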
1683 switch(ctxt->mode) {
1684 case X86EMUL_MODE_PROT64:
1685 case X86EMUL_MODE_PROT32:
1686 case X86EMUL_MODE_PROT16:
1687 if (cpl == 0)
1688 change_mask |= EFLG_IOPL;
1689 if (cpl <= iopl)
1690 change_mask |= EFLG_IF;
1691 break;
1692 case X86EMUL_MODE_VM86:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001693 if (iopl < 3)
1694 return emulate_gp(ctxt, 0);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001695 change_mask |= EFLG_IF;
1696 break;
1697 default: /* real mode */
1698 change_mask |= (EFLG_IOPL | EFLG_IF);
1699 break;
1700 }
1701
1702 *(unsigned long *)dest =
1703 (ctxt->eflags & ~change_mask) | (val & change_mask);
1704
1705 return rc;
1706}
1707
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001708static int em_popf(struct x86_emulate_ctxt *ctxt)
1709{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001710 ctxt->dst.type = OP_REG;
1711 ctxt->dst.addr.reg = &ctxt->eflags;
1712 ctxt->dst.bytes = ctxt->op_bytes;
1713 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001714}
1715
Avi Kivity612e89f2012-06-12 20:03:23 +03001716static int em_enter(struct x86_emulate_ctxt *ctxt)
1717{
1718 int rc;
1719 unsigned frame_size = ctxt->src.val;
1720 unsigned nesting_level = ctxt->src2.val & 31;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001721 ulong rbp;
Avi Kivity612e89f2012-06-12 20:03:23 +03001722
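	/* Only ENTER with nesting level 0 is emulated here. */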
1723 if (nesting_level)
1724 return X86EMUL_UNHANDLEABLE;
1725
Avi Kivitydd856ef2012-08-27 23:46:17 +03001726 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1727 rc = push(ctxt, &rbp, stack_size(ctxt));
Avi Kivity612e89f2012-06-12 20:03:23 +03001728 if (rc != X86EMUL_CONTINUE)
1729 return rc;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001730 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
Avi Kivity612e89f2012-06-12 20:03:23 +03001731 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001732 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1733 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
Avi Kivity612e89f2012-06-12 20:03:23 +03001734 stack_mask(ctxt));
1735 return X86EMUL_CONTINUE;
1736}
1737
Avi Kivityf47cfa32012-06-07 17:49:24 +03001738static int em_leave(struct x86_emulate_ctxt *ctxt)
1739{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001740 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
Avi Kivityf47cfa32012-06-07 17:49:24 +03001741 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001742 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
Avi Kivityf47cfa32012-06-07 17:49:24 +03001743}
1744
Avi Kivity1cd196e2011-09-13 10:45:51 +03001745static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001746{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001747 int seg = ctxt->src2.val;
1748
Avi Kivity9dac77f2011-06-01 15:34:25 +03001749 ctxt->src.val = get_segment_selector(ctxt, seg);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001750
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001751 return em_push(ctxt);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001752}
1753
Avi Kivity1cd196e2011-09-13 10:45:51 +03001754static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001755{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001756 int seg = ctxt->src2.val;
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001757 unsigned long selector;
1758 int rc;
1759
Avi Kivity9dac77f2011-06-01 15:34:25 +03001760 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001761 if (rc != X86EMUL_CONTINUE)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001762 return rc;
1763
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001764 if (ctxt->modrm_reg == VCPU_SREG_SS)
1765 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1766
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001767 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001768 return rc;
1769}
1770
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001771static int em_pusha(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001772{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001773 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001774 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001775 int reg = VCPU_REGS_RAX;
1776
1777 while (reg <= VCPU_REGS_RDI) {
1778 (reg == VCPU_REGS_RSP) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001779 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001780
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001781 rc = em_push(ctxt);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001782 if (rc != X86EMUL_CONTINUE)
1783 return rc;
1784
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001785 ++reg;
1786 }
Wei Yongjunc37eda12010-06-15 09:03:33 +08001787
Wei Yongjunc37eda12010-06-15 09:03:33 +08001788 return rc;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001789}
1790
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001791static int em_pushf(struct x86_emulate_ctxt *ctxt)
1792{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001793 ctxt->src.val = (unsigned long)ctxt->eflags;
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001794 return em_push(ctxt);
1795}
1796
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001797static int em_popa(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001798{
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001799 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001800 int reg = VCPU_REGS_RDI;
1801
1802 while (reg >= VCPU_REGS_RAX) {
1803 if (reg == VCPU_REGS_RSP) {
Avi Kivity5ad105e2012-08-19 14:34:31 +03001804 rsp_increment(ctxt, ctxt->op_bytes);
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001805 --reg;
1806 }
1807
Avi Kivitydd856ef2012-08-27 23:46:17 +03001808 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001809 if (rc != X86EMUL_CONTINUE)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001810 break;
1811 --reg;
1812 }
1813 return rc;
1814}
1815
Avi Kivitydd856ef2012-08-27 23:46:17 +03001816static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001817{
Mathias Krause0225fb52012-08-30 01:30:16 +02001818 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001819 int rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001820 struct desc_ptr dt;
1821 gva_t cs_addr;
1822 gva_t eip_addr;
1823 u16 cs, eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001824
1825 /* TODO: Add limit checks */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001826 ctxt->src.val = ctxt->eflags;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001827 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001828 if (rc != X86EMUL_CONTINUE)
1829 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001830
1831 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1832
Avi Kivity9dac77f2011-06-01 15:34:25 +03001833 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001834 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001835 if (rc != X86EMUL_CONTINUE)
1836 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001837
Avi Kivity9dac77f2011-06-01 15:34:25 +03001838 ctxt->src.val = ctxt->_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001839 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001840 if (rc != X86EMUL_CONTINUE)
1841 return rc;
1842
Avi Kivity4bff1e862011-04-20 13:37:53 +03001843 ops->get_idt(ctxt, &dt);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001844
1845 eip_addr = dt.address + (irq << 2);
1846 cs_addr = dt.address + (irq << 2) + 2;
1847
Avi Kivity0f65dd72011-04-20 13:37:53 +03001848 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001849 if (rc != X86EMUL_CONTINUE)
1850 return rc;
1851
Avi Kivity0f65dd72011-04-20 13:37:53 +03001852 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001853 if (rc != X86EMUL_CONTINUE)
1854 return rc;
1855
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001856 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001857 if (rc != X86EMUL_CONTINUE)
1858 return rc;
1859
Avi Kivity9dac77f2011-06-01 15:34:25 +03001860 ctxt->_eip = eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001861
1862 return rc;
1863}
1864
Avi Kivitydd856ef2012-08-27 23:46:17 +03001865int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1866{
1867 int rc;
1868
1869 invalidate_registers(ctxt);
1870 rc = __emulate_int_real(ctxt, irq);
1871 if (rc == X86EMUL_CONTINUE)
1872 writeback_registers(ctxt);
1873 return rc;
1874}
1875
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001876static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001877{
1878 switch(ctxt->mode) {
1879 case X86EMUL_MODE_REAL:
Avi Kivitydd856ef2012-08-27 23:46:17 +03001880 return __emulate_int_real(ctxt, irq);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001881 case X86EMUL_MODE_VM86:
1882 case X86EMUL_MODE_PROT16:
1883 case X86EMUL_MODE_PROT32:
1884 case X86EMUL_MODE_PROT64:
1885 default:
 1886		/* Protected mode interrupts are not implemented yet */
1887 return X86EMUL_UNHANDLEABLE;
1888 }
1889}
1890
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001891static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001892{
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001893 int rc = X86EMUL_CONTINUE;
1894 unsigned long temp_eip = 0;
1895 unsigned long temp_eflags = 0;
1896 unsigned long cs = 0;
1897 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1898 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1899 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1900 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1901
1902 /* TODO: Add stack limit check */
1903
Avi Kivity9dac77f2011-06-01 15:34:25 +03001904 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001905
1906 if (rc != X86EMUL_CONTINUE)
1907 return rc;
1908
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001909 if (temp_eip & ~0xffff)
1910 return emulate_gp(ctxt, 0);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001911
Avi Kivity9dac77f2011-06-01 15:34:25 +03001912 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001913
1914 if (rc != X86EMUL_CONTINUE)
1915 return rc;
1916
Avi Kivity9dac77f2011-06-01 15:34:25 +03001917 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001918
1919 if (rc != X86EMUL_CONTINUE)
1920 return rc;
1921
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001922 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001923
1924 if (rc != X86EMUL_CONTINUE)
1925 return rc;
1926
Avi Kivity9dac77f2011-06-01 15:34:25 +03001927 ctxt->_eip = temp_eip;
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001928
1929
Avi Kivity9dac77f2011-06-01 15:34:25 +03001930 if (ctxt->op_bytes == 4)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001931 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
Avi Kivity9dac77f2011-06-01 15:34:25 +03001932 else if (ctxt->op_bytes == 2) {
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001933 ctxt->eflags &= ~0xffff;
1934 ctxt->eflags |= temp_eflags;
1935 }
1936
1937 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1938 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1939
1940 return rc;
1941}
1942
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09001943static int em_iret(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001944{
1945 switch(ctxt->mode) {
1946 case X86EMUL_MODE_REAL:
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001947 return emulate_iret_real(ctxt);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001948 case X86EMUL_MODE_VM86:
1949 case X86EMUL_MODE_PROT16:
1950 case X86EMUL_MODE_PROT32:
1951 case X86EMUL_MODE_PROT64:
1952 default:
 1953		/* iret from protected mode is not implemented yet */
1954 return X86EMUL_UNHANDLEABLE;
1955 }
1956}
1957
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001958static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1959{
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001960 int rc;
1961 unsigned short sel;
1962
Avi Kivity9dac77f2011-06-01 15:34:25 +03001963 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001964
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001965 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001966 if (rc != X86EMUL_CONTINUE)
1967 return rc;
1968
Avi Kivity9dac77f2011-06-01 15:34:25 +03001969 ctxt->_eip = 0;
1970 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001971 return X86EMUL_CONTINUE;
1972}
1973
Takuya Yoshikawa51187682011-05-02 02:29:17 +09001974static int em_grp45(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001975{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001976 int rc = X86EMUL_CONTINUE;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001977
Avi Kivity9dac77f2011-06-01 15:34:25 +03001978 switch (ctxt->modrm_reg) {
Mohammed Gamald19292e2008-09-08 21:47:19 +03001979 case 2: /* call near abs */ {
1980 long int old_eip;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001981 old_eip = ctxt->_eip;
1982 ctxt->_eip = ctxt->src.val;
1983 ctxt->src.val = old_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001984 rc = em_push(ctxt);
Mohammed Gamald19292e2008-09-08 21:47:19 +03001985 break;
1986 }
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001987 case 4: /* jmp abs */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001988 ctxt->_eip = ctxt->src.val;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001989 break;
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09001990 case 5: /* jmp far */
1991 rc = em_jmp_far(ctxt);
1992 break;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001993 case 6: /* push */
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001994 rc = em_push(ctxt);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001995 break;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001996 }
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001997 return rc;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001998}
1999
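/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination; on a match store
 * ECX:EBX there and set ZF, otherwise load the destination into EDX:EAX and
 * clear ZF.
 */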
Takuya Yoshikawae0dac402011-12-06 18:07:27 +09002000static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002001{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002002 u64 old = ctxt->dst.orig_val64;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002003
Nadav Amitaaa05f22014-06-02 18:34:10 +03002004 if (ctxt->dst.bytes == 16)
2005 return X86EMUL_UNHANDLEABLE;
2006
Avi Kivitydd856ef2012-08-27 23:46:17 +03002007 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2008 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2009 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2010 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
Laurent Vivier05f086f2007-09-24 11:10:55 +02002011 ctxt->eflags &= ~EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002012 } else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03002013 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2014 (u32) reg_read(ctxt, VCPU_REGS_RBX);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002015
Laurent Vivier05f086f2007-09-24 11:10:55 +02002016 ctxt->eflags |= EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002017 }
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002018 return X86EMUL_CONTINUE;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002019}
2020
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002021static int em_ret(struct x86_emulate_ctxt *ctxt)
2022{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002023 ctxt->dst.type = OP_REG;
2024 ctxt->dst.addr.reg = &ctxt->_eip;
2025 ctxt->dst.bytes = ctxt->op_bytes;
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002026 return em_pop(ctxt);
2027}
2028
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002029static int em_ret_far(struct x86_emulate_ctxt *ctxt)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002030{
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002031 int rc;
2032 unsigned long cs;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002033 int cpl = ctxt->ops->cpl(ctxt);
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002034
Avi Kivity9dac77f2011-06-01 15:34:25 +03002035 rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002036 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002037 return rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002038 if (ctxt->op_bytes == 4)
2039 ctxt->_eip = (u32)ctxt->_eip;
2040 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002041 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002042 return rc;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002043 /* Outer-privilege level return is not implemented */
2044 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2045 return X86EMUL_UNHANDLEABLE;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002046 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002047 return rc;
2048}
2049
Bruce Rogers32611072013-09-09 09:40:20 -06002050static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2051{
2052 int rc;
2053
2054 rc = em_ret_far(ctxt);
2055 if (rc != X86EMUL_CONTINUE)
2056 return rc;
2057 rsp_increment(ctxt, ctxt->src.val);
2058 return X86EMUL_CONTINUE;
2059}
2060
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002061static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2062{
2063 /* Save real source value, then compare EAX against destination. */
Nadav Amit37c564f2014-06-02 18:34:07 +03002064 ctxt->dst.orig_val = ctxt->dst.val;
2065 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002066 ctxt->src.orig_val = ctxt->src.val;
Nadav Amit37c564f2014-06-02 18:34:07 +03002067 ctxt->src.val = ctxt->dst.orig_val;
Avi Kivity158de572013-01-19 19:51:57 +02002068 fastop(ctxt, em_cmp);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002069
2070 if (ctxt->eflags & EFLG_ZF) {
2071 /* Success: write back to memory. */
2072 ctxt->dst.val = ctxt->src.orig_val;
2073 } else {
2074 /* Failure: write the value we saw to EAX. */
2075 ctxt->dst.type = OP_REG;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002076 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Nadav Amit37c564f2014-06-02 18:34:07 +03002077 ctxt->dst.val = ctxt->dst.orig_val;
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002078 }
2079 return X86EMUL_CONTINUE;
2080}
2081
Avi Kivityd4b43252011-09-13 10:45:50 +03002082static int em_lseg(struct x86_emulate_ctxt *ctxt)
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002083{
Avi Kivityd4b43252011-09-13 10:45:50 +03002084 int seg = ctxt->src2.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002085 unsigned short sel;
2086 int rc;
2087
Avi Kivity9dac77f2011-06-01 15:34:25 +03002088 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002089
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002090 rc = load_segment_descriptor(ctxt, sel, seg);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002091 if (rc != X86EMUL_CONTINUE)
2092 return rc;
2093
Avi Kivity9dac77f2011-06-01 15:34:25 +03002094 ctxt->dst.val = ctxt->src.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002095 return rc;
2096}
2097
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002098static void
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002099setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002100 struct desc_struct *cs, struct desc_struct *ss)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002101{
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002102 cs->l = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002103 set_desc_base(cs, 0); /* flat segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002104 cs->g = 1; /* 4kb granularity */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002105 set_desc_limit(cs, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002106 cs->type = 0x0b; /* Read, Execute, Accessed */
2107 cs->s = 1;
2108 cs->dpl = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002109 cs->p = 1;
2110 cs->d = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002111 cs->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002112
Gleb Natapov79168fd2010-04-28 19:15:30 +03002113 set_desc_base(ss, 0); /* flat segment */
2114 set_desc_limit(ss, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002115 ss->g = 1; /* 4kb granularity */
2116 ss->s = 1;
2117 ss->type = 0x03; /* Read/Write, Accessed */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002118 ss->d = 1; /* 32bit stack segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002119 ss->dpl = 0;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002120 ss->p = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002121 ss->l = 0;
2122 ss->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002123}
2124
Avi Kivity1a18a692012-02-01 12:23:21 +02002125static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2126{
2127 u32 eax, ebx, ecx, edx;
2128
2129 eax = ecx = 0;
Avi Kivity0017f932012-06-07 14:10:16 +03002130 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2131 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
Avi Kivity1a18a692012-02-01 12:23:21 +02002132 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2133 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2134}
2135
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002136static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2137{
Mathias Krause0225fb52012-08-30 01:30:16 +02002138 const struct x86_emulate_ops *ops = ctxt->ops;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002139 u32 eax, ebx, ecx, edx;
2140
2141 /*
2142 * syscall should always be enabled in longmode - so only become
 2143	 * syscall should always be enabled in long mode - so it only becomes
 2144	 * vendor specific (via cpuid) when other modes are active...
2145 if (ctxt->mode == X86EMUL_MODE_PROT64)
2146 return true;
2147
2148 eax = 0x00000000;
2149 ecx = 0x00000000;
Avi Kivity0017f932012-06-07 14:10:16 +03002150 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2151 /*
2152 * Intel ("GenuineIntel")
 2153	 * remark: Intel CPUs only support "syscall" in 64-bit
 2154	 * long mode. Also, a 64-bit guest running a 32-bit
 2155	 * compat app will #UD! While this behaviour could be
 2156	 * fixed here by emulating the AMD response, AMD CPUs
 2157	 * can't be made to behave like Intel ones.
2158 */
2159 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2160 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2161 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2162 return false;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002163
Avi Kivity0017f932012-06-07 14:10:16 +03002164 /* AMD ("AuthenticAMD") */
2165 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2166 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2167 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2168 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002169
Avi Kivity0017f932012-06-07 14:10:16 +03002170 /* AMD ("AMDisbetter!") */
2171 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2172 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2173 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2174 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002175
2176 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2177 return false;
2178}
2179
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002180static int em_syscall(struct x86_emulate_ctxt *ctxt)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002181{
Mathias Krause0225fb52012-08-30 01:30:16 +02002182 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002183 struct desc_struct cs, ss;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002184 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002185 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002186 u64 efer = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002187
2188 /* syscall is not available in real mode */
Gleb Natapov2e901c42010-03-18 15:20:12 +02002189 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002190 ctxt->mode == X86EMUL_MODE_VM86)
2191 return emulate_ud(ctxt);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002192
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002193 if (!(em_syscall_is_enabled(ctxt)))
2194 return emulate_ud(ctxt);
2195
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002196 ops->get_msr(ctxt, MSR_EFER, &efer);
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002197 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002198
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002199 if (!(efer & EFER_SCE))
2200 return emulate_ud(ctxt);
2201
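	/* The SYSCALL CS selector comes from MSR_STAR[47:32]; SS is CS + 8. */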
Avi Kivity717746e2011-04-20 13:37:53 +03002202 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002203 msr_data >>= 32;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002204 cs_sel = (u16)(msr_data & 0xfffc);
2205 ss_sel = (u16)(msr_data + 8);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002206
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002207 if (efer & EFER_LMA) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002208 cs.d = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002209 cs.l = 1;
2210 }
Avi Kivity1aa36612011-04-27 13:20:30 +03002211 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2212 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002213
Avi Kivitydd856ef2012-08-27 23:46:17 +03002214 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002215 if (efer & EFER_LMA) {
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002216#ifdef CONFIG_X86_64
Avi Kivitydd856ef2012-08-27 23:46:17 +03002217 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002218
Avi Kivity717746e2011-04-20 13:37:53 +03002219 ops->get_msr(ctxt,
Gleb Natapov3fb1b5d2010-04-28 19:15:28 +03002220 ctxt->mode == X86EMUL_MODE_PROT64 ?
2221 MSR_LSTAR : MSR_CSTAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002222 ctxt->_eip = msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002223
Avi Kivity717746e2011-04-20 13:37:53 +03002224 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002225 ctxt->eflags &= ~(msr_data | EFLG_RF);
2226#endif
2227 } else {
2228 /* legacy mode */
Avi Kivity717746e2011-04-20 13:37:53 +03002229 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002230 ctxt->_eip = (u32)msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002231
2232 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2233 }
2234
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002235 return X86EMUL_CONTINUE;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002236}
2237
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002238static int em_sysenter(struct x86_emulate_ctxt *ctxt)
Andre Przywara8c604352009-06-18 12:56:01 +02002239{
Mathias Krause0225fb52012-08-30 01:30:16 +02002240 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002241 struct desc_struct cs, ss;
Andre Przywara8c604352009-06-18 12:56:01 +02002242 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002243 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002244 u64 efer = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002245
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002246 ops->get_msr(ctxt, MSR_EFER, &efer);
Gleb Natapova0044752010-02-10 14:21:31 +02002247 /* inject #GP if in real mode */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002248 if (ctxt->mode == X86EMUL_MODE_REAL)
2249 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002250
Avi Kivity1a18a692012-02-01 12:23:21 +02002251 /*
2252 * Not recognized on AMD in compat mode (but is recognized in legacy
2253 * mode).
2254 */
2255 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2256 && !vendor_intel(ctxt))
2257 return emulate_ud(ctxt);
2258
Andre Przywara8c604352009-06-18 12:56:01 +02002259 /* XXX sysenter/sysexit have not been tested in 64bit mode.
2260 * Therefore, we inject an #UD.
2261 */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002262 if (ctxt->mode == X86EMUL_MODE_PROT64)
2263 return emulate_ud(ctxt);
Andre Przywara8c604352009-06-18 12:56:01 +02002264
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002265 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara8c604352009-06-18 12:56:01 +02002266
Avi Kivity717746e2011-04-20 13:37:53 +03002267 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara8c604352009-06-18 12:56:01 +02002268 switch (ctxt->mode) {
2269 case X86EMUL_MODE_PROT32:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002270 if ((msr_data & 0xfffc) == 0x0)
2271 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002272 break;
2273 case X86EMUL_MODE_PROT64:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002274 if (msr_data == 0x0)
2275 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002276 break;
Gleb Natapov9d1b39a2012-09-03 15:24:27 +03002277 default:
2278 break;
Andre Przywara8c604352009-06-18 12:56:01 +02002279 }
2280
2281 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
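	/* SYSENTER loads CS straight from SYSENTER_CS; SS is CS + 8. */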
Gleb Natapov79168fd2010-04-28 19:15:30 +03002282 cs_sel = (u16)msr_data;
2283 cs_sel &= ~SELECTOR_RPL_MASK;
2284 ss_sel = cs_sel + 8;
2285 ss_sel &= ~SELECTOR_RPL_MASK;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002286 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002287 cs.d = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002288 cs.l = 1;
2289 }
2290
Avi Kivity1aa36612011-04-27 13:20:30 +03002291 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2292 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara8c604352009-06-18 12:56:01 +02002293
Avi Kivity717746e2011-04-20 13:37:53 +03002294 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002295 ctxt->_eip = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002296
Avi Kivity717746e2011-04-20 13:37:53 +03002297 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002298 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002299
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002300 return X86EMUL_CONTINUE;
Andre Przywara8c604352009-06-18 12:56:01 +02002301}
2302
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002303static int em_sysexit(struct x86_emulate_ctxt *ctxt)
Andre Przywara4668f052009-06-18 12:56:02 +02002304{
Mathias Krause0225fb52012-08-30 01:30:16 +02002305 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002306 struct desc_struct cs, ss;
Andre Przywara4668f052009-06-18 12:56:02 +02002307 u64 msr_data;
2308 int usermode;
Xiao Guangrong1249b962011-05-15 23:25:10 +08002309 u16 cs_sel = 0, ss_sel = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002310
Gleb Natapova0044752010-02-10 14:21:31 +02002311 /* inject #GP if in real mode or Virtual 8086 mode */
2312 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002313 ctxt->mode == X86EMUL_MODE_VM86)
2314 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002315
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002316 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara4668f052009-06-18 12:56:02 +02002317
Avi Kivity9dac77f2011-06-01 15:34:25 +03002318 if ((ctxt->rex_prefix & 0x8) != 0x0)
Andre Przywara4668f052009-06-18 12:56:02 +02002319 usermode = X86EMUL_MODE_PROT64;
2320 else
2321 usermode = X86EMUL_MODE_PROT32;
2322
2323 cs.dpl = 3;
2324 ss.dpl = 3;
Avi Kivity717746e2011-04-20 13:37:53 +03002325 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
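	/*
	 * SYSEXIT target selectors are offsets from SYSENTER_CS: +16/+24
	 * (CS/SS) for a 32-bit return, +32/+40 for a 64-bit return.
	 */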
Andre Przywara4668f052009-06-18 12:56:02 +02002326 switch (usermode) {
2327 case X86EMUL_MODE_PROT32:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002328 cs_sel = (u16)(msr_data + 16);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002329 if ((msr_data & 0xfffc) == 0x0)
2330 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002331 ss_sel = (u16)(msr_data + 24);
Andre Przywara4668f052009-06-18 12:56:02 +02002332 break;
2333 case X86EMUL_MODE_PROT64:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002334 cs_sel = (u16)(msr_data + 32);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002335 if (msr_data == 0x0)
2336 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002337 ss_sel = cs_sel + 8;
2338 cs.d = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002339 cs.l = 1;
2340 break;
2341 }
Gleb Natapov79168fd2010-04-28 19:15:30 +03002342 cs_sel |= SELECTOR_RPL_MASK;
2343 ss_sel |= SELECTOR_RPL_MASK;
Andre Przywara4668f052009-06-18 12:56:02 +02002344
Avi Kivity1aa36612011-04-27 13:20:30 +03002345 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2346 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara4668f052009-06-18 12:56:02 +02002347
Avi Kivitydd856ef2012-08-27 23:46:17 +03002348 ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
2349 *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
Andre Przywara4668f052009-06-18 12:56:02 +02002350
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002351 return X86EMUL_CONTINUE;
Andre Przywara4668f052009-06-18 12:56:02 +02002352}
2353
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002354static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002355{
2356 int iopl;
2357 if (ctxt->mode == X86EMUL_MODE_REAL)
2358 return false;
2359 if (ctxt->mode == X86EMUL_MODE_VM86)
2360 return true;
2361 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002362 return ctxt->ops->cpl(ctxt) > iopl;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002363}
2364
2365static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002366 u16 port, u16 len)
2367{
Mathias Krause0225fb52012-08-30 01:30:16 +02002368 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002369 struct desc_struct tr_seg;
Gleb Natapov5601d052011-03-07 14:55:06 +02002370 u32 base3;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002371 int r;
Avi Kivity1aa36612011-04-27 13:20:30 +03002372 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002373 unsigned mask = (1 << len) - 1;
Gleb Natapov5601d052011-03-07 14:55:06 +02002374 unsigned long base;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002375
Avi Kivity1aa36612011-04-27 13:20:30 +03002376 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002377 if (!tr_seg.p)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002378 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002379 if (desc_limit_scaled(&tr_seg) < 103)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002380 return false;
Gleb Natapov5601d052011-03-07 14:55:06 +02002381 base = get_desc_base(&tr_seg);
2382#ifdef CONFIG_X86_64
2383 base |= ((u64)base3) << 32;
2384#endif
Avi Kivity0f65dd72011-04-20 13:37:53 +03002385 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002386 if (r != X86EMUL_CONTINUE)
2387 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002388 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002389 return false;
Avi Kivity0f65dd72011-04-20 13:37:53 +03002390 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002391 if (r != X86EMUL_CONTINUE)
2392 return false;
2393 if ((perm >> bit_idx) & mask)
2394 return false;
2395 return true;
2396}
2397
2398static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002399 u16 port, u16 len)
2400{
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002401 if (ctxt->perm_ok)
2402 return true;
2403
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002404 if (emulator_bad_iopl(ctxt))
2405 if (!emulator_io_port_access_allowed(ctxt, port, len))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002406 return false;
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002407
2408 ctxt->perm_ok = true;
2409
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002410 return true;
2411}
2412
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002413static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002414 struct tss_segment_16 *tss)
2415{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002416 tss->ip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002417 tss->flag = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002418 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2419 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2420 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2421 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2422 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2423 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2424 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2425 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002426
Avi Kivity1aa36612011-04-27 13:20:30 +03002427 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2428 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2429 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2430 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2431 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002432}
2433
2434static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002435 struct tss_segment_16 *tss)
2436{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002437 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002438 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002439
Avi Kivity9dac77f2011-06-01 15:34:25 +03002440 ctxt->_eip = tss->ip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002441 ctxt->eflags = tss->flag | 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002442 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2443 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2444 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2445 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2446 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2447 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2448 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2449 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002450
2451 /*
2452 * SDM says that segment selectors are loaded before segment
2453 * descriptors
2454 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002455 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2456 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2457 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2458 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2459 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002460
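	/* The CPL for the descriptor checks below is the RPL of the incoming CS */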
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002461 cpl = tss->cs & 3;
2462
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002463 /*
Guo Chaofc058682012-06-28 15:19:51 +08002464	 * Now load segment descriptors. If a fault happens at this stage,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002465	 * it is handled in the context of the new task
2466 */
Paolo Bonzini5045b462014-05-15 18:09:29 +02002467 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002468 if (ret != X86EMUL_CONTINUE)
2469 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002470 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002471 if (ret != X86EMUL_CONTINUE)
2472 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002473 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002474 if (ret != X86EMUL_CONTINUE)
2475 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002476 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002477 if (ret != X86EMUL_CONTINUE)
2478 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002479 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002480 if (ret != X86EMUL_CONTINUE)
2481 return ret;
2482
2483 return X86EMUL_CONTINUE;
2484}
2485
2486static int task_switch_16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002487 u16 tss_selector, u16 old_tss_sel,
2488 ulong old_tss_base, struct desc_struct *new_desc)
2489{
Mathias Krause0225fb52012-08-30 01:30:16 +02002490 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002491 struct tss_segment_16 tss_seg;
2492 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002493 u32 new_tss_base = get_desc_base(new_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002494
Avi Kivity0f65dd72011-04-20 13:37:53 +03002495 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002496 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002497 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002498 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002499 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002500
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002501 save_state_to_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002502
Avi Kivity0f65dd72011-04-20 13:37:53 +03002503 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002504 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002505 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002506 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002507 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002508
Avi Kivity0f65dd72011-04-20 13:37:53 +03002509 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002510 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002511 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002512 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002513 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002514
2515 if (old_tss_sel != 0xffff) {
2516 tss_seg.prev_task_link = old_tss_sel;
2517
Avi Kivity0f65dd72011-04-20 13:37:53 +03002518 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002519 &tss_seg.prev_task_link,
2520 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002521 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002522 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002523 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002524 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002525 }
2526
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002527 return load_state_from_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002528}
2529
2530static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002531 struct tss_segment_32 *tss)
2532{
Nadav Amit5c7411e2014-04-07 18:37:47 +03002533	/* CR3 and the LDT selector are intentionally not saved */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002534 tss->eip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002535 tss->eflags = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002536 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2537 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2538 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2539 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2540 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2541 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2542 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2543 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002544
Avi Kivity1aa36612011-04-27 13:20:30 +03002545 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2546 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2547 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2548 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2549 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2550 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002551}
2552
2553static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002554 struct tss_segment_32 *tss)
2555{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002556 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002557 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002558
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002559 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002560 return emulate_gp(ctxt, 0);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002561 ctxt->_eip = tss->eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002562 ctxt->eflags = tss->eflags | 2;
Kevin Wolf4cee4792012-02-08 14:34:41 +01002563
2564 /* General purpose registers */
Avi Kivitydd856ef2012-08-27 23:46:17 +03002565 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2566 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2567 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2568 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2569 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2570 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2571 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2572 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002573
2574 /*
2575 * SDM says that segment selectors are loaded before segment
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002576 * descriptors. This is important because CPL checks will
2577 * use CS.RPL.
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002578 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002579 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2580 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2581 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2582 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2583 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2584 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2585 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002586
2587 /*
Kevin Wolf4cee4792012-02-08 14:34:41 +01002588 * If we're switching between Protected Mode and VM86, we need to make
2589 * sure to update the mode before loading the segment descriptors so
2590 * that the selectors are interpreted correctly.
Kevin Wolf4cee4792012-02-08 14:34:41 +01002591 */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002592 if (ctxt->eflags & X86_EFLAGS_VM) {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002593 ctxt->mode = X86EMUL_MODE_VM86;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002594 cpl = 3;
2595 } else {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002596 ctxt->mode = X86EMUL_MODE_PROT32;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002597 cpl = tss->cs & 3;
2598 }
Kevin Wolf4cee4792012-02-08 14:34:41 +01002599
2600 /*
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002601	 * Now load segment descriptors. If a fault happens at this stage,
2602	 * it is handled in the context of the new task
2603 */
Paolo Bonzini5045b462014-05-15 18:09:29 +02002604 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002605 if (ret != X86EMUL_CONTINUE)
2606 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002607 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002608 if (ret != X86EMUL_CONTINUE)
2609 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002610 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002611 if (ret != X86EMUL_CONTINUE)
2612 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002613 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002614 if (ret != X86EMUL_CONTINUE)
2615 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002616 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002617 if (ret != X86EMUL_CONTINUE)
2618 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002619 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002620 if (ret != X86EMUL_CONTINUE)
2621 return ret;
Paolo Bonzini5045b462014-05-15 18:09:29 +02002622 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002623 if (ret != X86EMUL_CONTINUE)
2624 return ret;
2625
2626 return X86EMUL_CONTINUE;
2627}
2628
2629static int task_switch_32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002630 u16 tss_selector, u16 old_tss_sel,
2631 ulong old_tss_base, struct desc_struct *new_desc)
2632{
Mathias Krause0225fb52012-08-30 01:30:16 +02002633 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002634 struct tss_segment_32 tss_seg;
2635 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002636 u32 new_tss_base = get_desc_base(new_desc);
Nadav Amit5c7411e2014-04-07 18:37:47 +03002637 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2638 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002639
Avi Kivity0f65dd72011-04-20 13:37:53 +03002640 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002641 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002642 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002643 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002644 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002645
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002646 save_state_to_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002647
Nadav Amit5c7411e2014-04-07 18:37:47 +03002648 /* Only GP registers and segment selectors are saved */
2649 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2650 ldt_sel_offset - eip_offset, &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002651 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002652 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002653 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002654
Avi Kivity0f65dd72011-04-20 13:37:53 +03002655 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002656 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002657 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002658 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002659 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002660
2661 if (old_tss_sel != 0xffff) {
2662 tss_seg.prev_task_link = old_tss_sel;
2663
Avi Kivity0f65dd72011-04-20 13:37:53 +03002664 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002665 &tss_seg.prev_task_link,
2666 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002667 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002668 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002669 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002670 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002671 }
2672
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002673 return load_state_from_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002674}
2675
2676static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002677 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002678 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002679{
Mathias Krause0225fb52012-08-30 01:30:16 +02002680 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002681 struct desc_struct curr_tss_desc, next_tss_desc;
2682 int ret;
Avi Kivity1aa36612011-04-27 13:20:30 +03002683 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002684 ulong old_tss_base =
Avi Kivity4bff1e862011-04-20 13:37:53 +03002685 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
Gleb Natapovceffb452010-03-18 15:20:19 +02002686 u32 desc_limit;
Avi Kivitye9194642012-06-13 16:29:39 +03002687 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002688
2689 /* FIXME: old_tss_base == ~0 ? */
2690
Avi Kivitye9194642012-06-13 16:29:39 +03002691 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002692 if (ret != X86EMUL_CONTINUE)
2693 return ret;
Avi Kivitye9194642012-06-13 16:29:39 +03002694 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002695 if (ret != X86EMUL_CONTINUE)
2696 return ret;
2697
2698 /* FIXME: check that next_tss_desc is tss */
2699
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002700 /*
2701 * Check privileges. The three cases are task switch caused by...
2702 *
2703 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2704 * 2. Exception/IRQ/iret: No check is performed
Guo Chaofc058682012-06-28 15:19:51 +08002705 * 3. jmp/call to TSS: Check against DPL of the TSS
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002706 */
2707 if (reason == TASK_SWITCH_GATE) {
2708 if (idt_index != -1) {
2709 /* Software interrupts */
2710 struct desc_struct task_gate_desc;
2711 int dpl;
2712
2713 ret = read_interrupt_descriptor(ctxt, idt_index,
2714 &task_gate_desc);
2715 if (ret != X86EMUL_CONTINUE)
2716 return ret;
2717
2718 dpl = task_gate_desc.dpl;
2719 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2720 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2721 }
2722 } else if (reason != TASK_SWITCH_IRET) {
2723 int dpl = next_tss_desc.dpl;
2724 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2725 return emulate_gp(ctxt, tss_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002726 }
2727
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002728
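	/*
	 * A usable TSS must be present and large enough: limit >= 0x67
	 * (104 bytes minus one) for a 32-bit TSS, >= 0x2b (44 bytes minus
	 * one) for a 16-bit TSS; otherwise raise #TS.
	 */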
Gleb Natapovceffb452010-03-18 15:20:19 +02002729 desc_limit = desc_limit_scaled(&next_tss_desc);
2730 if (!next_tss_desc.p ||
2731 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2732 desc_limit < 0x2b)) {
Gleb Natapov54b84862010-04-28 19:15:44 +03002733 emulate_ts(ctxt, tss_selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002734 return X86EMUL_PROPAGATE_FAULT;
2735 }
2736
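	/*
	 * IRET and JMP leave the outgoing task for good, so its TSS busy
	 * bit is cleared; CALL and gate switches keep it set so the nested
	 * task can be resumed through the back link.
	 */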
2737 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2738 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002739 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002740 }
2741
2742 if (reason == TASK_SWITCH_IRET)
2743 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2744
2745 /* set back link to prev task only if NT bit is set in eflags
Guo Chaofc058682012-06-28 15:19:51 +08002746 note that old_tss_sel is not used after this point */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002747 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2748 old_tss_sel = 0xffff;
2749
2750 if (next_tss_desc.type & 8)
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002751 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002752 old_tss_base, &next_tss_desc);
2753 else
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002754 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002755 old_tss_base, &next_tss_desc);
Jan Kiszka0760d442010-04-14 15:50:57 +02002756 if (ret != X86EMUL_CONTINUE)
2757 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002758
2759 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2760 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2761
2762 if (reason != TASK_SWITCH_IRET) {
2763 next_tss_desc.type |= (1 << 1); /* set busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002764 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002765 }
2766
Avi Kivity717746e2011-04-20 13:37:53 +03002767 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
Avi Kivity1aa36612011-04-27 13:20:30 +03002768 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002769
Jan Kiszkae269fb22010-04-14 15:51:09 +02002770 if (has_error_code) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002771 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2772 ctxt->lock_prefix = 0;
2773 ctxt->src.val = (unsigned long) error_code;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002774 ret = em_push(ctxt);
Jan Kiszkae269fb22010-04-14 15:51:09 +02002775 }
2776
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002777 return ret;
2778}
2779
2780int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002781 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002782 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002783{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002784 int rc;
2785
Avi Kivitydd856ef2012-08-27 23:46:17 +03002786 invalidate_registers(ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002787 ctxt->_eip = ctxt->eip;
2788 ctxt->dst.type = OP_NONE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002789
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002790 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002791 has_error_code, error_code);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002792
Avi Kivitydd856ef2012-08-27 23:46:17 +03002793 if (rc == X86EMUL_CONTINUE) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002794 ctxt->eip = ctxt->_eip;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002795 writeback_registers(ctxt);
2796 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002797
Gleb Natapova0c0ab22011-03-28 16:57:49 +02002798 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002799}
2800
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03002801static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2802 struct operand *op)
Gleb Natapova682e352010-03-18 15:20:21 +02002803{
Gleb Natapovb3356bf2012-09-03 15:24:29 +03002804 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
Gleb Natapova682e352010-03-18 15:20:21 +02002805
Avi Kivitydd856ef2012-08-27 23:46:17 +03002806 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2807 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
Gleb Natapova682e352010-03-18 15:20:21 +02002808}
2809
Avi Kivity7af04fc2010-08-18 14:16:35 +03002810static int em_das(struct x86_emulate_ctxt *ctxt)
2811{
Avi Kivity7af04fc2010-08-18 14:16:35 +03002812 u8 al, old_al;
2813 bool af, cf, old_cf;
2814
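	/*
	 * DAS adjusts AL after a packed-BCD subtraction: subtract 6 if the
	 * low nibble is above 9 or AF was set, then subtract 0x60 if the
	 * original AL was above 0x99 or CF was set, updating AF and CF on
	 * the way.
	 */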
2815 cf = ctxt->eflags & X86_EFLAGS_CF;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002816 al = ctxt->dst.val;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002817
2818 old_al = al;
2819 old_cf = cf;
2820 cf = false;
2821 af = ctxt->eflags & X86_EFLAGS_AF;
2822 if ((al & 0x0f) > 9 || af) {
2823 al -= 6;
2824 cf = old_cf | (al >= 250);
2825 af = true;
2826 } else {
2827 af = false;
2828 }
2829 if (old_al > 0x99 || old_cf) {
2830 al -= 0x60;
2831 cf = true;
2832 }
2833
Avi Kivity9dac77f2011-06-01 15:34:25 +03002834 ctxt->dst.val = al;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002835 /* Set PF, ZF, SF */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002836 ctxt->src.type = OP_IMM;
2837 ctxt->src.val = 0;
2838 ctxt->src.bytes = 1;
Avi Kivity158de572013-01-19 19:51:57 +02002839 fastop(ctxt, em_or);
Avi Kivity7af04fc2010-08-18 14:16:35 +03002840 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2841 if (cf)
2842 ctxt->eflags |= X86_EFLAGS_CF;
2843 if (af)
2844 ctxt->eflags |= X86_EFLAGS_AF;
2845 return X86EMUL_CONTINUE;
2846}
2847
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02002848static int em_aam(struct x86_emulate_ctxt *ctxt)
2849{
2850 u8 al, ah;
2851
2852 if (ctxt->src.val == 0)
2853 return emulate_de(ctxt);
2854
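	/* AAM splits AL: AH = AL / imm8, AL = AL % imm8 (imm8 is 10 for the plain encoding) */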
2855 al = ctxt->dst.val & 0xff;
2856 ah = al / ctxt->src.val;
2857 al %= ctxt->src.val;
2858
2859 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2860
2861 /* Set PF, ZF, SF */
2862 ctxt->src.type = OP_IMM;
2863 ctxt->src.val = 0;
2864 ctxt->src.bytes = 1;
2865 fastop(ctxt, em_or);
2866
2867 return X86EMUL_CONTINUE;
2868}
2869
Gleb Natapov7f662272012-12-10 11:42:30 +02002870static int em_aad(struct x86_emulate_ctxt *ctxt)
2871{
2872 u8 al = ctxt->dst.val & 0xff;
2873 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2874
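	/* AAD folds AH into AL: AL = (AL + AH * imm8) & 0xff, AH = 0 (imm8 is 10 for the plain encoding) */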
2875 al = (al + (ah * ctxt->src.val)) & 0xff;
2876
2877 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2878
Gleb Natapovf583c292013-02-13 17:50:39 +02002879 /* Set PF, ZF, SF */
2880 ctxt->src.type = OP_IMM;
2881 ctxt->src.val = 0;
2882 ctxt->src.bytes = 1;
2883 fastop(ctxt, em_or);
Gleb Natapov7f662272012-12-10 11:42:30 +02002884
2885 return X86EMUL_CONTINUE;
2886}
2887
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09002888static int em_call(struct x86_emulate_ctxt *ctxt)
2889{
2890 long rel = ctxt->src.val;
2891
2892 ctxt->src.val = (unsigned long)ctxt->_eip;
2893 jmp_rel(ctxt, rel);
2894 return em_push(ctxt);
2895}
2896
Avi Kivity0ef753b2010-08-18 14:51:45 +03002897static int em_call_far(struct x86_emulate_ctxt *ctxt)
2898{
Avi Kivity0ef753b2010-08-18 14:51:45 +03002899 u16 sel, old_cs;
2900 ulong old_eip;
2901 int rc;
2902
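	/*
	 * Far call: load the new CS from the operand, then push the old CS
	 * and old EIP as the return address before continuing at the new
	 * target.
	 */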
Avi Kivity1aa36612011-04-27 13:20:30 +03002903 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002904 old_eip = ctxt->_eip;
Avi Kivity0ef753b2010-08-18 14:51:45 +03002905
Avi Kivity9dac77f2011-06-01 15:34:25 +03002906 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002907 if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
Avi Kivity0ef753b2010-08-18 14:51:45 +03002908 return X86EMUL_CONTINUE;
2909
Avi Kivity9dac77f2011-06-01 15:34:25 +03002910 ctxt->_eip = 0;
2911 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
Avi Kivity0ef753b2010-08-18 14:51:45 +03002912
Avi Kivity9dac77f2011-06-01 15:34:25 +03002913 ctxt->src.val = old_cs;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002914 rc = em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03002915 if (rc != X86EMUL_CONTINUE)
2916 return rc;
2917
Avi Kivity9dac77f2011-06-01 15:34:25 +03002918 ctxt->src.val = old_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002919 return em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03002920}
2921
Avi Kivity40ece7c2010-08-18 15:12:09 +03002922static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2923{
Avi Kivity40ece7c2010-08-18 15:12:09 +03002924 int rc;
2925
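	/* RET imm16: pop the return address, then release imm16 extra bytes of stack */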
Avi Kivity9dac77f2011-06-01 15:34:25 +03002926 ctxt->dst.type = OP_REG;
2927 ctxt->dst.addr.reg = &ctxt->_eip;
2928 ctxt->dst.bytes = ctxt->op_bytes;
2929 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Avi Kivity40ece7c2010-08-18 15:12:09 +03002930 if (rc != X86EMUL_CONTINUE)
2931 return rc;
Avi Kivity5ad105e2012-08-19 14:34:31 +03002932 rsp_increment(ctxt, ctxt->src.val);
Avi Kivity40ece7c2010-08-18 15:12:09 +03002933 return X86EMUL_CONTINUE;
2934}
2935
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09002936static int em_xchg(struct x86_emulate_ctxt *ctxt)
2937{
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09002938 /* Write back the register source. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002939 ctxt->src.val = ctxt->dst.val;
2940 write_register_operand(&ctxt->src);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09002941
2942 /* Write back the memory destination with implicit LOCK prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002943 ctxt->dst.val = ctxt->src.orig_val;
2944 ctxt->lock_prefix = 1;
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09002945 return X86EMUL_CONTINUE;
2946}
2947
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03002948static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2949{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002950 ctxt->dst.val = ctxt->src2.val;
Avi Kivity4d758342013-01-19 19:51:55 +02002951 return fastop(ctxt, em_imul);
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03002952}
2953
Avi Kivity61429142010-08-19 15:13:00 +03002954static int em_cwd(struct x86_emulate_ctxt *ctxt)
2955{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002956 ctxt->dst.type = OP_REG;
2957 ctxt->dst.bytes = ctxt->src.bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002958 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
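	/*
	 * Replicate the sign bit of the source into DX/EDX/RDX: shifting
	 * the sign bit down to bit 0 gives 0 or 1, and ~(x - 1) turns that
	 * into all-zeroes or all-ones respectively.
	 */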
Avi Kivity9dac77f2011-06-01 15:34:25 +03002959 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
Avi Kivity61429142010-08-19 15:13:00 +03002960
2961 return X86EMUL_CONTINUE;
2962}
2963
Avi Kivity48bb5d32010-08-18 18:54:34 +03002964static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2965{
Avi Kivity48bb5d32010-08-18 18:54:34 +03002966 u64 tsc = 0;
2967
Avi Kivity717746e2011-04-20 13:37:53 +03002968 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002969 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
2970 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
Avi Kivity48bb5d32010-08-18 18:54:34 +03002971 return X86EMUL_CONTINUE;
2972}
2973
Avi Kivity222d21a2011-11-10 14:57:30 +02002974static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
2975{
2976 u64 pmc;
2977
Avi Kivitydd856ef2012-08-27 23:46:17 +03002978 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
Avi Kivity222d21a2011-11-10 14:57:30 +02002979 return emulate_gp(ctxt, 0);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002980 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
2981 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
Avi Kivity222d21a2011-11-10 14:57:30 +02002982 return X86EMUL_CONTINUE;
2983}
2984
Avi Kivityb9eac5f2010-08-03 14:46:56 +03002985static int em_mov(struct x86_emulate_ctxt *ctxt)
2986{
Paolo Bonzini54cfdb32014-03-27 11:36:25 +01002987 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
Avi Kivityb9eac5f2010-08-03 14:46:56 +03002988 return X86EMUL_CONTINUE;
2989}
2990
Borislav Petkov84cffe42013-10-29 12:54:56 +01002991#define FFL(x) bit(X86_FEATURE_##x)
2992
2993static int em_movbe(struct x86_emulate_ctxt *ctxt)
2994{
2995 u32 ebx, ecx, edx, eax = 1;
2996 u16 tmp;
2997
2998 /*
2999	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3000 */
3001 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3002 if (!(ecx & FFL(MOVBE)))
3003 return emulate_ud(ctxt);
3004
3005 switch (ctxt->op_bytes) {
3006 case 2:
3007 /*
3008 * From MOVBE definition: "...When the operand size is 16 bits,
3009 * the upper word of the destination register remains unchanged
3010 * ..."
3011 *
3012	 * Casting either ->valptr or ->val to u16 breaks strict aliasing
3013	 * rules, so we have to do the operation almost by hand.
3014 */
3015 tmp = (u16)ctxt->src.val;
3016 ctxt->dst.val &= ~0xffffUL;
3017 ctxt->dst.val |= (unsigned long)swab16(tmp);
3018 break;
3019 case 4:
3020 ctxt->dst.val = swab32((u32)ctxt->src.val);
3021 break;
3022 case 8:
3023 ctxt->dst.val = swab64(ctxt->src.val);
3024 break;
3025 default:
3026 return X86EMUL_PROPAGATE_FAULT;
3027 }
3028 return X86EMUL_CONTINUE;
3029}
3030
Takuya Yoshikawabc00f8d2011-11-22 15:19:19 +09003031static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3032{
3033 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3034 return emulate_gp(ctxt, 0);
3035
3036 /* Disable writeback. */
3037 ctxt->dst.type = OP_NONE;
3038 return X86EMUL_CONTINUE;
3039}
3040
3041static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3042{
3043 unsigned long val;
3044
3045 if (ctxt->mode == X86EMUL_MODE_PROT64)
3046 val = ctxt->src.val & ~0ULL;
3047 else
3048 val = ctxt->src.val & ~0U;
3049
3050 /* #UD condition is already handled. */
3051 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3052 return emulate_gp(ctxt, 0);
3053
3054 /* Disable writeback. */
3055 ctxt->dst.type = OP_NONE;
3056 return X86EMUL_CONTINUE;
3057}
3058
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003059static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3060{
3061 u64 msr_data;
3062
Avi Kivitydd856ef2012-08-27 23:46:17 +03003063 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3064 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3065 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003066 return emulate_gp(ctxt, 0);
3067
3068 return X86EMUL_CONTINUE;
3069}
3070
3071static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3072{
3073 u64 msr_data;
3074
Avi Kivitydd856ef2012-08-27 23:46:17 +03003075 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003076 return emulate_gp(ctxt, 0);
3077
Avi Kivitydd856ef2012-08-27 23:46:17 +03003078 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3079 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003080 return X86EMUL_CONTINUE;
3081}
3082
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003083static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3084{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003085 if (ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003086 return emulate_ud(ctxt);
3087
Avi Kivity9dac77f2011-06-01 15:34:25 +03003088 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003089 return X86EMUL_CONTINUE;
3090}
3091
3092static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3093{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003094 u16 sel = ctxt->src.val;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003095
Avi Kivity9dac77f2011-06-01 15:34:25 +03003096 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003097 return emulate_ud(ctxt);
3098
Avi Kivity9dac77f2011-06-01 15:34:25 +03003099 if (ctxt->modrm_reg == VCPU_SREG_SS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003100 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3101
3102 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003103 ctxt->dst.type = OP_NONE;
3104 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003105}
3106
Avi Kivitya14e5792012-06-13 12:28:33 +03003107static int em_lldt(struct x86_emulate_ctxt *ctxt)
3108{
3109 u16 sel = ctxt->src.val;
3110
3111 /* Disable writeback. */
3112 ctxt->dst.type = OP_NONE;
3113 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3114}
3115
Avi Kivity80890002012-06-13 16:33:29 +03003116static int em_ltr(struct x86_emulate_ctxt *ctxt)
3117{
3118 u16 sel = ctxt->src.val;
3119
3120 /* Disable writeback. */
3121 ctxt->dst.type = OP_NONE;
3122 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3123}
3124
Avi Kivity38503912011-03-31 18:48:09 +02003125static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3126{
Avi Kivity9fa088f2011-03-31 18:54:30 +02003127 int rc;
3128 ulong linear;
3129
Avi Kivity9dac77f2011-06-01 15:34:25 +03003130 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02003131 if (rc == X86EMUL_CONTINUE)
Avi Kivity3cb16fe2011-04-20 15:38:44 +03003132 ctxt->ops->invlpg(ctxt, linear);
Avi Kivity38503912011-03-31 18:48:09 +02003133 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003134 ctxt->dst.type = OP_NONE;
Avi Kivity38503912011-03-31 18:48:09 +02003135 return X86EMUL_CONTINUE;
3136}
3137
Avi Kivity2d04a052011-04-20 15:32:49 +03003138static int em_clts(struct x86_emulate_ctxt *ctxt)
3139{
3140 ulong cr0;
3141
3142 cr0 = ctxt->ops->get_cr(ctxt, 0);
3143 cr0 &= ~X86_CR0_TS;
3144 ctxt->ops->set_cr(ctxt, 0, cr0);
3145 return X86EMUL_CONTINUE;
3146}
3147
Avi Kivity26d05cc2011-04-21 12:07:59 +03003148static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3149{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003150 int rc;
3151
Avi Kivity9dac77f2011-06-01 15:34:25 +03003152 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003153 return X86EMUL_UNHANDLEABLE;
3154
3155 rc = ctxt->ops->fix_hypercall(ctxt);
3156 if (rc != X86EMUL_CONTINUE)
3157 return rc;
3158
3159 /* Let the processor re-execute the fixed hypercall */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003160 ctxt->_eip = ctxt->eip;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003161 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003162 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003163 return X86EMUL_CONTINUE;
3164}
3165
Avi Kivity96051572012-06-10 17:21:18 +03003166static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3167 void (*get)(struct x86_emulate_ctxt *ctxt,
3168 struct desc_ptr *ptr))
3169{
3170 struct desc_ptr desc_ptr;
3171
3172 if (ctxt->mode == X86EMUL_MODE_PROT64)
3173 ctxt->op_bytes = 8;
3174 get(ctxt, &desc_ptr);
3175 if (ctxt->op_bytes == 2) {
3176 ctxt->op_bytes = 4;
3177 desc_ptr.address &= 0x00ffffff;
3178 }
3179 /* Disable writeback. */
3180 ctxt->dst.type = OP_NONE;
3181 return segmented_write(ctxt, ctxt->dst.addr.mem,
3182 &desc_ptr, 2 + ctxt->op_bytes);
3183}
3184
3185static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3186{
3187 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3188}
3189
3190static int em_sidt(struct x86_emulate_ctxt *ctxt)
3191{
3192 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3193}
3194
Avi Kivity26d05cc2011-04-21 12:07:59 +03003195static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3196{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003197 struct desc_ptr desc_ptr;
3198 int rc;
3199
Avi Kivity510425f2012-06-07 17:04:36 +03003200 if (ctxt->mode == X86EMUL_MODE_PROT64)
3201 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003202 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Avi Kivity26d05cc2011-04-21 12:07:59 +03003203 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003204 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003205 if (rc != X86EMUL_CONTINUE)
3206 return rc;
3207 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3208 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003209 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003210 return X86EMUL_CONTINUE;
3211}
3212
Avi Kivity5ef39c72011-04-21 12:21:50 +03003213static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003214{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003215 int rc;
3216
Avi Kivity5ef39c72011-04-21 12:21:50 +03003217 rc = ctxt->ops->fix_hypercall(ctxt);
3218
Avi Kivity26d05cc2011-04-21 12:07:59 +03003219 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003220 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003221 return rc;
3222}
3223
3224static int em_lidt(struct x86_emulate_ctxt *ctxt)
3225{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003226 struct desc_ptr desc_ptr;
3227 int rc;
3228
Avi Kivity510425f2012-06-07 17:04:36 +03003229 if (ctxt->mode == X86EMUL_MODE_PROT64)
3230 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003231 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Takuya Yoshikawa509cf9f2011-05-02 02:25:07 +09003232 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003233 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003234 if (rc != X86EMUL_CONTINUE)
3235 return rc;
3236 ctxt->ops->set_idt(ctxt, &desc_ptr);
3237 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003238 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003239 return X86EMUL_CONTINUE;
3240}
3241
3242static int em_smsw(struct x86_emulate_ctxt *ctxt)
3243{
Nadav Amit32e94d02014-06-02 18:34:11 +03003244 if (ctxt->dst.type == OP_MEM)
3245 ctxt->dst.bytes = 2;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003246 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003247 return X86EMUL_CONTINUE;
3248}
3249
3250static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3251{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003252 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003253 | (ctxt->src.val & 0x0f));
3254 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003255 return X86EMUL_CONTINUE;
3256}
3257
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003258static int em_loop(struct x86_emulate_ctxt *ctxt)
3259{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003260 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3261 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
Avi Kivity9dac77f2011-06-01 15:34:25 +03003262 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3263 jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003264
3265 return X86EMUL_CONTINUE;
3266}
3267
3268static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3269{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003270 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003271 jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003272
3273 return X86EMUL_CONTINUE;
3274}
3275
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003276static int em_in(struct x86_emulate_ctxt *ctxt)
3277{
3278 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3279 &ctxt->dst.val))
3280 return X86EMUL_IO_NEEDED;
3281
3282 return X86EMUL_CONTINUE;
3283}
3284
3285static int em_out(struct x86_emulate_ctxt *ctxt)
3286{
3287 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3288 &ctxt->src.val, 1);
3289 /* Disable writeback. */
3290 ctxt->dst.type = OP_NONE;
3291 return X86EMUL_CONTINUE;
3292}
3293
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003294static int em_cli(struct x86_emulate_ctxt *ctxt)
3295{
3296 if (emulator_bad_iopl(ctxt))
3297 return emulate_gp(ctxt, 0);
3298
3299 ctxt->eflags &= ~X86_EFLAGS_IF;
3300 return X86EMUL_CONTINUE;
3301}
3302
3303static int em_sti(struct x86_emulate_ctxt *ctxt)
3304{
3305 if (emulator_bad_iopl(ctxt))
3306 return emulate_gp(ctxt, 0);
3307
3308 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3309 ctxt->eflags |= X86_EFLAGS_IF;
3310 return X86EMUL_CONTINUE;
3311}
3312
Avi Kivity6d6eede2012-06-07 14:11:36 +03003313static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3314{
3315 u32 eax, ebx, ecx, edx;
3316
Avi Kivitydd856ef2012-08-27 23:46:17 +03003317 eax = reg_read(ctxt, VCPU_REGS_RAX);
3318 ecx = reg_read(ctxt, VCPU_REGS_RCX);
Avi Kivity6d6eede2012-06-07 14:11:36 +03003319 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003320 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3321 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3322 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3323 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
Avi Kivity6d6eede2012-06-07 14:11:36 +03003324 return X86EMUL_CONTINUE;
3325}
3326
Paolo Bonzini98f73632013-10-31 11:19:42 +01003327static int em_sahf(struct x86_emulate_ctxt *ctxt)
3328{
3329 u32 flags;
3330
3331 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3332 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3333
3334 ctxt->eflags &= ~0xffUL;
3335 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3336 return X86EMUL_CONTINUE;
3337}
3338
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003339static int em_lahf(struct x86_emulate_ctxt *ctxt)
3340{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003341 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3342 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003343 return X86EMUL_CONTINUE;
3344}
3345
Avi Kivity92998362012-06-13 12:25:06 +03003346static int em_bswap(struct x86_emulate_ctxt *ctxt)
3347{
3348 switch (ctxt->op_bytes) {
3349#ifdef CONFIG_X86_64
3350 case 8:
3351 asm("bswap %0" : "+r"(ctxt->dst.val));
3352 break;
3353#endif
3354 default:
3355 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3356 break;
3357 }
3358 return X86EMUL_CONTINUE;
3359}
3360
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003361static bool valid_cr(int nr)
3362{
3363 switch (nr) {
3364 case 0:
3365 case 2 ... 4:
3366 case 8:
3367 return true;
3368 default:
3369 return false;
3370 }
3371}
3372
3373static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3374{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003375 if (!valid_cr(ctxt->modrm_reg))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003376 return emulate_ud(ctxt);
3377
3378 return X86EMUL_CONTINUE;
3379}
3380
3381static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3382{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003383 u64 new_val = ctxt->src.val64;
3384 int cr = ctxt->modrm_reg;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003385 u64 efer = 0;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003386
3387 static u64 cr_reserved_bits[] = {
3388 0xffffffff00000000ULL,
3389 0, 0, 0, /* CR3 checked later */
3390 CR4_RESERVED_BITS,
3391 0, 0, 0,
3392 CR8_RESERVED_BITS,
3393 };
3394
3395 if (!valid_cr(cr))
3396 return emulate_ud(ctxt);
3397
3398 if (new_val & cr_reserved_bits[cr])
3399 return emulate_gp(ctxt, 0);
3400
3401 switch (cr) {
3402 case 0: {
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003403 u64 cr4;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003404 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3405 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3406 return emulate_gp(ctxt, 0);
3407
Avi Kivity717746e2011-04-20 13:37:53 +03003408 cr4 = ctxt->ops->get_cr(ctxt, 4);
3409 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003410
3411 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3412 !(cr4 & X86_CR4_PAE))
3413 return emulate_gp(ctxt, 0);
3414
3415 break;
3416 }
3417 case 3: {
3418 u64 rsvd = 0;
3419
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003420 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3421 if (efer & EFER_LMA)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003422 rsvd = CR3_L_MODE_RESERVED_BITS;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003423
3424 if (new_val & rsvd)
3425 return emulate_gp(ctxt, 0);
3426
3427 break;
3428 }
3429 case 4: {
Avi Kivity717746e2011-04-20 13:37:53 +03003430 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003431
3432 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3433 return emulate_gp(ctxt, 0);
3434
3435 break;
3436 }
3437 }
3438
3439 return X86EMUL_CONTINUE;
3440}
3441
Joerg Roedel3b88e412011-04-04 12:39:29 +02003442static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3443{
3444 unsigned long dr7;
3445
Avi Kivity717746e2011-04-20 13:37:53 +03003446 ctxt->ops->get_dr(ctxt, 7, &dr7);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003447
3448	/* Check if DR7.GD (general detect enable, bit 13) is set */
3449 return dr7 & (1 << 13);
3450}
3451
3452static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3453{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003454 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003455 u64 cr4;
3456
3457 if (dr > 7)
3458 return emulate_ud(ctxt);
3459
Avi Kivity717746e2011-04-20 13:37:53 +03003460 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003461 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3462 return emulate_ud(ctxt);
3463
3464 if (check_dr7_gd(ctxt))
3465 return emulate_db(ctxt);
3466
3467 return X86EMUL_CONTINUE;
3468}
3469
3470static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3471{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003472 u64 new_val = ctxt->src.val64;
3473 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003474
3475 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3476 return emulate_gp(ctxt, 0);
3477
3478 return check_dr_read(ctxt);
3479}
3480
Joerg Roedel01de8b02011-04-04 12:39:31 +02003481static int check_svme(struct x86_emulate_ctxt *ctxt)
3482{
3483 u64 efer;
3484
Avi Kivity717746e2011-04-20 13:37:53 +03003485 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003486
3487 if (!(efer & EFER_SVME))
3488 return emulate_ud(ctxt);
3489
3490 return X86EMUL_CONTINUE;
3491}
3492
3493static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3494{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003495 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003496
3497 /* Valid physical address? */
Randy Dunlapd4224442011-04-21 09:09:22 -07003498 if (rax & 0xffff000000000000ULL)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003499 return emulate_gp(ctxt, 0);
3500
3501 return check_svme(ctxt);
3502}
3503
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003504static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3505{
Avi Kivity717746e2011-04-20 13:37:53 +03003506 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003507
Avi Kivity717746e2011-04-20 13:37:53 +03003508 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003509 return emulate_ud(ctxt);
3510
3511 return X86EMUL_CONTINUE;
3512}
3513
Joerg Roedel80612522011-04-04 12:39:33 +02003514static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3515{
Avi Kivity717746e2011-04-20 13:37:53 +03003516 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003517 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
Joerg Roedel80612522011-04-04 12:39:33 +02003518
Avi Kivity717746e2011-04-20 13:37:53 +03003519 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
Nadav Amit67f4d422014-06-02 18:34:09 +03003520 ctxt->ops->check_pmc(ctxt, rcx))
Joerg Roedel80612522011-04-04 12:39:33 +02003521 return emulate_gp(ctxt, 0);
3522
3523 return X86EMUL_CONTINUE;
3524}
3525
Joerg Roedelf6511932011-04-04 12:39:35 +02003526static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3527{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003528 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3529 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003530 return emulate_gp(ctxt, 0);
3531
3532 return X86EMUL_CONTINUE;
3533}
3534
3535static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3536{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003537 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3538 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003539 return emulate_gp(ctxt, 0);
3540
3541 return X86EMUL_CONTINUE;
3542}
3543
Avi Kivity73fba5f2010-07-29 15:11:53 +03003544#define D(_y) { .flags = (_y) }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003545#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3546#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3547 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Gleb Natapov0b789ee2013-04-11 11:59:55 +03003548#define N D(NotImpl)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003549#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003550#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3551#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
Gleb Natapov045a2822012-12-20 16:57:43 +02003552#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003553#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
Avi Kivitye28bbd42013-01-04 16:18:48 +02003554#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
Avi Kivityc4f035c2011-04-04 12:39:22 +02003555#define II(_f, _e, _i) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003556 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
Joerg Roedeld09beab2011-04-04 12:39:25 +02003557#define IIP(_f, _e, _i, _p) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003558 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3559 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Avi Kivityaa97bb42010-01-20 18:09:23 +02003560#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003561
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003562#define D2bv(_f) D((_f) | ByteOp), D(_f)
Joerg Roedelf6511932011-04-04 12:39:35 +02003563#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003564#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
Avi Kivityf7857f32013-01-04 16:18:53 +02003565#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003566#define I2bvIP(_f, _e, _i, _p) \
3567 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003568
Avi Kivityfb864fb2013-01-04 16:18:54 +02003569#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3570 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3571 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
Avi Kivity6230f7f2010-08-26 18:34:55 +03003572
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003573static const struct opcode group7_rm1[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003574 DI(SrcNone | Priv, monitor),
3575 DI(SrcNone | Priv, mwait),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003576 N, N, N, N, N, N,
3577};
3578
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003579static const struct opcode group7_rm3[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003580 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
Borislav Petkovb51e9742013-09-22 16:44:52 +02003581 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003582 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3583 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3584 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3585 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3586 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3587 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003588};
Avi Kivity6230f7f2010-08-26 18:34:55 +03003589
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003590static const struct opcode group7_rm7[] = {
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003591 N,
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003592 DIP(SrcNone, rdtscp, check_rdtsc),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003593 N, N, N, N, N, N,
3594};
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09003595
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003596static const struct opcode group1[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003597 F(Lock, em_add),
3598 F(Lock | PageTable, em_or),
3599 F(Lock, em_adc),
3600 F(Lock, em_sbb),
3601 F(Lock | PageTable, em_and),
3602 F(Lock, em_sub),
3603 F(Lock, em_xor),
3604 F(NoWrite, em_cmp),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003605};
3606
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003607static const struct opcode group1A[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003608 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003609};
3610
Avi Kivity007a3b52013-01-19 19:51:51 +02003611static const struct opcode group2[] = {
3612 F(DstMem | ModRM, em_rol),
3613 F(DstMem | ModRM, em_ror),
3614 F(DstMem | ModRM, em_rcl),
3615 F(DstMem | ModRM, em_rcr),
3616 F(DstMem | ModRM, em_shl),
3617 F(DstMem | ModRM, em_shr),
3618 F(DstMem | ModRM, em_shl),
3619 F(DstMem | ModRM, em_sar),
3620};
3621
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003622static const struct opcode group3[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003623 F(DstMem | SrcImm | NoWrite, em_test),
3624 F(DstMem | SrcImm | NoWrite, em_test),
Avi Kivity45a14672013-01-04 16:18:52 +02003625 F(DstMem | SrcNone | Lock, em_not),
3626 F(DstMem | SrcNone | Lock, em_neg),
Avi Kivityb9fa4092013-02-09 11:31:48 +02003627 F(DstXacc | Src2Mem, em_mul_ex),
3628 F(DstXacc | Src2Mem, em_imul_ex),
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02003629 F(DstXacc | Src2Mem, em_div_ex),
3630 F(DstXacc | Src2Mem, em_idiv_ex),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003631};
3632
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003633static const struct opcode group4[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003634 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3635 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003636 N, N, N, N, N, N,
3637};
3638
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003639static const struct opcode group5[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003640 F(DstMem | SrcNone | Lock, em_inc),
3641 F(DstMem | SrcNone | Lock, em_dec),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003642 I(SrcMem | Stack, em_grp45),
3643 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3644 I(SrcMem | Stack, em_grp45),
3645 I(SrcMemFAddr | ImplicitOps, em_grp45),
Gleb Natapov188424b2013-04-11 12:32:14 +03003646 I(SrcMem | Stack, em_grp45), D(Undefined),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003647};
3648
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003649static const struct opcode group6[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003650 DI(Prot, sldt),
3651 DI(Prot, str),
Avi Kivitya14e5792012-06-13 12:28:33 +03003652 II(Prot | Priv | SrcMem16, em_lldt, lldt),
Avi Kivity80890002012-06-13 16:33:29 +03003653 II(Prot | Priv | SrcMem16, em_ltr, ltr),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02003654 N, N, N, N,
3655};
3656
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003657static const struct group_dual group7 = { {
Nadav Amit606b1c32014-06-02 18:34:06 +03003658 II(Mov | DstMem, em_sgdt, sgdt),
3659 II(Mov | DstMem, em_sidt, sidt),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003660 II(SrcMem | Priv, em_lgdt, lgdt),
3661 II(SrcMem | Priv, em_lidt, lidt),
3662 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3663 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3664 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003665}, {
Borislav Petkovb51e9742013-09-22 16:44:52 +02003666 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
Avi Kivity5ef39c72011-04-21 12:21:50 +03003667 EXT(0, group7_rm1),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003668 N, EXT(0, group7_rm3),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003669 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3670 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3671 EXT(0, group7_rm7),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003672} };
3673
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003674static const struct opcode group8[] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003675 N, N, N, N,
Avi Kivity11c363b2013-01-19 19:51:54 +02003676 F(DstMem | SrcImmByte | NoWrite, em_bt),
3677 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3678 F(DstMem | SrcImmByte | Lock, em_btr),
3679 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003680};
3681
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003682static const struct group_dual group9 = { {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003683 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003684}, {
3685 N, N, N, N, N, N, N, N,
3686} };
3687
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003688static const struct opcode group11[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003689 I(DstMem | SrcImm | Mov | PageTable, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003690 X7(D(Undefined)),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03003691};
3692
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003693static const struct gprefix pfx_0f_6f_0f_7f = {
Avi Kivitye5971752012-04-09 18:40:03 +03003694 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
Avi Kivityaa97bb42010-01-20 18:09:23 +02003695};
3696
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003697static const struct gprefix pfx_vmovntpx = {
Avi Kivity3e114eb2012-04-09 18:40:01 +03003698 I(0, em_mov), N, N, N,
3699};
3700
Igor Mammedov27ce8252014-03-15 21:01:59 +01003701static const struct gprefix pfx_0f_28_0f_29 = {
Igor Mammedov6fec27d2014-03-15 21:02:00 +01003702 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01003703};
3704
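/*
 * x87 escape tables: the first eight entries cover the memory forms
 * (ModRM.mod != 3), indexed by ModRM.reg; the second block of 64 entries
 * covers the register forms (ModRM bytes 0xc0-0xff), indexed by
 * ModRM - 0xc0.  See the Escape case in x86_decode_insn().
 */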
Gleb Natapov045a2822012-12-20 16:57:43 +02003705static const struct escape escape_d9 = { {
3706 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3707}, {
3708 /* 0xC0 - 0xC7 */
3709 N, N, N, N, N, N, N, N,
3710 /* 0xC8 - 0xCF */
3711 N, N, N, N, N, N, N, N,
3712	/* 0xD0 - 0xD7 */
3713 N, N, N, N, N, N, N, N,
3714 /* 0xD8 - 0xDF */
3715 N, N, N, N, N, N, N, N,
3716 /* 0xE0 - 0xE7 */
3717 N, N, N, N, N, N, N, N,
3718 /* 0xE8 - 0xEF */
3719 N, N, N, N, N, N, N, N,
3720 /* 0xF0 - 0xF7 */
3721 N, N, N, N, N, N, N, N,
3722 /* 0xF8 - 0xFF */
3723 N, N, N, N, N, N, N, N,
3724} };
3725
3726static const struct escape escape_db = { {
3727 N, N, N, N, N, N, N, N,
3728}, {
3729 /* 0xC0 - 0xC7 */
3730 N, N, N, N, N, N, N, N,
3731 /* 0xC8 - 0xCF */
3732 N, N, N, N, N, N, N, N,
3733 /* 0xD0 - 0xC7 */
3734 N, N, N, N, N, N, N, N,
3735 /* 0xD8 - 0xDF */
3736 N, N, N, N, N, N, N, N,
3737 /* 0xE0 - 0xE7 */
3738 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3739 /* 0xE8 - 0xEF */
3740 N, N, N, N, N, N, N, N,
3741 /* 0xF0 - 0xF7 */
3742 N, N, N, N, N, N, N, N,
3743 /* 0xF8 - 0xFF */
3744 N, N, N, N, N, N, N, N,
3745} };
3746
3747static const struct escape escape_dd = { {
3748 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3749}, {
3750 /* 0xC0 - 0xC7 */
3751 N, N, N, N, N, N, N, N,
3752 /* 0xC8 - 0xCF */
3753 N, N, N, N, N, N, N, N,
3754	/* 0xD0 - 0xD7 */
3755 N, N, N, N, N, N, N, N,
3756 /* 0xD8 - 0xDF */
3757 N, N, N, N, N, N, N, N,
3758 /* 0xE0 - 0xE7 */
3759 N, N, N, N, N, N, N, N,
3760 /* 0xE8 - 0xEF */
3761 N, N, N, N, N, N, N, N,
3762 /* 0xF0 - 0xF7 */
3763 N, N, N, N, N, N, N, N,
3764 /* 0xF8 - 0xFF */
3765 N, N, N, N, N, N, N, N,
3766} };
3767
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003768static const struct opcode opcode_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003769 /* 0x00 - 0x07 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003770 F6ALU(Lock, em_add),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003771 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3772 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003773 /* 0x08 - 0x0F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003774 F6ALU(Lock | PageTable, em_or),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003775 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3776 N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003777 /* 0x10 - 0x17 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003778 F6ALU(Lock, em_adc),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003779 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3780 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003781 /* 0x18 - 0x1F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003782 F6ALU(Lock, em_sbb),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003783 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3784 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003785 /* 0x20 - 0x27 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003786 F6ALU(Lock | PageTable, em_and), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003787 /* 0x28 - 0x2F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003788 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003789 /* 0x30 - 0x37 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003790 F6ALU(Lock, em_xor), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003791 /* 0x38 - 0x3F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003792 F6ALU(NoWrite, em_cmp), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003793 /* 0x40 - 0x4F */
Avi Kivity95413dc2013-01-19 19:51:53 +02003794 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003795 /* 0x50 - 0x57 */
Avi Kivity63540382010-07-29 15:11:55 +03003796 X8(I(SrcReg | Stack, em_push)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003797 /* 0x58 - 0x5F */
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09003798 X8(I(DstReg | Stack, em_pop)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003799 /* 0x60 - 0x67 */
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09003800 I(ImplicitOps | Stack | No64, em_pusha),
3801 I(ImplicitOps | Stack | No64, em_popa),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003802 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3803 N, N, N, N,
3804 /* 0x68 - 0x6F */
Avi Kivityd46164d2010-08-18 19:29:33 +03003805 I(SrcImm | Mov | Stack, em_push),
3806 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003807 I(SrcImmByte | Mov | Stack, em_push),
3808 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
Gleb Natapovb3356bf2012-09-03 15:24:29 +03003809 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
Takuya Yoshikawa2b5e97e2011-11-23 12:27:39 +09003810 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
Avi Kivity73fba5f2010-07-29 15:11:53 +03003811 /* 0x70 - 0x7F */
3812 X16(D(SrcImmByte)),
3813 /* 0x80 - 0x87 */
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003814 G(ByteOp | DstMem | SrcImm, group1),
3815 G(DstMem | SrcImm, group1),
3816 G(ByteOp | DstMem | SrcImm | No64, group1),
3817 G(DstMem | SrcImmByte, group1),
Avi Kivityfb864fb2013-01-04 16:18:54 +02003818 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003819 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003820 /* 0x88 - 0x8F */
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003821 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003822 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003823 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003824 D(ModRM | SrcMem | NoAccess | DstReg),
3825 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3826 G(0, group1A),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003827 /* 0x90 - 0x97 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02003828 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003829 /* 0x98 - 0x9F */
Avi Kivity61429142010-08-19 15:13:00 +03003830 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
Wei Yongjuncc4feed2010-08-25 14:10:53 +08003831 I(SrcImmFAddr | No64, em_call_far), N,
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09003832 II(ImplicitOps | Stack, em_pushf, pushf),
Paolo Bonzini98f73632013-10-31 11:19:42 +01003833 II(ImplicitOps | Stack, em_popf, popf),
3834 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003835 /* 0xA0 - 0xA7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003836 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003837 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003838 I2bv(SrcSI | DstDI | Mov | String, em_mov),
Avi Kivityfb864fb2013-01-04 16:18:54 +02003839 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003840 /* 0xA8 - 0xAF */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003841 F2bv(DstAcc | SrcImm | NoWrite, em_test),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003842 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3843 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
Avi Kivityfb864fb2013-01-04 16:18:54 +02003844 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003845 /* 0xB0 - 0xB7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003846 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003847 /* 0xB8 - 0xBF */
Nadav Amit5e2c6882012-12-06 21:55:10 -02003848 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003849 /* 0xC0 - 0xC7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02003850 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
Avi Kivity40ece7c2010-08-18 15:12:09 +03003851 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09003852 I(ImplicitOps | Stack, em_ret),
Avi Kivityd4b43252011-09-13 10:45:50 +03003853 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3854 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03003855 G(ByteOp, group11), G(0, group11),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003856 /* 0xC8 - 0xCF */
Avi Kivity612e89f2012-06-12 20:03:23 +03003857 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
Bruce Rogers32611072013-09-09 09:40:20 -06003858 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
3859 I(ImplicitOps | Stack, em_ret_far),
Avi Kivity3c6e2762011-04-04 12:39:23 +02003860 D(ImplicitOps), DI(SrcImmByte, intn),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09003861 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003862 /* 0xD0 - 0xD7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02003863 G(Src2One | ByteOp, group2), G(Src2One, group2),
3864 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02003865 I(DstAcc | SrcImmUByte | No64, em_aam),
Paolo Bonzini326f5782013-05-09 11:32:51 +02003866 I(DstAcc | SrcImmUByte | No64, em_aad),
3867 F(DstAcc | ByteOp | No64, em_salc),
Paolo Bonzini7fa57952013-05-09 11:32:50 +02003868 I(DstAcc | SrcXLat | ByteOp, em_mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003869 /* 0xD8 - 0xDF */
Gleb Natapov045a2822012-12-20 16:57:43 +02003870 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003871 /* 0xE0 - 0xE7 */
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003872 X3(I(SrcImmByte, em_loop)),
3873 I(SrcImmByte, em_jcxz),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003874 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
3875 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003876 /* 0xE8 - 0xEF */
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003877 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09003878 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003879 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
3880 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003881 /* 0xF0 - 0xF7 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02003882 N, DI(ImplicitOps, icebp), N, N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02003883 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3884 G(ByteOp, group3), G(0, group3),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003885 /* 0xF8 - 0xFF */
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003886 D(ImplicitOps), D(ImplicitOps),
3887 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003888 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3889};
3890
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003891static const struct opcode twobyte_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003892 /* 0x00 - 0x0F */
Joerg Roedeldee6bb72011-04-04 12:39:30 +02003893 G(0, group6), GD(0, &group7), N, N,
Borislav Petkovb51e9742013-09-22 16:44:52 +02003894 N, I(ImplicitOps | EmulateOnUD, em_syscall),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09003895 II(ImplicitOps | Priv, em_clts, clts), N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02003896 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003897 N, D(ImplicitOps | ModRM), N, N,
3898 /* 0x10 - 0x1F */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02003899 N, N, N, N, N, N, N, N,
3900 D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003901 /* 0x20 - 0x2F */
Nadav Amit9b88ae92014-05-25 23:05:21 +03003902 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
3903 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
3904 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
3905 check_cr_write),
3906 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
3907 check_dr_write),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003908 N, N, N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01003909 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
3910 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
3911 N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
Avi Kivity3e114eb2012-04-09 18:40:01 +03003912 N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003913 /* 0x30 - 0x3F */
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003914 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
Joerg Roedel80612522011-04-04 12:39:33 +02003915 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003916 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
Avi Kivity222d21a2011-11-10 14:57:30 +02003917 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
Borislav Petkovb51e9742013-09-22 16:44:52 +02003918 I(ImplicitOps | EmulateOnUD, em_sysenter),
3919 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
Avi Kivityd8671622011-02-01 16:32:03 +02003920 N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003921 N, N, N, N, N, N, N, N,
3922 /* 0x40 - 0x4F */
Nadav Amit140bad82014-06-15 16:13:00 +03003923 X16(D(DstReg | SrcMem | ModRM)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003924 /* 0x50 - 0x5F */
3925 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3926 /* 0x60 - 0x6F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02003927 N, N, N, N,
3928 N, N, N, N,
3929 N, N, N, N,
3930 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003931 /* 0x70 - 0x7F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02003932 N, N, N, N,
3933 N, N, N, N,
3934 N, N, N, N,
3935 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003936 /* 0x80 - 0x8F */
3937 X16(D(SrcImm)),
3938 /* 0x90 - 0x9F */
Wei Yongjunee45b582010-08-06 17:10:07 +08003939	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003940 /* 0xA0 - 0xA7 */
Avi Kivity1cd196e2011-09-13 10:45:51 +03003941 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
Avi Kivity11c363b2013-01-19 19:51:54 +02003942 II(ImplicitOps, em_cpuid, cpuid),
3943 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
Avi Kivity0bdea062013-01-19 19:51:50 +02003944 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
3945 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003946 /* 0xA8 - 0xAF */
Avi Kivity1cd196e2011-09-13 10:45:51 +03003947 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003948 DI(ImplicitOps, rsm),
Avi Kivity11c363b2013-01-19 19:51:54 +02003949 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
Avi Kivity0bdea062013-01-19 19:51:50 +02003950 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
3951 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
Avi Kivity4d758342013-01-19 19:51:55 +02003952 D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003953 /* 0xB0 - 0xB7 */
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09003954 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
Avi Kivityd4b43252011-09-13 10:45:50 +03003955 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
Avi Kivity11c363b2013-01-19 19:51:54 +02003956 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
Avi Kivityd4b43252011-09-13 10:45:50 +03003957 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3958 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02003959 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003960 /* 0xB8 - 0xBF */
3961 N, N,
Takuya Yoshikawace7faab2011-11-22 15:17:48 +09003962 G(BitOp, group8),
Avi Kivity11c363b2013-01-19 19:51:54 +02003963 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
3964 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02003965 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity92998362012-06-13 12:25:06 +03003966 /* 0xC0 - 0xC7 */
Avi Kivitye47a5f52013-02-09 11:31:51 +02003967 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
Wei Yongjun92f738a2010-08-17 09:19:34 +08003968 N, D(DstMem | SrcReg | ModRM | Mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003969 N, N, N, GD(0, &group9),
Avi Kivity92998362012-06-13 12:25:06 +03003970 /* 0xC8 - 0xCF */
3971 X8(I(DstReg, em_bswap)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003972 /* 0xD0 - 0xDF */
3973 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3974 /* 0xE0 - 0xEF */
3975 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3976 /* 0xF0 - 0xFF */
3977 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3978};
3979
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01003980static const struct gprefix three_byte_0f_38_f0 = {
Borislav Petkov84cffe42013-10-29 12:54:56 +01003981 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01003982};
3983
3984static const struct gprefix three_byte_0f_38_f1 = {
Borislav Petkov84cffe42013-10-29 12:54:56 +01003985 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01003986};
3987
3988/*
3989 * Insns below are indexed by the third opcode byte and, where needed,
3990 * further selected by the 66/F2/F3 prefix (via the gprefix tables above).
3991 */
3992static const struct opcode opcode_map_0f_38[256] = {
3993 /* 0x00 - 0x7f */
3994 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
Borislav Petkov84cffe42013-10-29 12:54:56 +01003995 /* 0x80 - 0xef */
3996 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
3997 /* 0xf0 - 0xf1 */
3998 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
3999 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4000 /* 0xf2 - 0xff */
4001 N, N, X4(N), X8(N)
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004002};
4003
Avi Kivity73fba5f2010-07-29 15:11:53 +03004004#undef D
4005#undef N
4006#undef G
4007#undef GD
4008#undef I
Avi Kivityaa97bb42010-01-20 18:09:23 +02004009#undef GP
Joerg Roedel01de8b02011-04-04 12:39:31 +02004010#undef EXT
Avi Kivity73fba5f2010-07-29 15:11:53 +03004011
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004012#undef D2bv
Joerg Roedelf6511932011-04-04 12:39:35 +02004013#undef D2bvIP
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004014#undef I2bv
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004015#undef I2bvIP
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09004016#undef I6ALU
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004017
Avi Kivity9dac77f2011-06-01 15:34:25 +03004018static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
Avi Kivity39f21ee2010-08-18 19:20:21 +03004019{
4020 unsigned size;
4021
Avi Kivity9dac77f2011-06-01 15:34:25 +03004022 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004023 if (size == 8)
4024 size = 4;
4025 return size;
4026}
4027
4028static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4029 unsigned size, bool sign_extension)
4030{
Avi Kivity39f21ee2010-08-18 19:20:21 +03004031 int rc = X86EMUL_CONTINUE;
4032
4033 op->type = OP_IMM;
4034 op->bytes = size;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004035 op->addr.mem.ea = ctxt->_eip;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004036 /* NB. Immediates are sign-extended as necessary. */
4037 switch (op->bytes) {
4038 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004039 op->val = insn_fetch(s8, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004040 break;
4041 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004042 op->val = insn_fetch(s16, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004043 break;
4044 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004045 op->val = insn_fetch(s32, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004046 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004047 case 8:
4048 op->val = insn_fetch(s64, ctxt);
4049 break;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004050 }
4051 if (!sign_extension) {
4052 switch (op->bytes) {
4053 case 1:
4054 op->val &= 0xff;
4055 break;
4056 case 2:
4057 op->val &= 0xffff;
4058 break;
4059 case 4:
4060 op->val &= 0xffffffff;
4061 break;
4062 }
4063 }
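	/* The insn_fetch() macros above jump here when the fetch fails. */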
4064done:
4065 return rc;
4066}
4067
Avi Kivitya9945542011-09-13 10:45:41 +03004068static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4069 unsigned d)
4070{
4071 int rc = X86EMUL_CONTINUE;
4072
4073 switch (d) {
4074 case OpReg:
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004075 decode_register_operand(ctxt, op);
Avi Kivitya9945542011-09-13 10:45:41 +03004076 break;
4077 case OpImmUByte:
Avi Kivity608aabe2011-09-13 10:45:45 +03004078 rc = decode_imm(ctxt, op, 1, false);
Avi Kivitya9945542011-09-13 10:45:41 +03004079 break;
4080 case OpMem:
Avi Kivity41ddf972011-09-13 10:45:48 +03004081 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity0fe59122011-09-13 10:45:47 +03004082 mem_common:
Avi Kivitya9945542011-09-13 10:45:41 +03004083 *op = ctxt->memop;
4084 ctxt->memopp = op;
Paolo Bonzini96888972014-04-01 14:54:19 +02004085 if (ctxt->d & BitOp)
Avi Kivitya9945542011-09-13 10:45:41 +03004086 fetch_bit_operand(ctxt);
4087 op->orig_val = op->val;
4088 break;
Avi Kivity41ddf972011-09-13 10:45:48 +03004089 case OpMem64:
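		/* 16 bytes when REX.W is set, i.e. cmpxchg16b rather than cmpxchg8b. */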
Nadav Amitaaa05f22014-06-02 18:34:10 +03004090 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
Avi Kivity41ddf972011-09-13 10:45:48 +03004091 goto mem_common;
Avi Kivitya9945542011-09-13 10:45:41 +03004092 case OpAcc:
4093 op->type = OP_REG;
4094 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004095 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Avi Kivitya9945542011-09-13 10:45:41 +03004096 fetch_register_operand(op);
4097 op->orig_val = op->val;
4098 break;
Avi Kivity820207c2013-02-09 11:31:45 +02004099 case OpAccLo:
4100 op->type = OP_REG;
4101 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4102 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4103 fetch_register_operand(op);
4104 op->orig_val = op->val;
4105 break;
4106 case OpAccHi:
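		/*
		 * High half of the extended accumulator (DX/EDX/RDX); byte-sized
		 * ops have no high half, so writeback is disabled for them.
		 */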
4107 if (ctxt->d & ByteOp) {
4108 op->type = OP_NONE;
4109 break;
4110 }
4111 op->type = OP_REG;
4112 op->bytes = ctxt->op_bytes;
4113 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4114 fetch_register_operand(op);
4115 op->orig_val = op->val;
4116 break;
Avi Kivitya9945542011-09-13 10:45:41 +03004117 case OpDI:
4118 op->type = OP_MEM;
4119 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4120 op->addr.mem.ea =
Avi Kivitydd856ef2012-08-27 23:46:17 +03004121 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
Avi Kivitya9945542011-09-13 10:45:41 +03004122 op->addr.mem.seg = VCPU_SREG_ES;
4123 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004124 op->count = 1;
Avi Kivitya9945542011-09-13 10:45:41 +03004125 break;
4126 case OpDX:
4127 op->type = OP_REG;
4128 op->bytes = 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004129 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivitya9945542011-09-13 10:45:41 +03004130 fetch_register_operand(op);
4131 break;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004132 case OpCL:
4133 op->bytes = 1;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004134 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004135 break;
4136 case OpImmByte:
4137 rc = decode_imm(ctxt, op, 1, true);
4138 break;
4139 case OpOne:
4140 op->bytes = 1;
4141 op->val = 1;
4142 break;
4143 case OpImm:
4144 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4145 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004146 case OpImm64:
4147 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4148 break;
Avi Kivity28867ce2012-01-16 15:08:44 +02004149 case OpMem8:
4150 ctxt->memop.bytes = 1;
Gleb Natapov660696d2013-04-24 13:38:36 +03004151 if (ctxt->memop.type == OP_REG) {
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02004152 ctxt->memop.addr.reg = decode_register(ctxt,
4153 ctxt->modrm_rm, true);
Gleb Natapov660696d2013-04-24 13:38:36 +03004154 fetch_register_operand(&ctxt->memop);
4155 }
Avi Kivity28867ce2012-01-16 15:08:44 +02004156 goto mem_common;
Avi Kivity0fe59122011-09-13 10:45:47 +03004157 case OpMem16:
4158 ctxt->memop.bytes = 2;
4159 goto mem_common;
4160 case OpMem32:
4161 ctxt->memop.bytes = 4;
4162 goto mem_common;
4163 case OpImmU16:
4164 rc = decode_imm(ctxt, op, 2, false);
4165 break;
4166 case OpImmU:
4167 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4168 break;
4169 case OpSI:
4170 op->type = OP_MEM;
4171 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4172 op->addr.mem.ea =
Avi Kivitydd856ef2012-08-27 23:46:17 +03004173 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
Avi Kivity0fe59122011-09-13 10:45:47 +03004174 op->addr.mem.seg = seg_override(ctxt);
4175 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004176 op->count = 1;
Avi Kivity0fe59122011-09-13 10:45:47 +03004177 break;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004178 case OpXLat:
4179 op->type = OP_MEM;
4180 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4181 op->addr.mem.ea =
4182 register_address(ctxt,
4183 reg_read(ctxt, VCPU_REGS_RBX) +
4184 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4185 op->addr.mem.seg = seg_override(ctxt);
4186 op->val = 0;
4187 break;
Avi Kivity0fe59122011-09-13 10:45:47 +03004188 case OpImmFAddr:
4189 op->type = OP_IMM;
4190 op->addr.mem.ea = ctxt->_eip;
4191 op->bytes = ctxt->op_bytes + 2;
4192 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4193 break;
4194 case OpMemFAddr:
4195 ctxt->memop.bytes = ctxt->op_bytes + 2;
4196 goto mem_common;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004197 case OpES:
4198 op->val = VCPU_SREG_ES;
4199 break;
4200 case OpCS:
4201 op->val = VCPU_SREG_CS;
4202 break;
4203 case OpSS:
4204 op->val = VCPU_SREG_SS;
4205 break;
4206 case OpDS:
4207 op->val = VCPU_SREG_DS;
4208 break;
4209 case OpFS:
4210 op->val = VCPU_SREG_FS;
4211 break;
4212 case OpGS:
4213 op->val = VCPU_SREG_GS;
4214 break;
Avi Kivitya9945542011-09-13 10:45:41 +03004215 case OpImplicit:
4216 /* Special instructions do their own operand decoding. */
4217 default:
4218 op->type = OP_NONE; /* Disable writeback. */
4219 break;
4220 }
4221
4222done:
4223 return rc;
4224}
4225
Takuya Yoshikawaef5d75c2011-05-15 00:57:43 +09004226int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004227{
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004228 int rc = X86EMUL_CONTINUE;
4229 int mode = ctxt->mode;
Avi Kivity46561642011-04-24 14:09:59 +03004230 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004231 bool op_prefix = false;
Avi Kivity46561642011-04-24 14:09:59 +03004232 struct opcode opcode;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004233
Avi Kivityf09ed832011-09-13 10:45:40 +03004234 ctxt->memop.type = OP_NONE;
4235 ctxt->memopp = NULL;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004236 ctxt->_eip = ctxt->eip;
4237 ctxt->fetch.start = ctxt->_eip;
4238 ctxt->fetch.end = ctxt->fetch.start + insn_len;
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004239 ctxt->opcode_len = 1;
Andre Przywaradc25e892010-12-21 11:12:07 +01004240 if (insn_len > 0)
Avi Kivity9dac77f2011-06-01 15:34:25 +03004241 memcpy(ctxt->fetch.data, insn, insn_len);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004242
4243 switch (mode) {
4244 case X86EMUL_MODE_REAL:
4245 case X86EMUL_MODE_VM86:
4246 case X86EMUL_MODE_PROT16:
4247 def_op_bytes = def_ad_bytes = 2;
4248 break;
4249 case X86EMUL_MODE_PROT32:
4250 def_op_bytes = def_ad_bytes = 4;
4251 break;
4252#ifdef CONFIG_X86_64
4253 case X86EMUL_MODE_PROT64:
4254 def_op_bytes = 4;
4255 def_ad_bytes = 8;
4256 break;
4257#endif
4258 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004259 return EMULATION_FAILED;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004260 }
4261
Avi Kivity9dac77f2011-06-01 15:34:25 +03004262 ctxt->op_bytes = def_op_bytes;
4263 ctxt->ad_bytes = def_ad_bytes;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004264
4265 /* Legacy prefixes. */
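	/*
	 * Note on the XOR idioms below: def_op_bytes ^ 6 toggles the operand
	 * size between 2 and 4 bytes, def_ad_bytes ^ 12 toggles the address
	 * size between 4 and 8 bytes (long mode), and def_ad_bytes ^ 6
	 * toggles it between 2 and 4 bytes elsewhere.
	 */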
4266 for (;;) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004267 switch (ctxt->b = insn_fetch(u8, ctxt)) {
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004268 case 0x66: /* operand-size override */
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004269 op_prefix = true;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004270 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004271 ctxt->op_bytes = def_op_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004272 break;
4273 case 0x67: /* address-size override */
4274 if (mode == X86EMUL_MODE_PROT64)
4275 /* switch between 4/8 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004276 ctxt->ad_bytes = def_ad_bytes ^ 12;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004277 else
4278 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004279 ctxt->ad_bytes = def_ad_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004280 break;
4281 case 0x26: /* ES override */
4282 case 0x2e: /* CS override */
4283 case 0x36: /* SS override */
4284 case 0x3e: /* DS override */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004285 set_seg_override(ctxt, (ctxt->b >> 3) & 3);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004286 break;
4287 case 0x64: /* FS override */
4288 case 0x65: /* GS override */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004289 set_seg_override(ctxt, ctxt->b & 7);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004290 break;
4291 case 0x40 ... 0x4f: /* REX */
4292 if (mode != X86EMUL_MODE_PROT64)
4293 goto done_prefixes;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004294 ctxt->rex_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004295 continue;
4296 case 0xf0: /* LOCK */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004297 ctxt->lock_prefix = 1;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004298 break;
4299 case 0xf2: /* REPNE/REPNZ */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004300 case 0xf3: /* REP/REPE/REPZ */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004301 ctxt->rep_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004302 break;
4303 default:
4304 goto done_prefixes;
4305 }
4306
4307 /* Any legacy prefix after a REX prefix nullifies its effect. */
4308
Avi Kivity9dac77f2011-06-01 15:34:25 +03004309 ctxt->rex_prefix = 0;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004310 }
4311
4312done_prefixes:
4313
4314 /* REX prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004315 if (ctxt->rex_prefix & 8)
4316 ctxt->op_bytes = 8; /* REX.W */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004317
4318 /* Opcode byte(s). */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004319 opcode = opcode_table[ctxt->b];
Wei Yongjund3ad6242010-08-05 16:34:39 +08004320 /* Two-byte opcode? */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004321 if (ctxt->b == 0x0f) {
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004322 ctxt->opcode_len = 2;
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004323 ctxt->b = insn_fetch(u8, ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004324 opcode = twobyte_table[ctxt->b];
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004325
4326 /* 0F_38 opcode map */
4327 if (ctxt->b == 0x38) {
4328 ctxt->opcode_len = 3;
4329 ctxt->b = insn_fetch(u8, ctxt);
4330 opcode = opcode_map_0f_38[ctxt->b];
4331 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004332 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004333 ctxt->d = opcode.flags;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004334
Takuya Yoshikawa9f4260e2012-04-30 17:48:25 +09004335 if (ctxt->d & ModRM)
4336 ctxt->modrm = insn_fetch(u8, ctxt);
4337
Nadav Amit7fe864d2014-06-02 18:34:03 +03004338 /* vex-prefix instructions are not implemented */
4339 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4340 (mode == X86EMUL_MODE_PROT64 ||
4341 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4342 ctxt->d = NotImpl;
4343 }
4344
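	/*
	 * Refine the opcode while group bits remain: Group uses ModRM.reg,
	 * GroupDual additionally switches on ModRM.mod == 3, RMExt uses
	 * ModRM.rm, Prefix selects by the 66/F2/F3 prefix, and Escape picks
	 * from the x87 escape tables.
	 */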
Avi Kivity9dac77f2011-06-01 15:34:25 +03004345 while (ctxt->d & GroupMask) {
4346 switch (ctxt->d & GroupMask) {
Avi Kivity46561642011-04-24 14:09:59 +03004347 case Group:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004348 goffset = (ctxt->modrm >> 3) & 7;
Avi Kivity46561642011-04-24 14:09:59 +03004349 opcode = opcode.u.group[goffset];
4350 break;
4351 case GroupDual:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004352 goffset = (ctxt->modrm >> 3) & 7;
4353 if ((ctxt->modrm >> 6) == 3)
Avi Kivity46561642011-04-24 14:09:59 +03004354 opcode = opcode.u.gdual->mod3[goffset];
4355 else
4356 opcode = opcode.u.gdual->mod012[goffset];
4357 break;
4358 case RMExt:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004359 goffset = ctxt->modrm & 7;
Joerg Roedel01de8b02011-04-04 12:39:31 +02004360 opcode = opcode.u.group[goffset];
Avi Kivity46561642011-04-24 14:09:59 +03004361 break;
4362 case Prefix:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004363 if (ctxt->rep_prefix && op_prefix)
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004364 return EMULATION_FAILED;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004365 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
Avi Kivity46561642011-04-24 14:09:59 +03004366 switch (simd_prefix) {
4367 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4368 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4369 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4370 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4371 }
4372 break;
Gleb Natapov045a2822012-12-20 16:57:43 +02004373 case Escape:
4374 if (ctxt->modrm > 0xbf)
4375 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4376 else
4377 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4378 break;
Avi Kivity46561642011-04-24 14:09:59 +03004379 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004380 return EMULATION_FAILED;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004381 }
Avi Kivity46561642011-04-24 14:09:59 +03004382
Avi Kivityb1ea50b2011-09-13 10:45:42 +03004383 ctxt->d &= ~(u64)GroupMask;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004384 ctxt->d |= opcode.flags;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004385 }
4386
Paolo Bonzinie24186e2014-03-27 12:00:57 +01004387 /* Unrecognised? */
4388 if (ctxt->d == 0)
4389 return EMULATION_FAILED;
4390
Avi Kivity9dac77f2011-06-01 15:34:25 +03004391 ctxt->execute = opcode.u.execute;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004392
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004393 if (unlikely(ctxt->d &
4394 (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
4395 /*
4396 * These are copied unconditionally here, and checked unconditionally
4397 * in x86_emulate_insn.
4398 */
4399 ctxt->check_perm = opcode.check_perm;
4400 ctxt->intercept = opcode.intercept;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004401
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004402 if (ctxt->d & NotImpl)
4403 return EMULATION_FAILED;
Avi Kivityd8671622011-02-01 16:32:03 +02004404
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004405 if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
4406 return EMULATION_FAILED;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004407
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004408 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
Avi Kivity9dac77f2011-06-01 15:34:25 +03004409 ctxt->op_bytes = 8;
Avi Kivity7f9b4b72010-08-01 14:46:54 +03004410
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004411 if (ctxt->d & Op3264) {
4412 if (mode == X86EMUL_MODE_PROT64)
4413 ctxt->op_bytes = 8;
4414 else
4415 ctxt->op_bytes = 4;
4416 }
4417
4418 if (ctxt->d & Sse)
4419 ctxt->op_bytes = 16;
4420 else if (ctxt->d & Mmx)
4421 ctxt->op_bytes = 8;
4422 }
Avi Kivity12537912011-03-29 11:41:27 +02004423
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004424 /* ModRM and SIB bytes. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004425 if (ctxt->d & ModRM) {
Avi Kivityf09ed832011-09-13 10:45:40 +03004426 rc = decode_modrm(ctxt, &ctxt->memop);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004427 if (!ctxt->has_seg_override)
4428 set_seg_override(ctxt, ctxt->modrm_seg);
4429 } else if (ctxt->d & MemAbs)
Avi Kivityf09ed832011-09-13 10:45:40 +03004430 rc = decode_abs(ctxt, &ctxt->memop);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004431 if (rc != X86EMUL_CONTINUE)
4432 goto done;
4433
Avi Kivity9dac77f2011-06-01 15:34:25 +03004434 if (!ctxt->has_seg_override)
4435 set_seg_override(ctxt, VCPU_SREG_DS);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004436
Avi Kivityf09ed832011-09-13 10:45:40 +03004437 ctxt->memop.addr.mem.seg = seg_override(ctxt);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004438
Avi Kivityf09ed832011-09-13 10:45:40 +03004439 if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
4440 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004441
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004442 /*
4443 * Decode and fetch the source operand: register, memory
4444 * or immediate.
4445 */
Avi Kivity0fe59122011-09-13 10:45:47 +03004446 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004447 if (rc != X86EMUL_CONTINUE)
4448 goto done;
4449
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004450 /*
4451 * Decode and fetch the second source operand: register, memory
4452 * or immediate.
4453 */
Avi Kivity4dd6a572011-09-13 10:45:43 +03004454 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004455 if (rc != X86EMUL_CONTINUE)
4456 goto done;
4457
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004458 /* Decode and fetch the destination operand: register or memory. */
Avi Kivitya9945542011-09-13 10:45:41 +03004459 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004460
4461done:
Avi Kivityf09ed832011-09-13 10:45:40 +03004462 if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
4463 ctxt->memopp->addr.mem.ea += ctxt->_eip;
Avi Kivitycb16c342011-06-19 19:21:11 +03004464
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004465 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004466}
4467
Xiao Guangrong1cb3f3a2011-09-22 17:02:48 +08004468bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4469{
4470 return ctxt->d & PageTable;
4471}
4472
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004473static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4474{
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004475 /* The second termination condition only applies for REPE
4476 * and REPNE. Test whether the repeat string operation prefix is
4477 * REPE/REPZ or REPNE/REPNZ and, if so, check the
4478 * corresponding termination condition:
4479 * - if REPE/REPZ and ZF = 0 then done
4480 * - if REPNE/REPNZ and ZF = 1 then done
4481 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004482 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4483 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4484 && (((ctxt->rep_prefix == REPE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004485 ((ctxt->eflags & EFLG_ZF) == 0))
Avi Kivity9dac77f2011-06-01 15:34:25 +03004486 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004487 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4488 return true;
4489
4490 return false;
4491}
4492
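/*
 * Execute fwait under an exception fixup so that any pending x87 fault is
 * taken now and reported to the guest as #MF, instead of surfacing in the
 * middle of emulating an MMX instruction.
 */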
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004493static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4494{
4495 bool fault = false;
4496
4497 ctxt->ops->get_fpu(ctxt);
4498 asm volatile("1: fwait \n\t"
4499 "2: \n\t"
4500 ".pushsection .fixup,\"ax\" \n\t"
4501 "3: \n\t"
4502 "movb $1, %[fault] \n\t"
4503 "jmp 2b \n\t"
4504 ".popsection \n\t"
4505 _ASM_EXTABLE(1b, 3b)
Avi Kivity38e8a2d2012-04-22 15:12:50 +03004506 : [fault]"+qm"(fault));
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004507 ctxt->ops->put_fpu(ctxt);
4508
4509 if (unlikely(fault))
4510 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4511
4512 return X86EMUL_CONTINUE;
4513}
4514
4515static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4516 struct operand *op)
4517{
4518 if (op->type == OP_MM)
4519 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4520}
4521
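/*
 * Dispatch to a fastop stub: the stubs for one operation are laid out
 * FASTOP_SIZE bytes apart, byte variant first, so for non-ByteOp
 * instructions the pointer is advanced by __ffs(dst.bytes) * FASTOP_SIZE to
 * reach the stub of the right width.  dst, src and src2 travel in RAX, RDX
 * and RCX, EFLAGS are exchanged around the call via push/popf, and a NULL
 * fop on return signals a divide-error exception raised by the stub.
 */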
Avi Kivitye28bbd42013-01-04 16:18:48 +02004522static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4523{
4524 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
Avi Kivityb9fa4092013-02-09 11:31:48 +02004525 if (!(ctxt->d & ByteOp))
4526 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
Avi Kivitye28bbd42013-01-04 16:18:48 +02004527 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004528 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4529 [fastop]"+S"(fop)
4530 : "c"(ctxt->src2.val));
Avi Kivitye28bbd42013-01-04 16:18:48 +02004531 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004532 if (!fop) /* exception is returned in fop variable */
4533 return emulate_de(ctxt);
Avi Kivitye28bbd42013-01-04 16:18:48 +02004534 return X86EMUL_CONTINUE;
4535}
Avi Kivitydd856ef2012-08-27 23:46:17 +03004536
Bandan Das14985072014-04-16 12:46:09 -04004537void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4538{
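	/*
	 * Zero every decode-cache field from opcode_len up to (but not
	 * including) _regs; this relies on those fields being laid out
	 * contiguously in struct x86_emulate_ctxt.
	 */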
4539 memset(&ctxt->opcode_len, 0,
4540 (void *)&ctxt->_regs - (void *)&ctxt->opcode_len);
4541
4542 ctxt->fetch.start = 0;
4543 ctxt->fetch.end = 0;
4544 ctxt->io_read.pos = 0;
4545 ctxt->io_read.end = 0;
4546 ctxt->mem_read.pos = 0;
4547 ctxt->mem_read.end = 0;
4548}
4549
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09004550int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004551{
Mathias Krause0225fb52012-08-30 01:30:16 +02004552 const struct x86_emulate_ops *ops = ctxt->ops;
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09004553 int rc = X86EMUL_CONTINUE;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004554 int saved_dst_type = ctxt->dst.type;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004555
Avi Kivity9dac77f2011-06-01 15:34:25 +03004556 ctxt->mem_read.pos = 0;
Glauber Costa310b5d32009-05-12 16:21:06 -04004557
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004558 /* LOCK prefix is allowed only with some instructions */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004559 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004560 rc = emulate_ud(ctxt);
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004561 goto done;
4562 }
4563
Avi Kivity9dac77f2011-06-01 15:34:25 +03004564 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004565 rc = emulate_ud(ctxt);
Avi Kivity081bca02010-08-26 11:06:15 +03004566 goto done;
4567 }
4568
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004569 if (unlikely(ctxt->d &
4570 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4571 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4572 (ctxt->d & Undefined)) {
4573 rc = emulate_ud(ctxt);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004574 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004575 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004576
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004577 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4578 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4579 rc = emulate_ud(ctxt);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004580 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004581 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004582
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004583 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4584 rc = emulate_nm(ctxt);
Joerg Roedeld09beab2011-04-04 12:39:25 +02004585 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004586 }
Joerg Roedeld09beab2011-04-04 12:39:25 +02004587
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004588 if (ctxt->d & Mmx) {
4589 rc = flush_pending_x87_faults(ctxt);
4590 if (rc != X86EMUL_CONTINUE)
4591 goto done;
4592 /*
4593 * Now that we know the fpu is exception safe, we can fetch
4594 * operands from it.
4595 */
4596 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4597 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4598 if (!(ctxt->d & Mov))
4599 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4600 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004601
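		/*
		 * ctxt->guest_mode means we are emulating on behalf of a
		 * nested guest; give the intercept hook a chance at the
		 * pre-exception checkpoint here (the post-exception and
		 * post-memory-access checkpoints follow further down).
		 */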
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004602 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4603 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4604 X86_ICPT_PRE_EXCEPT);
4605 if (rc != X86EMUL_CONTINUE)
4606 goto done;
4607 }
4608
4609 /* Privileged instruction can be executed only in CPL=0 */
4610 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
4611 rc = emulate_gp(ctxt, 0);
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004612 goto done;
4613 }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004614
4615 /* Instruction can only be executed in protected mode */
4616 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4617 rc = emulate_ud(ctxt);
4618 goto done;
4619 }
4620
4621 /* Do instruction specific permission checks */
4622 if (ctxt->check_perm) {
4623 rc = ctxt->check_perm(ctxt);
4624 if (rc != X86EMUL_CONTINUE)
4625 goto done;
4626 }
4627
4628 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4629 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4630 X86_ICPT_POST_EXCEPT);
4631 if (rc != X86EMUL_CONTINUE)
4632 goto done;
4633 }
4634
4635 if (ctxt->rep_prefix && (ctxt->d & String)) {
4636 /* All REP prefixes have the same first termination condition */
4637 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4638 ctxt->eip = ctxt->_eip;
4639 goto done;
4640 }
4641 }
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004642 }
4643
Avi Kivity9dac77f2011-06-01 15:34:25 +03004644 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4645 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4646 ctxt->src.valptr, ctxt->src.bytes);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09004647 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004648 goto done;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004649 ctxt->src.orig_val64 = ctxt->src.val64;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004650 }
4651
Avi Kivity9dac77f2011-06-01 15:34:25 +03004652 if (ctxt->src2.type == OP_MEM) {
4653 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4654 &ctxt->src2.val, ctxt->src2.bytes);
Gleb Natapove35b7b92010-02-25 16:36:42 +02004655 if (rc != X86EMUL_CONTINUE)
4656 goto done;
4657 }
4658
Avi Kivity9dac77f2011-06-01 15:34:25 +03004659 if ((ctxt->d & DstMask) == ImplicitOps)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004660 goto special_insn;
4661
4662
Avi Kivity9dac77f2011-06-01 15:34:25 +03004663 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004664 /* optimisation - avoid slow emulated read if Mov */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004665 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4666 &ctxt->dst.val, ctxt->dst.bytes);
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004667 if (rc != X86EMUL_CONTINUE)
4668 goto done;
Avi Kivity038e51d2007-01-22 20:40:40 -08004669 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004670 ctxt->dst.orig_val = ctxt->dst.val;
Avi Kivity038e51d2007-01-22 20:40:40 -08004671
Avi Kivity018a98d2007-11-27 19:30:56 +02004672special_insn:
4673
Avi Kivity9dac77f2011-06-01 15:34:25 +03004674 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4675 rc = emulator_check_intercept(ctxt, ctxt->intercept,
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004676 X86_ICPT_POST_MEMACCESS);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004677 if (rc != X86EMUL_CONTINUE)
4678 goto done;
4679 }
4680
Avi Kivity9dac77f2011-06-01 15:34:25 +03004681 if (ctxt->execute) {
Avi Kivitye28bbd42013-01-04 16:18:48 +02004682 if (ctxt->d & Fastop) {
4683 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4684 rc = fastop(ctxt, fop);
4685 if (rc != X86EMUL_CONTINUE)
4686 goto done;
4687 goto writeback;
4688 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004689 rc = ctxt->execute(ctxt);
Avi Kivityef65c882010-07-29 15:11:51 +03004690 if (rc != X86EMUL_CONTINUE)
4691 goto done;
4692 goto writeback;
4693 }
4694
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004695 if (ctxt->opcode_len == 2)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004696 goto twobyte_insn;
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004697 else if (ctxt->opcode_len == 3)
4698 goto threebyte_insn;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004699
Avi Kivity9dac77f2011-06-01 15:34:25 +03004700 switch (ctxt->b) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08004701 case 0x63: /* movsxd */
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004702 if (ctxt->mode != X86EMUL_MODE_PROT64)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004703 goto cannot_emulate;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004704 ctxt->dst.val = (s32) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004705 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03004706 case 0x70 ... 0x7f: /* jcc (short) */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004707 if (test_cc(ctxt->b, ctxt->eflags))
4708 jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02004709 break;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004710 case 0x8d: /* lea r16/r32, m */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004711 ctxt->dst.val = ctxt->src.addr.mem.ea;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004712 break;
Avi Kivity3d9e77d2010-08-01 12:41:59 +03004713 case 0x90 ... 0x97: /* nop / xchg reg, rax */
Avi Kivitydd856ef2012-08-27 23:46:17 +03004714 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
Nadav Amita825f5c2014-06-15 16:13:01 +03004715 ctxt->dst.type = OP_NONE;
4716 else
4717 rc = em_xchg(ctxt);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09004718 break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004719 case 0x98: /* cbw/cwde/cdqe */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004720 switch (ctxt->op_bytes) {
4721 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4722 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4723 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004724 }
4725 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004726 case 0xcc: /* int3 */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004727 rc = emulate_int(ctxt, 3);
4728 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004729 case 0xcd: /* int n */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004730 rc = emulate_int(ctxt, ctxt->src.val);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004731 break;
4732 case 0xce: /* into */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004733 if (ctxt->eflags & EFLG_OF)
4734 rc = emulate_int(ctxt, 4);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004735 break;
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004736 case 0xe9: /* jmp rel */
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004737 case 0xeb: /* jmp rel short */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004738 jmp_rel(ctxt, ctxt->src.val);
4739 ctxt->dst.type = OP_NONE; /* Disable writeback. */
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004740 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02004741 case 0xf4: /* hlt */
Avi Kivity6c3287f2011-04-20 15:43:05 +03004742 ctxt->ops->halt(ctxt);
Mohammed Gamal19fdfa02008-07-06 16:51:26 +03004743 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02004744 case 0xf5: /* cmc */
4745 /* complement carry flag from eflags reg */
4746 ctxt->eflags ^= EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02004747 break;
4748 case 0xf8: /* clc */
4749 ctxt->eflags &= ~EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02004750 break;
Mohammed Gamal8744aa92010-08-05 15:42:49 +03004751 case 0xf9: /* stc */
4752 ctxt->eflags |= EFLG_CF;
4753 break;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004754 case 0xfc: /* cld */
4755 ctxt->eflags &= ~EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004756 break;
4757 case 0xfd: /* std */
4758 ctxt->eflags |= EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004759 break;
Avi Kivity91269b82010-07-25 14:51:16 +03004760 default:
4761 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004762 }
Avi Kivity018a98d2007-11-27 19:30:56 +02004763
Avi Kivity7d9ddae2010-08-30 17:12:28 +03004764 if (rc != X86EMUL_CONTINUE)
4765 goto done;
4766
Avi Kivity018a98d2007-11-27 19:30:56 +02004767writeback:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02004768 if (ctxt->d & SrcWrite) {
4769 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
4770 rc = writeback(ctxt, &ctxt->src);
4771 if (rc != X86EMUL_CONTINUE)
4772 goto done;
4773 }
Nadav Amitee212292014-06-15 16:12:58 +03004774 if (!(ctxt->d & NoWrite)) {
4775 rc = writeback(ctxt, &ctxt->dst);
4776 if (rc != X86EMUL_CONTINUE)
4777 goto done;
4778 }
Avi Kivity018a98d2007-11-27 19:30:56 +02004779
Gleb Natapov5cd21912010-03-18 15:20:26 +02004780 /*
4781 * restore dst type in case the decoding will be reused
4782	 * (happens for string instructions)
4783 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004784 ctxt->dst.type = saved_dst_type;
Gleb Natapov5cd21912010-03-18 15:20:26 +02004785
Avi Kivity9dac77f2011-06-01 15:34:25 +03004786 if ((ctxt->d & SrcMask) == SrcSI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03004787 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
Gleb Natapova682e352010-03-18 15:20:21 +02004788
Avi Kivity9dac77f2011-06-01 15:34:25 +03004789 if ((ctxt->d & DstMask) == DstDI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03004790 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
Gleb Natapovd9271122010-03-18 15:20:22 +02004791
Avi Kivity9dac77f2011-06-01 15:34:25 +03004792 if (ctxt->rep_prefix && (ctxt->d & String)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004793 unsigned int count;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004794 struct read_cache *r = &ctxt->io_read;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004795 if ((ctxt->d & SrcMask) == SrcSI)
4796 count = ctxt->src.count;
4797 else
4798 count = ctxt->dst.count;
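		/*
		 * One trip through the emulator may have handled several
		 * string elements (e.g. when the I/O read-ahead buffer is in
		 * use), so the whole batch is charged against RCX at once.
		 */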
4799 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
4800 -count);
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004801
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004802 if (!string_insn_completed(ctxt)) {
4803 /*
4804 * Re-enter guest when pio read ahead buffer is empty
4805 * or, if it is not used, after each 1024 iteration.
4806 */
Avi Kivitydd856ef2012-08-27 23:46:17 +03004807 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004808 (r->end == 0 || r->end != r->pos)) {
4809 /*
4810 * Reset read cache. Usually happens before
4811 * decode, but since instruction is restarted
4812 * we have to do it here.
4813 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004814 ctxt->mem_read.end = 0;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004815 writeback_registers(ctxt);
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004816 return EMULATION_RESTART;
4817 }
4818 goto done; /* skip rip writeback */
Avi Kivity0fa6ccb2010-08-17 11:22:17 +03004819 }
Gleb Natapov5cd21912010-03-18 15:20:26 +02004820 }
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004821
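	/* The instruction (or its final REP iteration) has completed: commit RIP. */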
	ctxt->eip = ctxt->_eip;

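	/*
	 * Common exit: turn a propagated fault into a pending exception for
	 * the caller to inject, report intercepted instructions, and on
	 * success flush the cached GPRs back to the vcpu.
	 */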
done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

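	/* Two-byte (0x0f-prefixed) opcodes that are still emulated inline. */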
twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
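		/*
		 * A dword cmov in long mode must zero-extend the destination
		 * even when the condition is false, so writeback is only
		 * skipped for the other operand sizes/modes.
		 */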
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xae: /* clflush */
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	case 0xc3: /* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

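	/* Three-byte opcodes rejoin the common writeback/exit path here. */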
threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

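/*
 * Helpers for callers outside the emulator: discard the cached guest GPR
 * values (so they are re-read on demand) or write any dirty cached values
 * back to the vcpu.
 */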
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}