/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
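
/*
 * Illustrative note (not part of the original file): each operand slot is a
 * 5-bit Op* value (OpBits == 5) packed into the 56-bit opcode::flags field
 * at DstShift, SrcShift and Src2Shift.  A hypothetical table entry built as
 * (DstMem | SrcReg | ModRM) can therefore be decoded back with, roughly,
 *
 *	dst_type = (flags >> DstShift) & OpMask;	yields OpMem
 *	src_type = (flags >> SrcShift) & OpMask;	yields OpReg
 *
 * which is the scheme the operand decoder in this file relies on.
 */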

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;
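
/*
 * Illustrative sketch (not part of the original file): because every fastop
 * stub is FASTOP_SIZE (8) bytes, the 8/16/32/64-bit flavours of em_add live
 * at em_add + 0, + 8, + 16 and + 24.  A caller can therefore reach the right
 * one purely by arithmetic on the destination size, roughly:
 *
 *	fop = em_add;
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * The actual dispatch lives in fastop(), declared below and defined later
 * in this file.
 */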

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
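
/*
 * Illustrative note (not part of the original file): the helpers above form
 * a small lazy cache of guest GPRs.  reg_read() pulls a register from the
 * vcpu only on first use, reg_write() marks it dirty, and reg_rmw() does
 * both, e.g.
 *
 *	*reg_rmw(ctxt, VCPU_REGS_RCX) -= 1;	decrement the guest's RCX
 *
 * Dirty values only reach the vcpu when writeback_registers() runs;
 * invalidate_registers() simply throws the cache away.
 */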

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
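
/*
 * Example (illustrative, not part of the original file): FASTOP2(add), used
 * further down, expands to an asm block roughly equivalent to
 *
 *	.align 8; .global em_add; em_add:
 *	.align 8; addb %dl,  %al  ; ret
 *	.align 8; addw %dx,  %ax  ; ret
 *	.align 8; addl %edx, %eax ; ret
 *	.align 8; addq %rdx, %rax ; ret		(CONFIG_X86_64 only)
 *
 * i.e. one FASTOP_SIZE-byte stub per operand size, selectable by an offset
 * from em_add as described in the calling-convention comment above.
 */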

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
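
/*
 * Illustrative example (not part of the original file): with a 16-bit
 * address size (ctxt->ad_bytes == 2), ad_mask() is 0xffff, so incrementing
 * ESI == 0x1ffff by one via register_address_increment() wraps only the low
 * half: SI goes from 0xffff to 0x0000 and the upper 16 bits of ESI are
 * preserved (ESI becomes 0x10000).  rsp_increment() applies the same idea
 * to the stack pointer, using stack_mask() derived from SS.D.
 */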

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
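
/*
 * Example (illustrative, not part of the original file): for a 16-byte
 * access, MOVDQA (flagged Aligned) must be 16-byte aligned and takes #GP
 * otherwise, MOVDQU (flagged Unaligned) never faults on alignment, and an
 * unflagged legacy-SSE instruction defaults to requiring alignment unless
 * it is AVX-encoded (Avx) -- exactly the ordering of the checks above.
 */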

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		if (size > *max_size)
			goto bad;
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
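
/*
 * Illustrative example (not part of the original file): for an expand-down
 * data segment (data, type bit 2 set) with scaled limit 0x0fff and d=1, the
 * code above inverts the usual range check: offsets 0x0000..0x0fff fault,
 * while 0x1000..0xffffffff are valid.  That is why lim is first compared
 * with "addr.ea <= lim" and then widened to 0xffffffff (or 0xffff) before
 * the final "addr.ea > lim" test.
 */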

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			mode = X86EMUL_MODE_PROT64;
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

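	/*
	 * Illustrative note (not part of the original file): cur_size is at
	 * most 15 here, so "15UL ^ cur_size" below is just a branch-free way
	 * of computing 15 - cur_size, i.e. how many of the architectural
	 * 15 instruction bytes may still be fetched.
	 */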
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})
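
/*
 * Illustrative usage (not part of the original file): these macros are used
 * from decoder functions that declare "int rc" and provide a "done:" label,
 * e.g.
 *
 *	ctxt->modrm = insn_fetch(u8, ctxt);
 *	modrm_ea   += insn_fetch(s32, ctxt);
 *
 * Each use advances ctxt->_eip and the fetch-cache pointer by the fetched
 * size, and jumps to done with rc set if the bytes cannot be obtained.
 */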

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
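
/*
 * Illustrative note (not part of the original file): the FOP_SETCC table
 * above emits one 4-byte "setcc %al; ret" stub per condition code, in the
 * architectural cc order (0 = O, 1 = NO, 2 = B/C, ...), so test_cc() can
 * index it with em_setcc + 4 * (condition & 0xf).  For example, condition
 * 0x4 -- the low nibble of the JZ/JE opcodes 0x74 and 0x0f 0x84 -- lands on
 * the setz stub.
 */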

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

Avi Kivitya6e34072012-06-10 17:15:39 +03001109static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1110{
1111 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1112 ctxt->modrm_seg = VCPU_SREG_SS;
1113}
1114
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001115static int decode_modrm(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001116 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001117{
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001118 u8 sib;
Bandan Das02357bd2014-04-16 12:46:11 -04001119 int index_reg, base_reg, scale;
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001120 int rc = X86EMUL_CONTINUE;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001121 ulong modrm_ea = 0;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001122
Bandan Das02357bd2014-04-16 12:46:11 -04001123 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1124 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1125 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001126
Bandan Das02357bd2014-04-16 12:46:11 -04001127 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001128 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
Bandan Das02357bd2014-04-16 12:46:11 -04001129 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001130 ctxt->modrm_seg = VCPU_SREG_DS;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001131
Nadav Amit9b88ae92014-05-25 23:05:21 +03001132 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001133 op->type = OP_REG;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001134 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Paolo Bonzini8acb42072013-05-30 16:35:55 +02001135 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02001136 ctxt->d & ByteOp);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001137 if (ctxt->d & Sse) {
Avi Kivity1253791d2011-03-29 11:41:27 +02001138 op->type = OP_XMM;
1139 op->bytes = 16;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001140 op->addr.xmm = ctxt->modrm_rm;
1141 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
Avi Kivity1253791d2011-03-29 11:41:27 +02001142 return rc;
1143 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001144 if (ctxt->d & Mmx) {
1145 op->type = OP_MM;
1146 op->bytes = 8;
Paolo Bonzinibdc90722014-05-06 14:03:29 +02001147 op->addr.mm = ctxt->modrm_rm & 7;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001148 return rc;
1149 }
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001150 fetch_register_operand(op);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001151 return rc;
1152 }
1153
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001154 op->type = OP_MEM;
1155
Avi Kivity9dac77f2011-06-01 15:34:25 +03001156 if (ctxt->ad_bytes == 2) {
Avi Kivitydd856ef2012-08-27 23:46:17 +03001157 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1158 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1159 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1160 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001161
1162 /* 16-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001163 switch (ctxt->modrm_mod) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001164 case 0:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001165 if (ctxt->modrm_rm == 6)
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001166 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001167 break;
1168 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001169 modrm_ea += insn_fetch(s8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001170 break;
1171 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001172 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001173 break;
1174 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001175 switch (ctxt->modrm_rm) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001176 case 0:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001177 modrm_ea += bx + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001178 break;
1179 case 1:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001180 modrm_ea += bx + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001181 break;
1182 case 2:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001183 modrm_ea += bp + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001184 break;
1185 case 3:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001186 modrm_ea += bp + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001187 break;
1188 case 4:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001189 modrm_ea += si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001190 break;
1191 case 5:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001192 modrm_ea += di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001193 break;
1194 case 6:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001195 if (ctxt->modrm_mod != 0)
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001196 modrm_ea += bp;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001197 break;
1198 case 7:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001199 modrm_ea += bx;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001200 break;
1201 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001202 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1203 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1204 ctxt->modrm_seg = VCPU_SREG_SS;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001205 modrm_ea = (u16)modrm_ea;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001206 } else {
1207 /* 32/64-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001208 if ((ctxt->modrm_rm & 7) == 4) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001209 sib = insn_fetch(u8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001210 index_reg |= (sib >> 3) & 7;
1211 base_reg |= sib & 7;
1212 scale = sib >> 6;
1213
Avi Kivity9dac77f2011-06-01 15:34:25 +03001214 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001215 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivitya6e34072012-06-10 17:15:39 +03001216 else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03001217 modrm_ea += reg_read(ctxt, base_reg);
Avi Kivitya6e34072012-06-10 17:15:39 +03001218 adjust_modrm_seg(ctxt, base_reg);
1219 }
Avi Kivitydc71d0f2008-06-15 21:23:17 -07001220 if (index_reg != 4)
Avi Kivitydd856ef2012-08-27 23:46:17 +03001221 modrm_ea += reg_read(ctxt, index_reg) << scale;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001222 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
Nadav Amit5b38ab82014-11-02 11:54:41 +02001223 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivity84411d82008-06-15 21:53:26 -07001224 if (ctxt->mode == X86EMUL_MODE_PROT64)
Avi Kivity9dac77f2011-06-01 15:34:25 +03001225 ctxt->rip_relative = 1;
Avi Kivitya6e34072012-06-10 17:15:39 +03001226 } else {
1227 base_reg = ctxt->modrm_rm;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001228 modrm_ea += reg_read(ctxt, base_reg);
Avi Kivitya6e34072012-06-10 17:15:39 +03001229 adjust_modrm_seg(ctxt, base_reg);
1230 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001231 switch (ctxt->modrm_mod) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001232 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001233 modrm_ea += insn_fetch(s8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001234 break;
1235 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001236 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001237 break;
1238 }
1239 }
Avi Kivity90de84f2010-11-17 15:28:21 +02001240 op->addr.mem.ea = modrm_ea;
Bandan Das41061cd2014-04-16 12:46:14 -04001241 if (ctxt->ad_bytes != 8)
1242 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1243
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001244done:
1245 return rc;
1246}
1247
1248static int decode_abs(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001249 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001250{
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001251 int rc = X86EMUL_CONTINUE;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001252
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001253 op->type = OP_MEM;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001254 switch (ctxt->ad_bytes) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001255 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001256 op->addr.mem.ea = insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001257 break;
1258 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001259 op->addr.mem.ea = insn_fetch(u32, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001260 break;
1261 case 8:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001262 op->addr.mem.ea = insn_fetch(u64, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001263 break;
1264 }
1265done:
1266 return rc;
1267}
1268
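/*
 * For bit instructions (e.g. BT/BTS/BTR/BTC) with a memory destination
 * and a register bit-offset source, displace the effective address by
 * the width-aligned part of the signed bit offset (converted to bytes)
 * and reduce the source operand to the offset within the addressed word.
 */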
Avi Kivity9dac77f2011-06-01 15:34:25 +03001269static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
Wei Yongjun35c843c2010-08-09 11:34:56 +08001270{
Sheng Yang7129eec2010-09-28 16:33:32 +08001271 long sv = 0, mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001272
Avi Kivity9dac77f2011-06-01 15:34:25 +03001273 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
Nadav Amit7dec5602014-06-15 16:12:57 +03001274 mask = ~((long)ctxt->dst.bytes * 8 - 1);
Wei Yongjun35c843c2010-08-09 11:34:56 +08001275
Avi Kivity9dac77f2011-06-01 15:34:25 +03001276 if (ctxt->src.bytes == 2)
1277 sv = (s16)ctxt->src.val & (s16)mask;
1278 else if (ctxt->src.bytes == 4)
1279 sv = (s32)ctxt->src.val & (s32)mask;
Nadav Amit7dec5602014-06-15 16:12:57 +03001280 else
1281 sv = (s64)ctxt->src.val & (s64)mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001282
Nadav Amit1c1c35a2014-11-19 17:43:09 +02001283 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1284 ctxt->dst.addr.mem.ea + (sv >> 3));
Wei Yongjun35c843c2010-08-09 11:34:56 +08001285 }
Wei Yongjunba7ff2b2010-08-09 11:39:14 +08001286
1287 /* only subword offset */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001288 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001289}
1290
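/*
 * Emulated memory reads go through the per-instruction read cache
 * (ctxt->mem_read): bytes already fetched are returned from the cache,
 * so a restarted instruction does not issue the same read twice.
 */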
Gleb Natapov9de41572010-04-28 19:15:22 +03001291static int read_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov9de41572010-04-28 19:15:22 +03001292 unsigned long addr, void *dest, unsigned size)
1293{
1294 int rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001295 struct read_cache *mc = &ctxt->mem_read;
Gleb Natapov9de41572010-04-28 19:15:22 +03001296
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001297 if (mc->pos < mc->end)
1298 goto read_cached;
Gleb Natapov9de41572010-04-28 19:15:22 +03001299
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001300 WARN_ON((mc->end + size) >= sizeof(mc->data));
Gleb Natapov9de41572010-04-28 19:15:22 +03001301
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001302 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1303 &ctxt->exception);
1304 if (rc != X86EMUL_CONTINUE)
1305 return rc;
1306
1307 mc->end += size;
1308
1309read_cached:
1310 memcpy(dest, mc->data + mc->pos, size);
1311 mc->pos += size;
Gleb Natapov9de41572010-04-28 19:15:22 +03001312 return X86EMUL_CONTINUE;
1313}
1314
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001315static int segmented_read(struct x86_emulate_ctxt *ctxt,
1316 struct segmented_address addr,
1317 void *data,
1318 unsigned size)
1319{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001320 int rc;
1321 ulong linear;
1322
Avi Kivity83b87952011-04-03 11:31:19 +03001323 rc = linearize(ctxt, addr, size, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001324 if (rc != X86EMUL_CONTINUE)
1325 return rc;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001326 return read_emulated(ctxt, linear, data, size);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001327}
1328
1329static int segmented_write(struct x86_emulate_ctxt *ctxt,
1330 struct segmented_address addr,
1331 const void *data,
1332 unsigned size)
1333{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001334 int rc;
1335 ulong linear;
1336
Avi Kivity83b87952011-04-03 11:31:19 +03001337 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001338 if (rc != X86EMUL_CONTINUE)
1339 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001340 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1341 &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001342}
1343
1344static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1345 struct segmented_address addr,
1346 const void *orig_data, const void *data,
1347 unsigned size)
1348{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001349 int rc;
1350 ulong linear;
1351
Avi Kivity83b87952011-04-03 11:31:19 +03001352 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001353 if (rc != X86EMUL_CONTINUE)
1354 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001355 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1356 size, &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001357}
1358
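/*
 * IN/INS read-ahead: when the io_read cache is empty, fetch up to a
 * page's worth, a cache's worth, or the REP count of data from the port
 * in a single call, then serve the remaining iterations (or hand the
 * whole buffer to a forward string store) from the cache.
 */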
Gleb Natapov7b262e92010-03-18 15:20:27 +02001359static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov7b262e92010-03-18 15:20:27 +02001360 unsigned int size, unsigned short port,
1361 void *dest)
1362{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001363 struct read_cache *rc = &ctxt->io_read;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001364
1365 if (rc->pos == rc->end) { /* refill pio read ahead */
Gleb Natapov7b262e92010-03-18 15:20:27 +02001366 unsigned int in_page, n;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001367 unsigned int count = ctxt->rep_prefix ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001368 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001369 in_page = (ctxt->eflags & EFLG_DF) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001370 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1371 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
Mark Rustadb55a8142014-07-25 06:27:05 -07001372 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
Gleb Natapov7b262e92010-03-18 15:20:27 +02001373 if (n == 0)
1374 n = 1;
1375 rc->pos = rc->end = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001376 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
Gleb Natapov7b262e92010-03-18 15:20:27 +02001377 return 0;
1378 rc->end = n * size;
1379 }
1380
Nadav Amite6e39f02014-04-18 03:35:10 +03001381 if (ctxt->rep_prefix && (ctxt->d & String) &&
1382 !(ctxt->eflags & EFLG_DF)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001383 ctxt->dst.data = rc->data + rc->pos;
1384 ctxt->dst.type = OP_MEM_STR;
1385 ctxt->dst.count = (rc->end - rc->pos) / size;
1386 rc->pos = rc->end;
1387 } else {
1388 memcpy(dest, rc->data + rc->pos, size);
1389 rc->pos += size;
1390 }
Gleb Natapov7b262e92010-03-18 15:20:27 +02001391 return 1;
1392}
1393
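/*
 * Fetch a gate descriptor from the IDT; raises #GP with the IDT-flavoured
 * error code (vector << 3 | 2) if the entry lies outside the IDT limit.
 */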
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01001394static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1395 u16 index, struct desc_struct *desc)
1396{
1397 struct desc_ptr dt;
1398 ulong addr;
1399
1400 ctxt->ops->get_idt(ctxt, &dt);
1401
1402 if (dt.size < index * 8 + 7)
1403 return emulate_gp(ctxt, index << 3 | 0x2);
1404
1405 addr = dt.address + index * 8;
1406 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1407 &ctxt->exception);
1408}
1409
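/*
 * Return the base/limit of the table a selector refers to: the LDT when
 * the selector's TI bit (bit 2) is set, otherwise the GDT.
 */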
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001410static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001411 u16 selector, struct desc_ptr *dt)
1412{
Mathias Krause0225fb52012-08-30 01:30:16 +02001413 const struct x86_emulate_ops *ops = ctxt->ops;
Nadav Amit2eedcac2014-06-02 18:34:05 +03001414 u32 base3 = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001415
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001416 if (selector & 1 << 2) {
1417 struct desc_struct desc;
Avi Kivity1aa36612011-04-27 13:20:30 +03001418 u16 sel;
1419
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001420 memset (dt, 0, sizeof *dt);
Nadav Amit2eedcac2014-06-02 18:34:05 +03001421 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1422 VCPU_SREG_LDTR))
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001423 return;
1424
1425 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
Nadav Amit2eedcac2014-06-02 18:34:05 +03001426 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001427 } else
Avi Kivity4bff1e862011-04-20 13:37:53 +03001428 ops->get_gdt(ctxt, dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001429}
1430
1431/* allowed only for 8-byte segment descriptors */
1432static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Avi Kivitye9194642012-06-13 16:29:39 +03001433 u16 selector, struct desc_struct *desc,
1434 ulong *desc_addr_p)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001435{
1436 struct desc_ptr dt;
1437 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001438 ulong addr;
1439
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001440 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001441
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001442 if (dt.size < index * 8 + 7)
1443 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001444
Avi Kivitye9194642012-06-13 16:29:39 +03001445 *desc_addr_p = addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001446 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1447 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001448}
1449
1450/* allowed only for 8-byte segment descriptors */
1451static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001452 u16 selector, struct desc_struct *desc)
1453{
1454 struct desc_ptr dt;
1455 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001456 ulong addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001457
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001458 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001459
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001460 if (dt.size < index * 8 + 7)
1461 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001462
1463 addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001464 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1465 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001466}
1467
Gleb Natapov5601d052011-03-07 14:55:06 +02001468/* Does not support long mode */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001469static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Nadav Amitd1442d82014-09-18 22:39:39 +03001470 u16 selector, int seg, u8 cpl,
1471 bool in_task_switch,
1472 struct desc_struct *desc)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001473{
Avi Kivity869be992012-06-13 16:30:53 +03001474 struct desc_struct seg_desc, old_desc;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001475 u8 dpl, rpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001476 unsigned err_vec = GP_VECTOR;
1477 u32 err_code = 0;
1478 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
Avi Kivitye9194642012-06-13 16:29:39 +03001479 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001480 int ret;
Avi Kivity03ebebe2012-08-21 17:07:04 +03001481 u16 dummy;
Nadav Amite37a75a2014-06-02 18:34:04 +03001482 u32 base3 = 0;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001483
1484 memset(&seg_desc, 0, sizeof seg_desc);
1485
Kevin Wolff8da94e2013-04-11 14:06:03 +02001486 if (ctxt->mode == X86EMUL_MODE_REAL) {
1487 /* set real mode segment descriptor (keep limit etc. for
1488 * unreal mode) */
Avi Kivity03ebebe2012-08-21 17:07:04 +03001489 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001490 set_desc_base(&seg_desc, selector << 4);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001491 goto load;
Kevin Wolff8da94e2013-04-11 14:06:03 +02001492 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1493 /* VM86 needs a clean new segment descriptor */
1494 set_desc_base(&seg_desc, selector << 4);
1495 set_desc_limit(&seg_desc, 0xffff);
1496 seg_desc.type = 3;
1497 seg_desc.p = 1;
1498 seg_desc.s = 1;
1499 seg_desc.dpl = 3;
1500 goto load;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001501 }
1502
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001503 rpl = selector & 3;
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001504
1505 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1506 if ((seg == VCPU_SREG_CS
1507 || (seg == VCPU_SREG_SS
1508 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1509 || seg == VCPU_SREG_TR)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001510 && null_selector)
1511 goto exception;
1512
1513 /* TR should be in GDT only */
1514 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1515 goto exception;
1516
1517 if (null_selector) /* for NULL selector skip all following checks */
1518 goto load;
1519
Avi Kivitye9194642012-06-13 16:29:39 +03001520 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001521 if (ret != X86EMUL_CONTINUE)
1522 return ret;
1523
1524 err_code = selector & 0xfffc;
Paolo Bonzini15fc0752014-08-18 13:17:00 +02001525 err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001526
Guo Chaofc058682012-06-28 15:19:51 +08001527 /* can't load a system descriptor into an ordinary segment register */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001528 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1529 goto exception;
1530
1531 if (!seg_desc.p) {
1532 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1533 goto exception;
1534 }
1535
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001536 dpl = seg_desc.dpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001537
1538 switch (seg) {
1539 case VCPU_SREG_SS:
1540 /*
1541 * segment is not a writable data segment or segment
1542 * selector's RPL != CPL or the descriptor's DPL != CPL
1543 */
1544 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1545 goto exception;
1546 break;
1547 case VCPU_SREG_CS:
1548 if (!(seg_desc.type & 8))
1549 goto exception;
1550
1551 if (seg_desc.type & 4) {
1552 /* conforming */
1553 if (dpl > cpl)
1554 goto exception;
1555 } else {
1556 /* nonconforming */
1557 if (rpl > cpl || dpl != cpl)
1558 goto exception;
1559 }
Nadav Amit040c8dc2014-09-18 22:39:43 +03001560 /* in long-mode d/b must be clear if l is set */
1561 if (seg_desc.d && seg_desc.l) {
1562 u64 efer = 0;
1563
1564 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1565 if (efer & EFER_LMA)
1566 goto exception;
1567 }
1568
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001569 /* CS(RPL) <- CPL */
1570 selector = (selector & 0xfffc) | cpl;
1571 break;
1572 case VCPU_SREG_TR:
1573 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1574 goto exception;
Avi Kivity869be992012-06-13 16:30:53 +03001575 old_desc = seg_desc;
1576 seg_desc.type |= 2; /* busy */
1577 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1578 sizeof(seg_desc), &ctxt->exception);
1579 if (ret != X86EMUL_CONTINUE)
1580 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001581 break;
1582 case VCPU_SREG_LDTR:
1583 if (seg_desc.s || seg_desc.type != 2)
1584 goto exception;
1585 break;
1586 default: /* DS, ES, FS, or GS */
1587 /*
1588 * segment is not a data or readable code segment or
1589 * ((segment is a data or nonconforming code segment)
1590 * and (both RPL and CPL > DPL))
1591 */
1592 if ((seg_desc.type & 0xa) == 0x8 ||
1593 (((seg_desc.type & 0xc) != 0xc) &&
1594 (rpl > dpl && cpl > dpl)))
1595 goto exception;
1596 break;
1597 }
1598
1599 if (seg_desc.s) {
1600 /* mark segment as accessed */
1601 seg_desc.type |= 1;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001602 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001603 if (ret != X86EMUL_CONTINUE)
1604 return ret;
Nadav Amite37a75a2014-06-02 18:34:04 +03001605 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1606 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1607 sizeof(base3), &ctxt->exception);
1608 if (ret != X86EMUL_CONTINUE)
1609 return ret;
Nadav Amit9a9abf62014-11-02 11:54:56 +02001610 if (is_noncanonical_address(get_desc_base(&seg_desc) |
1611 ((u64)base3 << 32)))
1612 return emulate_gp(ctxt, 0);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001613 }
1614load:
Nadav Amite37a75a2014-06-02 18:34:04 +03001615 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
Nadav Amitd1442d82014-09-18 22:39:39 +03001616 if (desc)
1617 *desc = seg_desc;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001618 return X86EMUL_CONTINUE;
1619exception:
Paolo Bonzini592f0852014-08-20 10:05:08 +02001620 return emulate_exception(ctxt, err_vec, err_code, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001621}
1622
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001623static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1624 u16 selector, int seg)
1625{
1626 u8 cpl = ctxt->ops->cpl(ctxt);
Nadav Amitd1442d82014-09-18 22:39:39 +03001627 return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001628}
1629
Wei Yongjun31be40b2010-08-17 09:17:30 +08001630static void write_register_operand(struct operand *op)
1631{
1632 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1633 switch (op->bytes) {
1634 case 1:
1635 *(u8 *)op->addr.reg = (u8)op->val;
1636 break;
1637 case 2:
1638 *(u16 *)op->addr.reg = (u16)op->val;
1639 break;
1640 case 4:
1641 *op->addr.reg = (u32)op->val;
1642 break; /* 64b: zero-extend */
1643 case 8:
1644 *op->addr.reg = op->val;
1645 break;
1646 }
1647}
1648
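/*
 * Commit an operand to its destination: general-purpose register, memory
 * (via cmpxchg when the instruction carries a LOCK prefix), cached string
 * data, SSE/MMX register, or nothing at all for OP_NONE.
 */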
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001649static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
Wei Yongjunc37eda12010-06-15 09:03:33 +08001650{
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001651 switch (op->type) {
Wei Yongjunc37eda12010-06-15 09:03:33 +08001652 case OP_REG:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001653 write_register_operand(op);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001654 break;
1655 case OP_MEM:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001656 if (ctxt->lock_prefix)
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001657 return segmented_cmpxchg(ctxt,
1658 op->addr.mem,
1659 &op->orig_val,
1660 &op->val,
1661 op->bytes);
1662 else
1663 return segmented_write(ctxt,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001664 op->addr.mem,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001665 &op->val,
1666 op->bytes);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001667 break;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001668 case OP_MEM_STR:
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001669 return segmented_write(ctxt,
1670 op->addr.mem,
1671 op->data,
1672 op->bytes * op->count);
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001673 break;
Avi Kivity1253791d2011-03-29 11:41:27 +02001674 case OP_XMM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001675 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
Avi Kivity1253791d2011-03-29 11:41:27 +02001676 break;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001677 case OP_MM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001678 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001679 break;
Wei Yongjunc37eda12010-06-15 09:03:33 +08001680 case OP_NONE:
1681 /* no writeback */
1682 break;
1683 default:
1684 break;
1685 }
1686 return X86EMUL_CONTINUE;
1687}
1688
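/*
 * push() decrements RSP by 'bytes' (masked to the current stack-address
 * size) and writes the data at the new SS:RSP; em_push() pushes the
 * decoded source operand and suppresses the normal writeback.
 */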
Avi Kivity51ddff52012-06-12 20:19:40 +03001689static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001690{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001691 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001692
Avi Kivity5ad105e2012-08-19 14:34:31 +03001693 rsp_increment(ctxt, -bytes);
Avi Kivitydd856ef2012-08-27 23:46:17 +03001694 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001695 addr.seg = VCPU_SREG_SS;
1696
Avi Kivity51ddff52012-06-12 20:19:40 +03001697 return segmented_write(ctxt, addr, data, bytes);
1698}
1699
1700static int em_push(struct x86_emulate_ctxt *ctxt)
1701{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001702 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001703 ctxt->dst.type = OP_NONE;
Avi Kivity51ddff52012-06-12 20:19:40 +03001704 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001705}
1706
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001707static int emulate_pop(struct x86_emulate_ctxt *ctxt,
Avi Kivity350f69d2009-01-05 11:12:40 +02001708 void *dest, int len)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001709{
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001710 int rc;
Avi Kivity90de84f2010-11-17 15:28:21 +02001711 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001712
Avi Kivitydd856ef2012-08-27 23:46:17 +03001713 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Avi Kivity90de84f2010-11-17 15:28:21 +02001714 addr.seg = VCPU_SREG_SS;
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001715 rc = segmented_read(ctxt, addr, dest, len);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09001716 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001717 return rc;
1718
Avi Kivity5ad105e2012-08-19 14:34:31 +03001719 rsp_increment(ctxt, len);
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001720 return rc;
1721}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001722
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001723static int em_pop(struct x86_emulate_ctxt *ctxt)
1724{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001725 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001726}
1727
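/*
 * POPF may only modify the flags the current privilege level permits:
 * IOPL changes only at CPL 0, IF only when CPL <= IOPL, and a VM86 task
 * with IOPL < 3 gets #GP; real mode may change both IOPL and IF.
 */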
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001728static int emulate_popf(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001729 void *dest, int len)
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001730{
1731 int rc;
1732 unsigned long val, change_mask;
1733 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001734 int cpl = ctxt->ops->cpl(ctxt);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001735
Takuya Yoshikawa3b9be3b2011-05-02 02:27:55 +09001736 rc = emulate_pop(ctxt, &val, len);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001737 if (rc != X86EMUL_CONTINUE)
1738 return rc;
1739
1740 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
Nadav Amit163b1352014-07-21 14:37:28 +03001741 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001742
1743 switch(ctxt->mode) {
1744 case X86EMUL_MODE_PROT64:
1745 case X86EMUL_MODE_PROT32:
1746 case X86EMUL_MODE_PROT16:
1747 if (cpl == 0)
1748 change_mask |= EFLG_IOPL;
1749 if (cpl <= iopl)
1750 change_mask |= EFLG_IF;
1751 break;
1752 case X86EMUL_MODE_VM86:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001753 if (iopl < 3)
1754 return emulate_gp(ctxt, 0);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001755 change_mask |= EFLG_IF;
1756 break;
1757 default: /* real mode */
1758 change_mask |= (EFLG_IOPL | EFLG_IF);
1759 break;
1760 }
1761
1762 *(unsigned long *)dest =
1763 (ctxt->eflags & ~change_mask) | (val & change_mask);
1764
1765 return rc;
1766}
1767
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001768static int em_popf(struct x86_emulate_ctxt *ctxt)
1769{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001770 ctxt->dst.type = OP_REG;
1771 ctxt->dst.addr.reg = &ctxt->eflags;
1772 ctxt->dst.bytes = ctxt->op_bytes;
1773 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001774}
1775
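/*
 * ENTER: only a nesting level of zero is emulated; push RBP, copy RSP
 * into RBP and reserve 'frame_size' bytes of stack.
 */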
Avi Kivity612e89f2012-06-12 20:03:23 +03001776static int em_enter(struct x86_emulate_ctxt *ctxt)
1777{
1778 int rc;
1779 unsigned frame_size = ctxt->src.val;
1780 unsigned nesting_level = ctxt->src2.val & 31;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001781 ulong rbp;
Avi Kivity612e89f2012-06-12 20:03:23 +03001782
1783 if (nesting_level)
1784 return X86EMUL_UNHANDLEABLE;
1785
Avi Kivitydd856ef2012-08-27 23:46:17 +03001786 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1787 rc = push(ctxt, &rbp, stack_size(ctxt));
Avi Kivity612e89f2012-06-12 20:03:23 +03001788 if (rc != X86EMUL_CONTINUE)
1789 return rc;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001790 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
Avi Kivity612e89f2012-06-12 20:03:23 +03001791 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001792 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1793 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
Avi Kivity612e89f2012-06-12 20:03:23 +03001794 stack_mask(ctxt));
1795 return X86EMUL_CONTINUE;
1796}
1797
Avi Kivityf47cfa32012-06-07 17:49:24 +03001798static int em_leave(struct x86_emulate_ctxt *ctxt)
1799{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001800 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
Avi Kivityf47cfa32012-06-07 17:49:24 +03001801 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001802 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
Avi Kivityf47cfa32012-06-07 17:49:24 +03001803}
1804
Avi Kivity1cd196e2011-09-13 10:45:51 +03001805static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001806{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001807 int seg = ctxt->src2.val;
1808
Avi Kivity9dac77f2011-06-01 15:34:25 +03001809 ctxt->src.val = get_segment_selector(ctxt, seg);
Nadav Amit0fcc2072014-11-02 11:54:51 +02001810 if (ctxt->op_bytes == 4) {
1811 rsp_increment(ctxt, -2);
1812 ctxt->op_bytes = 2;
1813 }
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001814
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001815 return em_push(ctxt);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001816}
1817
Avi Kivity1cd196e2011-09-13 10:45:51 +03001818static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001819{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001820 int seg = ctxt->src2.val;
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001821 unsigned long selector;
1822 int rc;
1823
Avi Kivity9dac77f2011-06-01 15:34:25 +03001824 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001825 if (rc != X86EMUL_CONTINUE)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001826 return rc;
1827
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001828 if (ctxt->modrm_reg == VCPU_SREG_SS)
1829 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1830
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001831 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001832 return rc;
1833}
1834
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001835static int em_pusha(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001836{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001837 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001838 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001839 int reg = VCPU_REGS_RAX;
1840
1841 while (reg <= VCPU_REGS_RDI) {
1842 (reg == VCPU_REGS_RSP) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001843 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001844
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001845 rc = em_push(ctxt);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001846 if (rc != X86EMUL_CONTINUE)
1847 return rc;
1848
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001849 ++reg;
1850 }
Wei Yongjunc37eda12010-06-15 09:03:33 +08001851
Wei Yongjunc37eda12010-06-15 09:03:33 +08001852 return rc;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001853}
1854
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001855static int em_pushf(struct x86_emulate_ctxt *ctxt)
1856{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001857 ctxt->src.val = (unsigned long)ctxt->eflags;
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001858 return em_push(ctxt);
1859}
1860
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001861static int em_popa(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001862{
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001863 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001864 int reg = VCPU_REGS_RDI;
1865
1866 while (reg >= VCPU_REGS_RAX) {
1867 if (reg == VCPU_REGS_RSP) {
Avi Kivity5ad105e2012-08-19 14:34:31 +03001868 rsp_increment(ctxt, ctxt->op_bytes);
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001869 --reg;
1870 }
1871
Avi Kivitydd856ef2012-08-27 23:46:17 +03001872 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001873 if (rc != X86EMUL_CONTINUE)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001874 break;
1875 --reg;
1876 }
1877 return rc;
1878}
1879
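/*
 * Real-mode interrupt dispatch: push FLAGS, CS and IP, clear IF/TF/AC,
 * then load CS:IP from the interrupt vector table entry at irq * 4.
 */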
Avi Kivitydd856ef2012-08-27 23:46:17 +03001880static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001881{
Mathias Krause0225fb52012-08-30 01:30:16 +02001882 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001883 int rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001884 struct desc_ptr dt;
1885 gva_t cs_addr;
1886 gva_t eip_addr;
1887 u16 cs, eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001888
1889 /* TODO: Add limit checks */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001890 ctxt->src.val = ctxt->eflags;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001891 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001892 if (rc != X86EMUL_CONTINUE)
1893 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001894
1895 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1896
Avi Kivity9dac77f2011-06-01 15:34:25 +03001897 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001898 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001899 if (rc != X86EMUL_CONTINUE)
1900 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001901
Avi Kivity9dac77f2011-06-01 15:34:25 +03001902 ctxt->src.val = ctxt->_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001903 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001904 if (rc != X86EMUL_CONTINUE)
1905 return rc;
1906
Avi Kivity4bff1e862011-04-20 13:37:53 +03001907 ops->get_idt(ctxt, &dt);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001908
1909 eip_addr = dt.address + (irq << 2);
1910 cs_addr = dt.address + (irq << 2) + 2;
1911
Avi Kivity0f65dd72011-04-20 13:37:53 +03001912 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001913 if (rc != X86EMUL_CONTINUE)
1914 return rc;
1915
Avi Kivity0f65dd72011-04-20 13:37:53 +03001916 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001917 if (rc != X86EMUL_CONTINUE)
1918 return rc;
1919
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001920 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001921 if (rc != X86EMUL_CONTINUE)
1922 return rc;
1923
Avi Kivity9dac77f2011-06-01 15:34:25 +03001924 ctxt->_eip = eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001925
1926 return rc;
1927}
1928
Avi Kivitydd856ef2012-08-27 23:46:17 +03001929int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1930{
1931 int rc;
1932
1933 invalidate_registers(ctxt);
1934 rc = __emulate_int_real(ctxt, irq);
1935 if (rc == X86EMUL_CONTINUE)
1936 writeback_registers(ctxt);
1937 return rc;
1938}
1939
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001940static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001941{
1942 switch(ctxt->mode) {
1943 case X86EMUL_MODE_REAL:
Avi Kivitydd856ef2012-08-27 23:46:17 +03001944 return __emulate_int_real(ctxt, irq);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001945 case X86EMUL_MODE_VM86:
1946 case X86EMUL_MODE_PROT16:
1947 case X86EMUL_MODE_PROT32:
1948 case X86EMUL_MODE_PROT64:
1949 default:
1950 /* Protected mode interrupts unimplemented yet */
1951 return X86EMUL_UNHANDLEABLE;
1952 }
1953}
1954
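/*
 * Real-mode IRET: pop IP, CS and FLAGS, reload CS, and restore only the
 * flag bits in 'mask' (VM/VIF/VIP are preserved from the old EFLAGS).
 */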
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001955static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001956{
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001957 int rc = X86EMUL_CONTINUE;
1958 unsigned long temp_eip = 0;
1959 unsigned long temp_eflags = 0;
1960 unsigned long cs = 0;
1961 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1962 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1963 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1964 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1965
1966 /* TODO: Add stack limit check */
1967
Avi Kivity9dac77f2011-06-01 15:34:25 +03001968 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001969
1970 if (rc != X86EMUL_CONTINUE)
1971 return rc;
1972
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001973 if (temp_eip & ~0xffff)
1974 return emulate_gp(ctxt, 0);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001975
Avi Kivity9dac77f2011-06-01 15:34:25 +03001976 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001977
1978 if (rc != X86EMUL_CONTINUE)
1979 return rc;
1980
Avi Kivity9dac77f2011-06-01 15:34:25 +03001981 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001982
1983 if (rc != X86EMUL_CONTINUE)
1984 return rc;
1985
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001986 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001987
1988 if (rc != X86EMUL_CONTINUE)
1989 return rc;
1990
Avi Kivity9dac77f2011-06-01 15:34:25 +03001991 ctxt->_eip = temp_eip;
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001992
1993
Avi Kivity9dac77f2011-06-01 15:34:25 +03001994 if (ctxt->op_bytes == 4)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001995 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
Avi Kivity9dac77f2011-06-01 15:34:25 +03001996 else if (ctxt->op_bytes == 2) {
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001997 ctxt->eflags &= ~0xffff;
1998 ctxt->eflags |= temp_eflags;
1999 }
2000
2001 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2002 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2003
2004 return rc;
2005}
2006
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002007static int em_iret(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002008{
2009 switch(ctxt->mode) {
2010 case X86EMUL_MODE_REAL:
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002011 return emulate_iret_real(ctxt);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002012 case X86EMUL_MODE_VM86:
2013 case X86EMUL_MODE_PROT16:
2014 case X86EMUL_MODE_PROT32:
2015 case X86EMUL_MODE_PROT64:
2016 default:
2017 /* iret from protected mode unimplemented yet */
2018 return X86EMUL_UNHANDLEABLE;
2019 }
2020}
2021
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002022static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2023{
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002024 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002025 unsigned short sel, old_sel;
2026 struct desc_struct old_desc, new_desc;
2027 const struct x86_emulate_ops *ops = ctxt->ops;
2028 u8 cpl = ctxt->ops->cpl(ctxt);
2029
2030 /* Assignment of RIP may only fail in 64-bit mode */
2031 if (ctxt->mode == X86EMUL_MODE_PROT64)
2032 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2033 VCPU_SREG_CS);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002034
Avi Kivity9dac77f2011-06-01 15:34:25 +03002035 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002036
Nadav Amitd1442d82014-09-18 22:39:39 +03002037 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2038 &new_desc);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002039 if (rc != X86EMUL_CONTINUE)
2040 return rc;
2041
Nadav Amitd50eaa12014-11-19 17:43:11 +02002042 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03002043 if (rc != X86EMUL_CONTINUE) {
Nadav Amitcd9b8e2c2014-10-28 00:03:43 +02002044 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002045 /* assigning eip failed; restore the old cs */
2046 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2047 return rc;
2048 }
2049 return rc;
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002050}
2051
Nadav Amitf7784042014-09-18 22:39:41 +03002052static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002053{
Nadav Amitf7784042014-09-18 22:39:41 +03002054 return assign_eip_near(ctxt, ctxt->src.val);
2055}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002056
Nadav Amitf7784042014-09-18 22:39:41 +03002057static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2058{
2059 int rc;
2060 long int old_eip;
2061
2062 old_eip = ctxt->_eip;
2063 rc = assign_eip_near(ctxt, ctxt->src.val);
2064 if (rc != X86EMUL_CONTINUE)
2065 return rc;
2066 ctxt->src.val = old_eip;
2067 rc = em_push(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09002068 return rc;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002069}
2070
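/*
 * CMPXCHG8B: if EDX:EAX equals the 64-bit destination, set ZF and store
 * ECX:EBX there; otherwise clear ZF and load the old value into EDX:EAX.
 * The 16-byte (CMPXCHG16B) form is not handled.
 */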
Takuya Yoshikawae0dac402011-12-06 18:07:27 +09002071static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002072{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002073 u64 old = ctxt->dst.orig_val64;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002074
Nadav Amitaaa05f22014-06-02 18:34:10 +03002075 if (ctxt->dst.bytes == 16)
2076 return X86EMUL_UNHANDLEABLE;
2077
Avi Kivitydd856ef2012-08-27 23:46:17 +03002078 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2079 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2080 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2081 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
Laurent Vivier05f086f2007-09-24 11:10:55 +02002082 ctxt->eflags &= ~EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002083 } else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03002084 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2085 (u32) reg_read(ctxt, VCPU_REGS_RBX);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002086
Laurent Vivier05f086f2007-09-24 11:10:55 +02002087 ctxt->eflags |= EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002088 }
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002089 return X86EMUL_CONTINUE;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002090}
2091
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002092static int em_ret(struct x86_emulate_ctxt *ctxt)
2093{
Nadav Amit234f3ce2014-09-18 22:39:38 +03002094 int rc;
2095 unsigned long eip;
2096
2097 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2098 if (rc != X86EMUL_CONTINUE)
2099 return rc;
2100
2101 return assign_eip_near(ctxt, eip);
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002102}
2103
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002104static int em_ret_far(struct x86_emulate_ctxt *ctxt)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002105{
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002106 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002107 unsigned long eip, cs;
2108 u16 old_cs;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002109 int cpl = ctxt->ops->cpl(ctxt);
Nadav Amitd1442d82014-09-18 22:39:39 +03002110 struct desc_struct old_desc, new_desc;
2111 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002112
Nadav Amitd1442d82014-09-18 22:39:39 +03002113 if (ctxt->mode == X86EMUL_MODE_PROT64)
2114 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2115 VCPU_SREG_CS);
2116
2117 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002118 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002119 return rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002120 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002121 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002122 return rc;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002123 /* Outer-privilege level return is not implemented */
2124 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2125 return X86EMUL_UNHANDLEABLE;
Nadav Amitd1442d82014-09-18 22:39:39 +03002126 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
2127 &new_desc);
2128 if (rc != X86EMUL_CONTINUE)
2129 return rc;
Nadav Amitd50eaa12014-11-19 17:43:11 +02002130 rc = assign_eip_far(ctxt, eip, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03002131 if (rc != X86EMUL_CONTINUE) {
Nadav Amitcd9b8e2c2014-10-28 00:03:43 +02002132 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002133 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2134 }
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002135 return rc;
2136}
2137
Bruce Rogers32611072013-09-09 09:40:20 -06002138static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2139{
2140 int rc;
2141
2142 rc = em_ret_far(ctxt);
2143 if (rc != X86EMUL_CONTINUE)
2144 return rc;
2145 rsp_increment(ctxt, ctxt->src.val);
2146 return X86EMUL_CONTINUE;
2147}
2148
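/*
 * CMPXCHG: compare the accumulator with the destination; on a match
 * (ZF set) write the source to the destination, otherwise load the
 * destination's old value back into the accumulator.
 */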
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002149static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2150{
2151 /* Save real source value, then compare EAX against destination. */
Nadav Amit37c564f2014-06-02 18:34:07 +03002152 ctxt->dst.orig_val = ctxt->dst.val;
2153 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002154 ctxt->src.orig_val = ctxt->src.val;
Nadav Amit37c564f2014-06-02 18:34:07 +03002155 ctxt->src.val = ctxt->dst.orig_val;
Avi Kivity158de572013-01-19 19:51:57 +02002156 fastop(ctxt, em_cmp);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002157
2158 if (ctxt->eflags & EFLG_ZF) {
2159 /* Success: write back to memory. */
2160 ctxt->dst.val = ctxt->src.orig_val;
2161 } else {
2162 /* Failure: write the value we saw to EAX. */
2163 ctxt->dst.type = OP_REG;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002164 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Nadav Amit37c564f2014-06-02 18:34:07 +03002165 ctxt->dst.val = ctxt->dst.orig_val;
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002166 }
2167 return X86EMUL_CONTINUE;
2168}
2169
Avi Kivityd4b43252011-09-13 10:45:50 +03002170static int em_lseg(struct x86_emulate_ctxt *ctxt)
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002171{
Avi Kivityd4b43252011-09-13 10:45:50 +03002172 int seg = ctxt->src2.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002173 unsigned short sel;
2174 int rc;
2175
Avi Kivity9dac77f2011-06-01 15:34:25 +03002176 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002177
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002178 rc = load_segment_descriptor(ctxt, sel, seg);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002179 if (rc != X86EMUL_CONTINUE)
2180 return rc;
2181
Avi Kivity9dac77f2011-06-01 15:34:25 +03002182 ctxt->dst.val = ctxt->src.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002183 return rc;
2184}
2185
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002186static void
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002187setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002188 struct desc_struct *cs, struct desc_struct *ss)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002189{
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002190 cs->l = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002191 set_desc_base(cs, 0); /* flat segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002192 cs->g = 1; /* 4kb granularity */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002193 set_desc_limit(cs, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002194 cs->type = 0x0b; /* Read, Execute, Accessed */
2195 cs->s = 1;
2196 cs->dpl = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002197 cs->p = 1;
2198 cs->d = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002199 cs->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002200
Gleb Natapov79168fd2010-04-28 19:15:30 +03002201 set_desc_base(ss, 0); /* flat segment */
2202 set_desc_limit(ss, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002203 ss->g = 1; /* 4kb granularity */
2204 ss->s = 1;
2205 ss->type = 0x03; /* Read/Write, Accessed */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002206 ss->d = 1; /* 32bit stack segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002207 ss->dpl = 0;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002208 ss->p = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002209 ss->l = 0;
2210 ss->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002211}
2212
Avi Kivity1a18a692012-02-01 12:23:21 +02002213static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2214{
2215 u32 eax, ebx, ecx, edx;
2216
2217 eax = ecx = 0;
Avi Kivity0017f932012-06-07 14:10:16 +03002218 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2219 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
Avi Kivity1a18a692012-02-01 12:23:21 +02002220 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2221 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2222}
2223
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002224static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2225{
Mathias Krause0225fb52012-08-30 01:30:16 +02002226 const struct x86_emulate_ops *ops = ctxt->ops;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002227 u32 eax, ebx, ecx, edx;
2228
2229 /*
2230 * syscall is always enabled in long mode, so the check only becomes
2231 * vendor specific (via cpuid) when other modes are active.
2232 */
2233 if (ctxt->mode == X86EMUL_MODE_PROT64)
2234 return true;
2235
2236 eax = 0x00000000;
2237 ecx = 0x00000000;
Avi Kivity0017f932012-06-07 14:10:16 +03002238 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2239 /*
2240 * Intel ("GenuineIntel")
2241 * remark: Intel CPUs only support "syscall" in 64-bit long
2242 * mode, so a 64-bit guest running a 32-bit compat app will
2243 * #UD. While this behaviour could be fixed by emulating the
2244 * AMD response, AMD CPUs cannot be made to behave like
2245 * Intel ones.
2246 */
2247 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2248 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2249 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2250 return false;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002251
Avi Kivity0017f932012-06-07 14:10:16 +03002252 /* AMD ("AuthenticAMD") */
2253 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2254 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2255 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2256 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002257
Avi Kivity0017f932012-06-07 14:10:16 +03002258 /* AMD ("AMDisbetter!") */
2259 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2260 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2261 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2262 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002263
2264 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2265 return false;
2266}
2267
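/*
 * SYSCALL: load CS/SS from MSR_STAR, save the return RIP in RCX (and
 * RFLAGS in R11 in long mode), then jump to MSR_LSTAR/MSR_CSTAR with
 * RFLAGS masked by MSR_SYSCALL_MASK; legacy mode uses STAR[31:0] as EIP.
 */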
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002268static int em_syscall(struct x86_emulate_ctxt *ctxt)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002269{
Mathias Krause0225fb52012-08-30 01:30:16 +02002270 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002271 struct desc_struct cs, ss;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002272 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002273 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002274 u64 efer = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002275
2276 /* syscall is not available in real mode */
Gleb Natapov2e901c42010-03-18 15:20:12 +02002277 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002278 ctxt->mode == X86EMUL_MODE_VM86)
2279 return emulate_ud(ctxt);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002280
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002281 if (!(em_syscall_is_enabled(ctxt)))
2282 return emulate_ud(ctxt);
2283
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002284 ops->get_msr(ctxt, MSR_EFER, &efer);
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002285 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002286
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002287 if (!(efer & EFER_SCE))
2288 return emulate_ud(ctxt);
2289
Avi Kivity717746e2011-04-20 13:37:53 +03002290 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002291 msr_data >>= 32;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002292 cs_sel = (u16)(msr_data & 0xfffc);
2293 ss_sel = (u16)(msr_data + 8);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002294
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002295 if (efer & EFER_LMA) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002296 cs.d = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002297 cs.l = 1;
2298 }
Avi Kivity1aa36612011-04-27 13:20:30 +03002299 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2300 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002301
Avi Kivitydd856ef2012-08-27 23:46:17 +03002302 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002303 if (efer & EFER_LMA) {
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002304#ifdef CONFIG_X86_64
Nadav Amit6c6cb692014-07-21 14:37:30 +03002305 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002306
Avi Kivity717746e2011-04-20 13:37:53 +03002307 ops->get_msr(ctxt,
Gleb Natapov3fb1b5d2010-04-28 19:15:28 +03002308 ctxt->mode == X86EMUL_MODE_PROT64 ?
2309 MSR_LSTAR : MSR_CSTAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002310 ctxt->_eip = msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002311
Avi Kivity717746e2011-04-20 13:37:53 +03002312 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
Nadav Amit6c6cb692014-07-21 14:37:30 +03002313 ctxt->eflags &= ~msr_data;
Nadav Amit807c1422014-11-02 11:54:49 +02002314 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002315#endif
2316 } else {
2317 /* legacy mode */
Avi Kivity717746e2011-04-20 13:37:53 +03002318 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002319 ctxt->_eip = (u32)msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002320
Nadav Amit6c6cb692014-07-21 14:37:30 +03002321 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002322 }
2323
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002324 return X86EMUL_CONTINUE;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002325}
2326
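/*
 * SYSENTER: #GP in real mode, #UD on AMD in compat mode, unhandled in
 * 64-bit mode; CS/SS are derived from MSR_IA32_SYSENTER_CS and the entry
 * point comes from MSR_IA32_SYSENTER_EIP/ESP, with VM and IF cleared.
 */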
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002327static int em_sysenter(struct x86_emulate_ctxt *ctxt)
Andre Przywara8c604352009-06-18 12:56:01 +02002328{
Mathias Krause0225fb52012-08-30 01:30:16 +02002329 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002330 struct desc_struct cs, ss;
Andre Przywara8c604352009-06-18 12:56:01 +02002331 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002332 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002333 u64 efer = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002334
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002335 ops->get_msr(ctxt, MSR_EFER, &efer);
Gleb Natapova0044752010-02-10 14:21:31 +02002336 /* inject #GP if in real mode */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002337 if (ctxt->mode == X86EMUL_MODE_REAL)
2338 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002339
Avi Kivity1a18a692012-02-01 12:23:21 +02002340 /*
2341 * Not recognized on AMD in compat mode (but is recognized in legacy
2342 * mode).
2343 */
2344 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2345 && !vendor_intel(ctxt))
2346 return emulate_ud(ctxt);
2347
Nadav Amitb2c9d432014-11-02 11:55:01 +02002348 /* sysenter/sysexit have not been tested in 64bit mode. */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002349 if (ctxt->mode == X86EMUL_MODE_PROT64)
Nadav Amitb2c9d432014-11-02 11:55:01 +02002350 return X86EMUL_UNHANDLEABLE;
Andre Przywara8c604352009-06-18 12:56:01 +02002351
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002352 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara8c604352009-06-18 12:56:01 +02002353
Avi Kivity717746e2011-04-20 13:37:53 +03002354 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara8c604352009-06-18 12:56:01 +02002355 switch (ctxt->mode) {
2356 case X86EMUL_MODE_PROT32:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002357 if ((msr_data & 0xfffc) == 0x0)
2358 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002359 break;
2360 case X86EMUL_MODE_PROT64:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002361 if (msr_data == 0x0)
2362 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002363 break;
Gleb Natapov9d1b39a2012-09-03 15:24:27 +03002364 default:
2365 break;
Andre Przywara8c604352009-06-18 12:56:01 +02002366 }
2367
Nadav Amit6c6cb692014-07-21 14:37:30 +03002368 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002369 cs_sel = (u16)msr_data;
2370 cs_sel &= ~SELECTOR_RPL_MASK;
2371 ss_sel = cs_sel + 8;
2372 ss_sel &= ~SELECTOR_RPL_MASK;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002373 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002374 cs.d = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002375 cs.l = 1;
2376 }
2377
Avi Kivity1aa36612011-04-27 13:20:30 +03002378 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2379 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara8c604352009-06-18 12:56:01 +02002380
Avi Kivity717746e2011-04-20 13:37:53 +03002381 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002382 ctxt->_eip = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002383
Avi Kivity717746e2011-04-20 13:37:53 +03002384 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002385 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002386
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002387 return X86EMUL_CONTINUE;
Andre Przywara8c604352009-06-18 12:56:01 +02002388}
2389
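/*
 * SYSEXIT: return to user mode with CS/SS derived from
 * MSR_IA32_SYSENTER_CS, RIP taken from RDX and RSP from RCX; 64-bit
 * returns additionally require canonical RCX/RDX.
 */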
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002390static int em_sysexit(struct x86_emulate_ctxt *ctxt)
Andre Przywara4668f052009-06-18 12:56:02 +02002391{
Mathias Krause0225fb52012-08-30 01:30:16 +02002392 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002393 struct desc_struct cs, ss;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002394 u64 msr_data, rcx, rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002395 int usermode;
Xiao Guangrong1249b962011-05-15 23:25:10 +08002396 u16 cs_sel = 0, ss_sel = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002397
Gleb Natapova0044752010-02-10 14:21:31 +02002398 /* inject #GP if in real mode or Virtual 8086 mode */
2399 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002400 ctxt->mode == X86EMUL_MODE_VM86)
2401 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002402
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002403 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara4668f052009-06-18 12:56:02 +02002404
Avi Kivity9dac77f2011-06-01 15:34:25 +03002405 if ((ctxt->rex_prefix & 0x8) != 0x0)
Andre Przywara4668f052009-06-18 12:56:02 +02002406 usermode = X86EMUL_MODE_PROT64;
2407 else
2408 usermode = X86EMUL_MODE_PROT32;
2409
Nadav Amit234f3ce2014-09-18 22:39:38 +03002410 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2411 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2412
Andre Przywara4668f052009-06-18 12:56:02 +02002413 cs.dpl = 3;
2414 ss.dpl = 3;
Avi Kivity717746e2011-04-20 13:37:53 +03002415 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara4668f052009-06-18 12:56:02 +02002416 switch (usermode) {
2417 case X86EMUL_MODE_PROT32:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002418 cs_sel = (u16)(msr_data + 16);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002419 if ((msr_data & 0xfffc) == 0x0)
2420 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002421 ss_sel = (u16)(msr_data + 24);
Nadav Amitbf0b6822014-09-18 22:39:45 +03002422 rcx = (u32)rcx;
2423 rdx = (u32)rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002424 break;
2425 case X86EMUL_MODE_PROT64:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002426 cs_sel = (u16)(msr_data + 32);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002427 if (msr_data == 0x0)
2428 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002429 ss_sel = cs_sel + 8;
2430 cs.d = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002431 cs.l = 1;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002432 if (is_noncanonical_address(rcx) ||
2433 is_noncanonical_address(rdx))
2434 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002435 break;
2436 }
Gleb Natapov79168fd2010-04-28 19:15:30 +03002437 cs_sel |= SELECTOR_RPL_MASK;
2438 ss_sel |= SELECTOR_RPL_MASK;
Andre Przywara4668f052009-06-18 12:56:02 +02002439
Avi Kivity1aa36612011-04-27 13:20:30 +03002440 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2441 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara4668f052009-06-18 12:56:02 +02002442
Nadav Amit234f3ce2014-09-18 22:39:38 +03002443 ctxt->_eip = rdx;
2444 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
Andre Przywara4668f052009-06-18 12:56:02 +02002445
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002446 return X86EMUL_CONTINUE;
Andre Przywara4668f052009-06-18 12:56:02 +02002447}
2448
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002449static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002450{
2451 int iopl;
2452 if (ctxt->mode == X86EMUL_MODE_REAL)
2453 return false;
2454 if (ctxt->mode == X86EMUL_MODE_VM86)
2455 return true;
2456 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002457 return ctxt->ops->cpl(ctxt) > iopl;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002458}
2459
2460static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002461 u16 port, u16 len)
2462{
Mathias Krause0225fb52012-08-30 01:30:16 +02002463 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002464 struct desc_struct tr_seg;
Gleb Natapov5601d052011-03-07 14:55:06 +02002465 u32 base3;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002466 int r;
Avi Kivity1aa36612011-04-27 13:20:30 +03002467 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002468 unsigned mask = (1 << len) - 1;
Gleb Natapov5601d052011-03-07 14:55:06 +02002469 unsigned long base;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002470
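	/*
	 * Consult the I/O permission bitmap in the TSS: the 16-bit bitmap
	 * base is stored at TSS offset 102, and a port is accessible only
	 * if every bit covering it is clear.
	 */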
Avi Kivity1aa36612011-04-27 13:20:30 +03002471 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002472 if (!tr_seg.p)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002473 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002474 if (desc_limit_scaled(&tr_seg) < 103)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002475 return false;
Gleb Natapov5601d052011-03-07 14:55:06 +02002476 base = get_desc_base(&tr_seg);
2477#ifdef CONFIG_X86_64
2478 base |= ((u64)base3) << 32;
2479#endif
Avi Kivity0f65dd72011-04-20 13:37:53 +03002480 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002481 if (r != X86EMUL_CONTINUE)
2482 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002483 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002484 return false;
Avi Kivity0f65dd72011-04-20 13:37:53 +03002485 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002486 if (r != X86EMUL_CONTINUE)
2487 return false;
2488 if ((perm >> bit_idx) & mask)
2489 return false;
2490 return true;
2491}
2492
2493static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002494 u16 port, u16 len)
2495{
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002496 if (ctxt->perm_ok)
2497 return true;
2498
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002499 if (emulator_bad_iopl(ctxt))
2500 if (!emulator_io_port_access_allowed(ctxt, port, len))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002501 return false;
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002502
2503 ctxt->perm_ok = true;
2504
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002505 return true;
2506}
2507
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002508static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002509 struct tss_segment_16 *tss)
2510{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002511 tss->ip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002512 tss->flag = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002513 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2514 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2515 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2516 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2517 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2518 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2519 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2520 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002521
Avi Kivity1aa36612011-04-27 13:20:30 +03002522 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2523 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2524 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2525 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2526 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002527}
2528
2529static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002530 struct tss_segment_16 *tss)
2531{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002532 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002533 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002534
Avi Kivity9dac77f2011-06-01 15:34:25 +03002535 ctxt->_eip = tss->ip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002536 ctxt->eflags = tss->flag | 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002537 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2538 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2539 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2540 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2541 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2542 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2543 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2544 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002545
2546 /*
2547 * SDM says that segment selectors are loaded before segment
2548 * descriptors
2549 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002550 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2551 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2552 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2553 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2554 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002555
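	/* The new CPL is the RPL of the CS selector coming from the TSS. */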
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002556 cpl = tss->cs & 3;
2557
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002558 /*
Guo Chaofc058682012-06-28 15:19:51 +08002559 * Now load segment descriptors. If a fault happens at this stage,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002560 * it is handled in the context of the new task.
2561 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002562 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2563 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002564 if (ret != X86EMUL_CONTINUE)
2565 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002566 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2567 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002568 if (ret != X86EMUL_CONTINUE)
2569 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002570 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2571 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002572 if (ret != X86EMUL_CONTINUE)
2573 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002574 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2575 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002576 if (ret != X86EMUL_CONTINUE)
2577 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002578 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2579 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002580 if (ret != X86EMUL_CONTINUE)
2581 return ret;
2582
2583 return X86EMUL_CONTINUE;
2584}
2585
2586static int task_switch_16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002587 u16 tss_selector, u16 old_tss_sel,
2588 ulong old_tss_base, struct desc_struct *new_desc)
2589{
Mathias Krause0225fb52012-08-30 01:30:16 +02002590 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002591 struct tss_segment_16 tss_seg;
2592 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002593 u32 new_tss_base = get_desc_base(new_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002594
Avi Kivity0f65dd72011-04-20 13:37:53 +03002595 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002596 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002597 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002598 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002599 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002600
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002601 save_state_to_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002602
Avi Kivity0f65dd72011-04-20 13:37:53 +03002603 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002604 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002605 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002606 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002607 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002608
Avi Kivity0f65dd72011-04-20 13:37:53 +03002609 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002610 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002611 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002612 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002613 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002614
2615 if (old_tss_sel != 0xffff) {
2616 tss_seg.prev_task_link = old_tss_sel;
2617
Avi Kivity0f65dd72011-04-20 13:37:53 +03002618 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002619 &tss_seg.prev_task_link,
2620 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002621 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002622 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002623 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002624 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002625 }
2626
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002627 return load_state_from_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002628}
2629
2630static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002631 struct tss_segment_32 *tss)
2632{
Nadav Amit5c7411e2014-04-07 18:37:47 +03002633 /* CR3 and the LDT selector are intentionally not saved */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002634 tss->eip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002635 tss->eflags = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002636 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2637 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2638 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2639 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2640 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2641 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2642 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2643 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002644
Avi Kivity1aa36612011-04-27 13:20:30 +03002645 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2646 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2647 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2648 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2649 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2650 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002651}
2652
2653static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002654 struct tss_segment_32 *tss)
2655{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002656 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002657 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002658
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002659 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002660 return emulate_gp(ctxt, 0);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002661 ctxt->_eip = tss->eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002662 ctxt->eflags = tss->eflags | 2;
Kevin Wolf4cee4792012-02-08 14:34:41 +01002663
2664 /* General purpose registers */
Avi Kivitydd856ef2012-08-27 23:46:17 +03002665 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2666 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2667 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2668 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2669 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2670 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2671 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2672 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002673
2674 /*
2675 * SDM says that segment selectors are loaded before segment
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002676 * descriptors. This is important because CPL checks will
2677 * use CS.RPL.
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002678 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002679 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2680 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2681 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2682 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2683 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2684 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2685 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002686
2687 /*
Kevin Wolf4cee4792012-02-08 14:34:41 +01002688 * If we're switching between Protected Mode and VM86, we need to make
2689 * sure to update the mode before loading the segment descriptors so
2690 * that the selectors are interpreted correctly.
Kevin Wolf4cee4792012-02-08 14:34:41 +01002691 */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002692 if (ctxt->eflags & X86_EFLAGS_VM) {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002693 ctxt->mode = X86EMUL_MODE_VM86;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002694 cpl = 3;
2695 } else {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002696 ctxt->mode = X86EMUL_MODE_PROT32;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002697 cpl = tss->cs & 3;
2698 }
Kevin Wolf4cee4792012-02-08 14:34:41 +01002699
2700 /*
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002701 * Now load segment descriptors. If a fault happens at this stage,
 2702 * it is handled in the context of the new task.
2703 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002704 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2705 cpl, true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002706 if (ret != X86EMUL_CONTINUE)
2707 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002708 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2709 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002710 if (ret != X86EMUL_CONTINUE)
2711 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002712 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2713 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002714 if (ret != X86EMUL_CONTINUE)
2715 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002716 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2717 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002718 if (ret != X86EMUL_CONTINUE)
2719 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002720 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2721 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002722 if (ret != X86EMUL_CONTINUE)
2723 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002724 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2725 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002726 if (ret != X86EMUL_CONTINUE)
2727 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002728 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2729 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002730 if (ret != X86EMUL_CONTINUE)
2731 return ret;
2732
2733 return X86EMUL_CONTINUE;
2734}
2735
2736static int task_switch_32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002737 u16 tss_selector, u16 old_tss_sel,
2738 ulong old_tss_base, struct desc_struct *new_desc)
2739{
Mathias Krause0225fb52012-08-30 01:30:16 +02002740 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002741 struct tss_segment_32 tss_seg;
2742 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002743 u32 new_tss_base = get_desc_base(new_desc);
Nadav Amit5c7411e2014-04-07 18:37:47 +03002744 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2745 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002746
Avi Kivity0f65dd72011-04-20 13:37:53 +03002747 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002748 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002749 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002750 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002751 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002752
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002753 save_state_to_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002754
Nadav Amit5c7411e2014-04-07 18:37:47 +03002755 /* Only GP registers and segment selectors are saved */
2756 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2757 ldt_sel_offset - eip_offset, &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002758 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002759 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002760 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002761
Avi Kivity0f65dd72011-04-20 13:37:53 +03002762 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002763 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002764 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002765 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002766 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002767
2768 if (old_tss_sel != 0xffff) {
2769 tss_seg.prev_task_link = old_tss_sel;
2770
Avi Kivity0f65dd72011-04-20 13:37:53 +03002771 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002772 &tss_seg.prev_task_link,
2773 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002774 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002775 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002776 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002777 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002778 }
2779
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002780 return load_state_from_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002781}
2782
2783static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002784 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002785 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002786{
Mathias Krause0225fb52012-08-30 01:30:16 +02002787 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002788 struct desc_struct curr_tss_desc, next_tss_desc;
2789 int ret;
Avi Kivity1aa36612011-04-27 13:20:30 +03002790 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002791 ulong old_tss_base =
Avi Kivity4bff1e862011-04-20 13:37:53 +03002792 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
Gleb Natapovceffb452010-03-18 15:20:19 +02002793 u32 desc_limit;
Avi Kivitye9194642012-06-13 16:29:39 +03002794 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002795
2796 /* FIXME: old_tss_base == ~0 ? */
2797
Avi Kivitye9194642012-06-13 16:29:39 +03002798 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002799 if (ret != X86EMUL_CONTINUE)
2800 return ret;
Avi Kivitye9194642012-06-13 16:29:39 +03002801 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002802 if (ret != X86EMUL_CONTINUE)
2803 return ret;
2804
2805 /* FIXME: check that next_tss_desc is tss */
2806
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002807 /*
2808 * Check privileges. The three cases are task switch caused by...
2809 *
2810 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2811 * 2. Exception/IRQ/iret: No check is performed
Nadav Amit2c2ca2d2014-11-02 11:54:57 +02002812 * 3. jmp/call to TSS/task-gate: No check is performed since the
2813 * hardware checks it before exiting.
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002814 */
2815 if (reason == TASK_SWITCH_GATE) {
2816 if (idt_index != -1) {
2817 /* Software interrupts */
2818 struct desc_struct task_gate_desc;
2819 int dpl;
2820
2821 ret = read_interrupt_descriptor(ctxt, idt_index,
2822 &task_gate_desc);
2823 if (ret != X86EMUL_CONTINUE)
2824 return ret;
2825
2826 dpl = task_gate_desc.dpl;
2827 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2828 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2829 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002830 }
2831
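	/*
	 * The new TSS must be present and have a sufficient limit: at
	 * least 0x67 for a 32-bit TSS and 0x2b for a 16-bit one,
	 * otherwise #TS is raised.
	 */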
Gleb Natapovceffb452010-03-18 15:20:19 +02002832 desc_limit = desc_limit_scaled(&next_tss_desc);
2833 if (!next_tss_desc.p ||
2834 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2835 desc_limit < 0x2b)) {
Paolo Bonzini592f0852014-08-20 10:05:08 +02002836 return emulate_ts(ctxt, tss_selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002837 }
2838
2839 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2840 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002841 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002842 }
2843
2844 if (reason == TASK_SWITCH_IRET)
2845 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2846
2847 /* set back link to prev task only if NT bit is set in eflags
Guo Chaofc058682012-06-28 15:19:51 +08002848 note that old_tss_sel is not used after this point */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002849 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2850 old_tss_sel = 0xffff;
2851
2852 if (next_tss_desc.type & 8)
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002853 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002854 old_tss_base, &next_tss_desc);
2855 else
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002856 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002857 old_tss_base, &next_tss_desc);
Jan Kiszka0760d442010-04-14 15:50:57 +02002858 if (ret != X86EMUL_CONTINUE)
2859 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002860
2861 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2862 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2863
2864 if (reason != TASK_SWITCH_IRET) {
2865 next_tss_desc.type |= (1 << 1); /* set busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002866 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002867 }
2868
Avi Kivity717746e2011-04-20 13:37:53 +03002869 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
Avi Kivity1aa36612011-04-27 13:20:30 +03002870 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002871
Jan Kiszkae269fb22010-04-14 15:51:09 +02002872 if (has_error_code) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002873 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2874 ctxt->lock_prefix = 0;
2875 ctxt->src.val = (unsigned long) error_code;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002876 ret = em_push(ctxt);
Jan Kiszkae269fb22010-04-14 15:51:09 +02002877 }
2878
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002879 return ret;
2880}
2881
2882int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002883 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002884 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002885{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002886 int rc;
2887
Avi Kivitydd856ef2012-08-27 23:46:17 +03002888 invalidate_registers(ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002889 ctxt->_eip = ctxt->eip;
2890 ctxt->dst.type = OP_NONE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002891
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002892 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002893 has_error_code, error_code);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002894
Avi Kivitydd856ef2012-08-27 23:46:17 +03002895 if (rc == X86EMUL_CONTINUE) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002896 ctxt->eip = ctxt->_eip;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002897 writeback_registers(ctxt);
2898 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002899
Gleb Natapova0c0ab22011-03-28 16:57:49 +02002900 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002901}
2902
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03002903static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2904 struct operand *op)
Gleb Natapova682e352010-03-18 15:20:21 +02002905{
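	/*
	 * Advance the index register used by a string instruction by the
	 * number of elements processed, moving backwards when EFLAGS.DF
	 * is set.
	 */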
Gleb Natapovb3356bf2012-09-03 15:24:29 +03002906 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
Gleb Natapova682e352010-03-18 15:20:21 +02002907
Paolo Bonzini01485a22014-11-19 18:25:08 +01002908 register_address_increment(ctxt, reg, df * op->bytes);
2909 op->addr.mem.ea = register_address(ctxt, reg);
Gleb Natapova682e352010-03-18 15:20:21 +02002910}
2911
Avi Kivity7af04fc2010-08-18 14:16:35 +03002912static int em_das(struct x86_emulate_ctxt *ctxt)
2913{
Avi Kivity7af04fc2010-08-18 14:16:35 +03002914 u8 al, old_al;
2915 bool af, cf, old_cf;
2916
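	/*
	 * DAS adjusts AL after a subtraction of packed BCD operands: if
	 * the low nibble is greater than 9 or AF is set, subtract 6; if
	 * AL was above 0x99 or CF was set, subtract 0x60 and set CF.
	 */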
2917 cf = ctxt->eflags & X86_EFLAGS_CF;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002918 al = ctxt->dst.val;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002919
2920 old_al = al;
2921 old_cf = cf;
2922 cf = false;
2923 af = ctxt->eflags & X86_EFLAGS_AF;
2924 if ((al & 0x0f) > 9 || af) {
2925 al -= 6;
2926 cf = old_cf | (al >= 250);
2927 af = true;
2928 } else {
2929 af = false;
2930 }
2931 if (old_al > 0x99 || old_cf) {
2932 al -= 0x60;
2933 cf = true;
2934 }
2935
Avi Kivity9dac77f2011-06-01 15:34:25 +03002936 ctxt->dst.val = al;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002937 /* Set PF, ZF, SF */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002938 ctxt->src.type = OP_IMM;
2939 ctxt->src.val = 0;
2940 ctxt->src.bytes = 1;
Avi Kivity158de572013-01-19 19:51:57 +02002941 fastop(ctxt, em_or);
Avi Kivity7af04fc2010-08-18 14:16:35 +03002942 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2943 if (cf)
2944 ctxt->eflags |= X86_EFLAGS_CF;
2945 if (af)
2946 ctxt->eflags |= X86_EFLAGS_AF;
2947 return X86EMUL_CONTINUE;
2948}
2949
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02002950static int em_aam(struct x86_emulate_ctxt *ctxt)
2951{
2952 u8 al, ah;
2953
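	/*
	 * AAM splits AL by the immediate base (10 by default): AH gets
	 * the quotient and AL the remainder; a zero divisor raises #DE.
	 */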
2954 if (ctxt->src.val == 0)
2955 return emulate_de(ctxt);
2956
2957 al = ctxt->dst.val & 0xff;
2958 ah = al / ctxt->src.val;
2959 al %= ctxt->src.val;
2960
2961 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2962
2963 /* Set PF, ZF, SF */
2964 ctxt->src.type = OP_IMM;
2965 ctxt->src.val = 0;
2966 ctxt->src.bytes = 1;
2967 fastop(ctxt, em_or);
2968
2969 return X86EMUL_CONTINUE;
2970}
2971
Gleb Natapov7f662272012-12-10 11:42:30 +02002972static int em_aad(struct x86_emulate_ctxt *ctxt)
2973{
2974 u8 al = ctxt->dst.val & 0xff;
2975 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2976
2977 al = (al + (ah * ctxt->src.val)) & 0xff;
2978
2979 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2980
Gleb Natapovf583c292013-02-13 17:50:39 +02002981 /* Set PF, ZF, SF */
2982 ctxt->src.type = OP_IMM;
2983 ctxt->src.val = 0;
2984 ctxt->src.bytes = 1;
2985 fastop(ctxt, em_or);
Gleb Natapov7f662272012-12-10 11:42:30 +02002986
2987 return X86EMUL_CONTINUE;
2988}
2989
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09002990static int em_call(struct x86_emulate_ctxt *ctxt)
2991{
Nadav Amit234f3ce2014-09-18 22:39:38 +03002992 int rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09002993 long rel = ctxt->src.val;
2994
2995 ctxt->src.val = (unsigned long)ctxt->_eip;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002996 rc = jmp_rel(ctxt, rel);
2997 if (rc != X86EMUL_CONTINUE)
2998 return rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09002999 return em_push(ctxt);
3000}
3001
Avi Kivity0ef753b2010-08-18 14:51:45 +03003002static int em_call_far(struct x86_emulate_ctxt *ctxt)
3003{
Avi Kivity0ef753b2010-08-18 14:51:45 +03003004 u16 sel, old_cs;
3005 ulong old_eip;
3006 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03003007 struct desc_struct old_desc, new_desc;
3008 const struct x86_emulate_ops *ops = ctxt->ops;
3009 int cpl = ctxt->ops->cpl(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003010
Avi Kivity9dac77f2011-06-01 15:34:25 +03003011 old_eip = ctxt->_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003012 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003013
Avi Kivity9dac77f2011-06-01 15:34:25 +03003014 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Nadav Amitd1442d82014-09-18 22:39:39 +03003015 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3016 &new_desc);
3017 if (rc != X86EMUL_CONTINUE)
Avi Kivity0ef753b2010-08-18 14:51:45 +03003018 return X86EMUL_CONTINUE;
3019
Nadav Amitd50eaa12014-11-19 17:43:11 +02003020 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03003021 if (rc != X86EMUL_CONTINUE)
3022 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003023
Avi Kivity9dac77f2011-06-01 15:34:25 +03003024 ctxt->src.val = old_cs;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09003025 rc = em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003026 if (rc != X86EMUL_CONTINUE)
Nadav Amitd1442d82014-09-18 22:39:39 +03003027 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003028
Avi Kivity9dac77f2011-06-01 15:34:25 +03003029 ctxt->src.val = old_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003030 rc = em_push(ctxt);
 3031 /* If we failed, we tainted the memory, but at the very least we should
 3032 restore CS */
3033 if (rc != X86EMUL_CONTINUE)
3034 goto fail;
3035 return rc;
3036fail:
3037 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3038 return rc;
3039
Avi Kivity0ef753b2010-08-18 14:51:45 +03003040}
3041
Avi Kivity40ece7c2010-08-18 15:12:09 +03003042static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3043{
Avi Kivity40ece7c2010-08-18 15:12:09 +03003044 int rc;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003045 unsigned long eip;
Avi Kivity40ece7c2010-08-18 15:12:09 +03003046
Nadav Amit234f3ce2014-09-18 22:39:38 +03003047 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3048 if (rc != X86EMUL_CONTINUE)
3049 return rc;
3050 rc = assign_eip_near(ctxt, eip);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003051 if (rc != X86EMUL_CONTINUE)
3052 return rc;
Avi Kivity5ad105e2012-08-19 14:34:31 +03003053 rsp_increment(ctxt, ctxt->src.val);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003054 return X86EMUL_CONTINUE;
3055}
3056
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003057static int em_xchg(struct x86_emulate_ctxt *ctxt)
3058{
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003059 /* Write back the register source. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003060 ctxt->src.val = ctxt->dst.val;
3061 write_register_operand(&ctxt->src);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003062
3063 /* Write back the memory destination with implicit LOCK prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003064 ctxt->dst.val = ctxt->src.orig_val;
3065 ctxt->lock_prefix = 1;
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003066 return X86EMUL_CONTINUE;
3067}
3068
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003069static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3070{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003071 ctxt->dst.val = ctxt->src2.val;
Avi Kivity4d758342013-01-19 19:51:55 +02003072 return fastop(ctxt, em_imul);
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003073}
3074
Avi Kivity61429142010-08-19 15:13:00 +03003075static int em_cwd(struct x86_emulate_ctxt *ctxt)
3076{
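	/*
	 * CWD/CDQ/CQO sign-extend the accumulator into DX/EDX/RDX: the
	 * expression below yields all ones if the source sign bit is set
	 * and zero otherwise.
	 */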
Avi Kivity9dac77f2011-06-01 15:34:25 +03003077 ctxt->dst.type = OP_REG;
3078 ctxt->dst.bytes = ctxt->src.bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03003079 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivity9dac77f2011-06-01 15:34:25 +03003080 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
Avi Kivity61429142010-08-19 15:13:00 +03003081
3082 return X86EMUL_CONTINUE;
3083}
3084
Avi Kivity48bb5d32010-08-18 18:54:34 +03003085static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3086{
Avi Kivity48bb5d32010-08-18 18:54:34 +03003087 u64 tsc = 0;
3088
Avi Kivity717746e2011-04-20 13:37:53 +03003089 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003090 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3091 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
Avi Kivity48bb5d32010-08-18 18:54:34 +03003092 return X86EMUL_CONTINUE;
3093}
3094
Avi Kivity222d21a2011-11-10 14:57:30 +02003095static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3096{
3097 u64 pmc;
3098
Avi Kivitydd856ef2012-08-27 23:46:17 +03003099 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
Avi Kivity222d21a2011-11-10 14:57:30 +02003100 return emulate_gp(ctxt, 0);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003101 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3102 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
Avi Kivity222d21a2011-11-10 14:57:30 +02003103 return X86EMUL_CONTINUE;
3104}
3105
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003106static int em_mov(struct x86_emulate_ctxt *ctxt)
3107{
Paolo Bonzini54cfdb32014-03-27 11:36:25 +01003108 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003109 return X86EMUL_CONTINUE;
3110}
3111
Borislav Petkov84cffe42013-10-29 12:54:56 +01003112#define FFL(x) bit(X86_FEATURE_##x)
3113
3114static int em_movbe(struct x86_emulate_ctxt *ctxt)
3115{
3116 u32 ebx, ecx, edx, eax = 1;
3117 u16 tmp;
3118
3119 /*
 3120 * Check that MOVBE is set in the guest-visible CPUID leaf.
3121 */
3122 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3123 if (!(ecx & FFL(MOVBE)))
3124 return emulate_ud(ctxt);
3125
3126 switch (ctxt->op_bytes) {
3127 case 2:
3128 /*
3129 * From MOVBE definition: "...When the operand size is 16 bits,
3130 * the upper word of the destination register remains unchanged
3131 * ..."
3132 *
 3133 * Casting either ->valptr or ->val to u16 breaks strict aliasing
 3134 * rules, so we have to do the operation almost by hand.
3135 */
3136 tmp = (u16)ctxt->src.val;
3137 ctxt->dst.val &= ~0xffffUL;
3138 ctxt->dst.val |= (unsigned long)swab16(tmp);
3139 break;
3140 case 4:
3141 ctxt->dst.val = swab32((u32)ctxt->src.val);
3142 break;
3143 case 8:
3144 ctxt->dst.val = swab64(ctxt->src.val);
3145 break;
3146 default:
Paolo Bonzini592f0852014-08-20 10:05:08 +02003147 BUG();
Borislav Petkov84cffe42013-10-29 12:54:56 +01003148 }
3149 return X86EMUL_CONTINUE;
3150}
3151
Takuya Yoshikawabc00f8d2011-11-22 15:19:19 +09003152static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3153{
3154 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3155 return emulate_gp(ctxt, 0);
3156
3157 /* Disable writeback. */
3158 ctxt->dst.type = OP_NONE;
3159 return X86EMUL_CONTINUE;
3160}
3161
3162static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3163{
3164 unsigned long val;
3165
3166 if (ctxt->mode == X86EMUL_MODE_PROT64)
3167 val = ctxt->src.val & ~0ULL;
3168 else
3169 val = ctxt->src.val & ~0U;
3170
3171 /* #UD condition is already handled. */
3172 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3173 return emulate_gp(ctxt, 0);
3174
3175 /* Disable writeback. */
3176 ctxt->dst.type = OP_NONE;
3177 return X86EMUL_CONTINUE;
3178}
3179
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003180static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3181{
3182 u64 msr_data;
3183
Avi Kivitydd856ef2012-08-27 23:46:17 +03003184 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3185 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3186 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003187 return emulate_gp(ctxt, 0);
3188
3189 return X86EMUL_CONTINUE;
3190}
3191
3192static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3193{
3194 u64 msr_data;
3195
Avi Kivitydd856ef2012-08-27 23:46:17 +03003196 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003197 return emulate_gp(ctxt, 0);
3198
Avi Kivitydd856ef2012-08-27 23:46:17 +03003199 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3200 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003201 return X86EMUL_CONTINUE;
3202}
3203
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003204static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3205{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003206 if (ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003207 return emulate_ud(ctxt);
3208
Avi Kivity9dac77f2011-06-01 15:34:25 +03003209 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
Nadav Amitb5bbf102014-11-02 11:54:46 +02003210 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3211 ctxt->dst.bytes = 2;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003212 return X86EMUL_CONTINUE;
3213}
3214
3215static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3216{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003217 u16 sel = ctxt->src.val;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003218
Avi Kivity9dac77f2011-06-01 15:34:25 +03003219 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003220 return emulate_ud(ctxt);
3221
Avi Kivity9dac77f2011-06-01 15:34:25 +03003222 if (ctxt->modrm_reg == VCPU_SREG_SS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003223 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3224
3225 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003226 ctxt->dst.type = OP_NONE;
3227 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003228}
3229
Avi Kivitya14e5792012-06-13 12:28:33 +03003230static int em_lldt(struct x86_emulate_ctxt *ctxt)
3231{
3232 u16 sel = ctxt->src.val;
3233
3234 /* Disable writeback. */
3235 ctxt->dst.type = OP_NONE;
3236 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3237}
3238
Avi Kivity80890002012-06-13 16:33:29 +03003239static int em_ltr(struct x86_emulate_ctxt *ctxt)
3240{
3241 u16 sel = ctxt->src.val;
3242
3243 /* Disable writeback. */
3244 ctxt->dst.type = OP_NONE;
3245 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3246}
3247
Avi Kivity38503912011-03-31 18:48:09 +02003248static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3249{
Avi Kivity9fa088f2011-03-31 18:54:30 +02003250 int rc;
3251 ulong linear;
3252
Avi Kivity9dac77f2011-06-01 15:34:25 +03003253 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02003254 if (rc == X86EMUL_CONTINUE)
Avi Kivity3cb16fe2011-04-20 15:38:44 +03003255 ctxt->ops->invlpg(ctxt, linear);
Avi Kivity38503912011-03-31 18:48:09 +02003256 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003257 ctxt->dst.type = OP_NONE;
Avi Kivity38503912011-03-31 18:48:09 +02003258 return X86EMUL_CONTINUE;
3259}
3260
Avi Kivity2d04a052011-04-20 15:32:49 +03003261static int em_clts(struct x86_emulate_ctxt *ctxt)
3262{
3263 ulong cr0;
3264
3265 cr0 = ctxt->ops->get_cr(ctxt, 0);
3266 cr0 &= ~X86_CR0_TS;
3267 ctxt->ops->set_cr(ctxt, 0, cr0);
3268 return X86EMUL_CONTINUE;
3269}
3270
Avi Kivity26d05cc2011-04-21 12:07:59 +03003271static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3272{
Nadav Amit0f54a322014-08-29 11:26:55 +03003273 int rc = ctxt->ops->fix_hypercall(ctxt);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003274
Avi Kivity26d05cc2011-04-21 12:07:59 +03003275 if (rc != X86EMUL_CONTINUE)
3276 return rc;
3277
3278 /* Let the processor re-execute the fixed hypercall */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003279 ctxt->_eip = ctxt->eip;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003280 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003281 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003282 return X86EMUL_CONTINUE;
3283}
3284
Avi Kivity96051572012-06-10 17:21:18 +03003285static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3286 void (*get)(struct x86_emulate_ctxt *ctxt,
3287 struct desc_ptr *ptr))
3288{
3289 struct desc_ptr desc_ptr;
3290
3291 if (ctxt->mode == X86EMUL_MODE_PROT64)
3292 ctxt->op_bytes = 8;
3293 get(ctxt, &desc_ptr);
3294 if (ctxt->op_bytes == 2) {
3295 ctxt->op_bytes = 4;
3296 desc_ptr.address &= 0x00ffffff;
3297 }
3298 /* Disable writeback. */
3299 ctxt->dst.type = OP_NONE;
3300 return segmented_write(ctxt, ctxt->dst.addr.mem,
3301 &desc_ptr, 2 + ctxt->op_bytes);
3302}
3303
3304static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3305{
3306 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3307}
3308
3309static int em_sidt(struct x86_emulate_ctxt *ctxt)
3310{
3311 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3312}
3313
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003314static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003315{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003316 struct desc_ptr desc_ptr;
3317 int rc;
3318
Avi Kivity510425f2012-06-07 17:04:36 +03003319 if (ctxt->mode == X86EMUL_MODE_PROT64)
3320 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003321 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Avi Kivity26d05cc2011-04-21 12:07:59 +03003322 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003323 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003324 if (rc != X86EMUL_CONTINUE)
3325 return rc;
Nadav Amit9a9abf62014-11-02 11:54:56 +02003326 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3327 is_noncanonical_address(desc_ptr.address))
3328 return emulate_gp(ctxt, 0);
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003329 if (lgdt)
3330 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3331 else
3332 ctxt->ops->set_idt(ctxt, &desc_ptr);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003333 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003334 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003335 return X86EMUL_CONTINUE;
3336}
3337
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003338static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3339{
3340 return em_lgdt_lidt(ctxt, true);
3341}
3342
Avi Kivity5ef39c72011-04-21 12:21:50 +03003343static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003344{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003345 int rc;
3346
Avi Kivity5ef39c72011-04-21 12:21:50 +03003347 rc = ctxt->ops->fix_hypercall(ctxt);
3348
Avi Kivity26d05cc2011-04-21 12:07:59 +03003349 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003350 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003351 return rc;
3352}
3353
3354static int em_lidt(struct x86_emulate_ctxt *ctxt)
3355{
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003356 return em_lgdt_lidt(ctxt, false);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003357}
3358
3359static int em_smsw(struct x86_emulate_ctxt *ctxt)
3360{
Nadav Amit32e94d02014-06-02 18:34:11 +03003361 if (ctxt->dst.type == OP_MEM)
3362 ctxt->dst.bytes = 2;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003363 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003364 return X86EMUL_CONTINUE;
3365}
3366
3367static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3368{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003369 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003370 | (ctxt->src.val & 0x0f));
3371 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003372 return X86EMUL_CONTINUE;
3373}
3374
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003375static int em_loop(struct x86_emulate_ctxt *ctxt)
3376{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003377 int rc = X86EMUL_CONTINUE;
3378
Paolo Bonzini01485a22014-11-19 18:25:08 +01003379 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003380 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
Avi Kivity9dac77f2011-06-01 15:34:25 +03003381 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
Nadav Amit234f3ce2014-09-18 22:39:38 +03003382 rc = jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003383
Nadav Amit234f3ce2014-09-18 22:39:38 +03003384 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003385}
3386
3387static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3388{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003389 int rc = X86EMUL_CONTINUE;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003390
Nadav Amit234f3ce2014-09-18 22:39:38 +03003391 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3392 rc = jmp_rel(ctxt, ctxt->src.val);
3393
3394 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003395}
3396
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003397static int em_in(struct x86_emulate_ctxt *ctxt)
3398{
3399 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3400 &ctxt->dst.val))
3401 return X86EMUL_IO_NEEDED;
3402
3403 return X86EMUL_CONTINUE;
3404}
3405
3406static int em_out(struct x86_emulate_ctxt *ctxt)
3407{
3408 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3409 &ctxt->src.val, 1);
3410 /* Disable writeback. */
3411 ctxt->dst.type = OP_NONE;
3412 return X86EMUL_CONTINUE;
3413}
3414
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003415static int em_cli(struct x86_emulate_ctxt *ctxt)
3416{
3417 if (emulator_bad_iopl(ctxt))
3418 return emulate_gp(ctxt, 0);
3419
3420 ctxt->eflags &= ~X86_EFLAGS_IF;
3421 return X86EMUL_CONTINUE;
3422}
3423
3424static int em_sti(struct x86_emulate_ctxt *ctxt)
3425{
3426 if (emulator_bad_iopl(ctxt))
3427 return emulate_gp(ctxt, 0);
3428
3429 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3430 ctxt->eflags |= X86_EFLAGS_IF;
3431 return X86EMUL_CONTINUE;
3432}
3433
Avi Kivity6d6eede2012-06-07 14:11:36 +03003434static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3435{
3436 u32 eax, ebx, ecx, edx;
3437
Avi Kivitydd856ef2012-08-27 23:46:17 +03003438 eax = reg_read(ctxt, VCPU_REGS_RAX);
3439 ecx = reg_read(ctxt, VCPU_REGS_RCX);
Avi Kivity6d6eede2012-06-07 14:11:36 +03003440 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003441 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3442 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3443 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3444 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
Avi Kivity6d6eede2012-06-07 14:11:36 +03003445 return X86EMUL_CONTINUE;
3446}
3447
Paolo Bonzini98f73632013-10-31 11:19:42 +01003448static int em_sahf(struct x86_emulate_ctxt *ctxt)
3449{
3450 u32 flags;
3451
3452 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3453 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3454
3455 ctxt->eflags &= ~0xffUL;
3456 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3457 return X86EMUL_CONTINUE;
3458}
3459
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003460static int em_lahf(struct x86_emulate_ctxt *ctxt)
3461{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003462 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3463 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003464 return X86EMUL_CONTINUE;
3465}
3466
Avi Kivity92998362012-06-13 12:25:06 +03003467static int em_bswap(struct x86_emulate_ctxt *ctxt)
3468{
3469 switch (ctxt->op_bytes) {
3470#ifdef CONFIG_X86_64
3471 case 8:
3472 asm("bswap %0" : "+r"(ctxt->dst.val));
3473 break;
3474#endif
3475 default:
3476 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3477 break;
3478 }
3479 return X86EMUL_CONTINUE;
3480}
3481
Nadav Amit13e457e2014-10-13 13:04:13 +03003482static int em_clflush(struct x86_emulate_ctxt *ctxt)
3483{
3484 /* emulating clflush regardless of cpuid */
3485 return X86EMUL_CONTINUE;
3486}
3487
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003488static bool valid_cr(int nr)
3489{
3490 switch (nr) {
3491 case 0:
3492 case 2 ... 4:
3493 case 8:
3494 return true;
3495 default:
3496 return false;
3497 }
3498}
3499
3500static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3501{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003502 if (!valid_cr(ctxt->modrm_reg))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003503 return emulate_ud(ctxt);
3504
3505 return X86EMUL_CONTINUE;
3506}
3507
3508static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3509{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003510 u64 new_val = ctxt->src.val64;
3511 int cr = ctxt->modrm_reg;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003512 u64 efer = 0;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003513
3514 static u64 cr_reserved_bits[] = {
3515 0xffffffff00000000ULL,
3516 0, 0, 0, /* CR3 checked later */
3517 CR4_RESERVED_BITS,
3518 0, 0, 0,
3519 CR8_RESERVED_BITS,
3520 };
3521
3522 if (!valid_cr(cr))
3523 return emulate_ud(ctxt);
3524
3525 if (new_val & cr_reserved_bits[cr])
3526 return emulate_gp(ctxt, 0);
3527
3528 switch (cr) {
3529 case 0: {
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003530 u64 cr4;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003531 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3532 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3533 return emulate_gp(ctxt, 0);
3534
Avi Kivity717746e2011-04-20 13:37:53 +03003535 cr4 = ctxt->ops->get_cr(ctxt, 4);
3536 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003537
3538 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3539 !(cr4 & X86_CR4_PAE))
3540 return emulate_gp(ctxt, 0);
3541
3542 break;
3543 }
3544 case 3: {
3545 u64 rsvd = 0;
3546
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003547 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3548 if (efer & EFER_LMA)
Nadav Amit9d88fca2014-11-02 11:54:52 +02003549 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003550
3551 if (new_val & rsvd)
3552 return emulate_gp(ctxt, 0);
3553
3554 break;
3555 }
3556 case 4: {
Avi Kivity717746e2011-04-20 13:37:53 +03003557 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003558
3559 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3560 return emulate_gp(ctxt, 0);
3561
3562 break;
3563 }
3564 }
3565
3566 return X86EMUL_CONTINUE;
3567}
3568
Joerg Roedel3b88e412011-04-04 12:39:29 +02003569static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3570{
3571 unsigned long dr7;
3572
Avi Kivity717746e2011-04-20 13:37:53 +03003573 ctxt->ops->get_dr(ctxt, 7, &dr7);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003574
 3575 /* Check if DR7.GD (general detect, bit 13) is set */
3576 return dr7 & (1 << 13);
3577}
3578
3579static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3580{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003581 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003582 u64 cr4;
3583
3584 if (dr > 7)
3585 return emulate_ud(ctxt);
3586
Avi Kivity717746e2011-04-20 13:37:53 +03003587 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003588 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3589 return emulate_ud(ctxt);
3590
Nadav Amit6d2a0522014-11-02 11:54:43 +02003591 if (check_dr7_gd(ctxt)) {
3592 ulong dr6;
3593
3594 ctxt->ops->get_dr(ctxt, 6, &dr6);
3595 dr6 &= ~15;
3596 dr6 |= DR6_BD | DR6_RTM;
3597 ctxt->ops->set_dr(ctxt, 6, dr6);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003598 return emulate_db(ctxt);
Nadav Amit6d2a0522014-11-02 11:54:43 +02003599 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02003600
3601 return X86EMUL_CONTINUE;
3602}
3603
3604static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3605{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003606 u64 new_val = ctxt->src.val64;
3607 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003608
3609 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3610 return emulate_gp(ctxt, 0);
3611
3612 return check_dr_read(ctxt);
3613}
3614
Joerg Roedel01de8b02011-04-04 12:39:31 +02003615static int check_svme(struct x86_emulate_ctxt *ctxt)
3616{
3617 u64 efer;
3618
Avi Kivity717746e2011-04-20 13:37:53 +03003619 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003620
3621 if (!(efer & EFER_SVME))
3622 return emulate_ud(ctxt);
3623
3624 return X86EMUL_CONTINUE;
3625}
3626
3627static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3628{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003629 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003630
3631 /* Valid physical address? */
Randy Dunlapd4224442011-04-21 09:09:22 -07003632 if (rax & 0xffff000000000000ULL)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003633 return emulate_gp(ctxt, 0);
3634
3635 return check_svme(ctxt);
3636}
3637
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003638static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3639{
Avi Kivity717746e2011-04-20 13:37:53 +03003640 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003641
Avi Kivity717746e2011-04-20 13:37:53 +03003642 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003643 return emulate_ud(ctxt);
3644
3645 return X86EMUL_CONTINUE;
3646}
3647
Joerg Roedel80612522011-04-04 12:39:33 +02003648static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3649{
Avi Kivity717746e2011-04-20 13:37:53 +03003650 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003651 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
Joerg Roedel80612522011-04-04 12:39:33 +02003652
Avi Kivity717746e2011-04-20 13:37:53 +03003653 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
Nadav Amit67f4d422014-06-02 18:34:09 +03003654 ctxt->ops->check_pmc(ctxt, rcx))
Joerg Roedel80612522011-04-04 12:39:33 +02003655 return emulate_gp(ctxt, 0);
3656
3657 return X86EMUL_CONTINUE;
3658}
3659
Joerg Roedelf6511932011-04-04 12:39:35 +02003660static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3661{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003662 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3663 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003664 return emulate_gp(ctxt, 0);
3665
3666 return X86EMUL_CONTINUE;
3667}
3668
3669static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3670{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003671 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3672 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003673 return emulate_gp(ctxt, 0);
3674
3675 return X86EMUL_CONTINUE;
3676}
3677
Avi Kivity73fba5f2010-07-29 15:11:53 +03003678#define D(_y) { .flags = (_y) }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003679#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3680#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3681 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Gleb Natapov0b789ee2013-04-11 11:59:55 +03003682#define N D(NotImpl)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003683#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003684#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3685#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
Gleb Natapov045a2822012-12-20 16:57:43 +02003686#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003687#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
Avi Kivitye28bbd42013-01-04 16:18:48 +02003688#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
Avi Kivityc4f035c2011-04-04 12:39:22 +02003689#define II(_f, _e, _i) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003690 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
Joerg Roedeld09beab2011-04-04 12:39:25 +02003691#define IIP(_f, _e, _i, _p) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003692 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3693 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Avi Kivityaa97bb42010-01-20 18:09:23 +02003694#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003695
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003696#define D2bv(_f) D((_f) | ByteOp), D(_f)
Joerg Roedelf6511932011-04-04 12:39:35 +02003697#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003698#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
Avi Kivityf7857f32013-01-04 16:18:53 +02003699#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003700#define I2bvIP(_f, _e, _i, _p) \
3701 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003702
Avi Kivityfb864fb2013-01-04 16:18:54 +02003703#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3704 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3705 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
Avi Kivity6230f7f2010-08-26 18:34:55 +03003706
Nadav Amit0f54a322014-08-29 11:26:55 +03003707static const struct opcode group7_rm0[] = {
3708 N,
3709 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3710 N, N, N, N, N, N,
3711};
3712
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003713static const struct opcode group7_rm1[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003714 DI(SrcNone | Priv, monitor),
3715 DI(SrcNone | Priv, mwait),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003716 N, N, N, N, N, N,
3717};
3718
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003719static const struct opcode group7_rm3[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003720 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
Borislav Petkovb51e9742013-09-22 16:44:52 +02003721 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003722 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3723 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3724 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3725 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3726 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3727 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003728};
Avi Kivity6230f7f2010-08-26 18:34:55 +03003729
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003730static const struct opcode group7_rm7[] = {
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003731 N,
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003732 DIP(SrcNone, rdtscp, check_rdtsc),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003733 N, N, N, N, N, N,
3734};
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09003735
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003736static const struct opcode group1[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003737 F(Lock, em_add),
3738 F(Lock | PageTable, em_or),
3739 F(Lock, em_adc),
3740 F(Lock, em_sbb),
3741 F(Lock | PageTable, em_and),
3742 F(Lock, em_sub),
3743 F(Lock, em_xor),
3744 F(NoWrite, em_cmp),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003745};
3746
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003747static const struct opcode group1A[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003748 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003749};
3750
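/*
 * Group 2 (opcodes C0/C1 and D0-D3): rotates and shifts selected by
 * ModRM.reg.  The /6 slot reuses em_shl, since SAL is an alias of SHL.
 */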
Avi Kivity007a3b52013-01-19 19:51:51 +02003751static const struct opcode group2[] = {
3752 F(DstMem | ModRM, em_rol),
3753 F(DstMem | ModRM, em_ror),
3754 F(DstMem | ModRM, em_rcl),
3755 F(DstMem | ModRM, em_rcr),
3756 F(DstMem | ModRM, em_shl),
3757 F(DstMem | ModRM, em_shr),
3758 F(DstMem | ModRM, em_shl),
3759 F(DstMem | ModRM, em_sar),
3760};
3761
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003762static const struct opcode group3[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003763 F(DstMem | SrcImm | NoWrite, em_test),
3764 F(DstMem | SrcImm | NoWrite, em_test),
Avi Kivity45a14672013-01-04 16:18:52 +02003765 F(DstMem | SrcNone | Lock, em_not),
3766 F(DstMem | SrcNone | Lock, em_neg),
Avi Kivityb9fa4092013-02-09 11:31:48 +02003767 F(DstXacc | Src2Mem, em_mul_ex),
3768 F(DstXacc | Src2Mem, em_imul_ex),
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02003769 F(DstXacc | Src2Mem, em_div_ex),
3770 F(DstXacc | Src2Mem, em_idiv_ex),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003771};
3772
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003773static const struct opcode group4[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003774 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3775 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003776 N, N, N, N, N, N,
3777};
3778
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003779static const struct opcode group5[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003780 F(DstMem | SrcNone | Lock, em_inc),
3781 F(DstMem | SrcNone | Lock, em_dec),
Nadav Amit58b70752014-10-24 11:35:09 +03003782 I(SrcMem | NearBranch, em_call_near_abs),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003783 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
Nadav Amit58b70752014-10-24 11:35:09 +03003784 I(SrcMem | NearBranch, em_jmp_abs),
Nadav Amitf7784042014-09-18 22:39:41 +03003785 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3786 I(SrcMem | Stack, em_push), D(Undefined),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003787};
3788
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003789static const struct opcode group6[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003790 DI(Prot, sldt),
3791 DI(Prot, str),
Avi Kivitya14e5792012-06-13 12:28:33 +03003792 II(Prot | Priv | SrcMem16, em_lldt, lldt),
Avi Kivity80890002012-06-13 16:33:29 +03003793 II(Prot | Priv | SrcMem16, em_ltr, ltr),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02003794 N, N, N, N,
3795};
3796
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003797static const struct group_dual group7 = { {
Nadav Amit606b1c32014-06-02 18:34:06 +03003798 II(Mov | DstMem, em_sgdt, sgdt),
3799 II(Mov | DstMem, em_sidt, sidt),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003800 II(SrcMem | Priv, em_lgdt, lgdt),
3801 II(SrcMem | Priv, em_lidt, lidt),
3802 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3803 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3804 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003805}, {
Nadav Amit0f54a322014-08-29 11:26:55 +03003806 EXT(0, group7_rm0),
Avi Kivity5ef39c72011-04-21 12:21:50 +03003807 EXT(0, group7_rm1),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003808 N, EXT(0, group7_rm3),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003809 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3810 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3811 EXT(0, group7_rm7),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003812} };
3813
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003814static const struct opcode group8[] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003815 N, N, N, N,
Avi Kivity11c363b2013-01-19 19:51:54 +02003816 F(DstMem | SrcImmByte | NoWrite, em_bt),
3817 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3818 F(DstMem | SrcImmByte | Lock, em_btr),
3819 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003820};
3821
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003822static const struct group_dual group9 = { {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003823 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003824}, {
3825 N, N, N, N, N, N, N, N,
3826} };
3827
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003828static const struct opcode group11[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003829 I(DstMem | SrcImm | Mov | PageTable, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003830 X7(D(Undefined)),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03003831};
3832
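/*
 * 0F AE (group 15): only /7 with a memory operand and no mandatory prefix
 * (CLFLUSH) is handled; the register forms in the mod3 half and the other
 * /reg slots are left unimplemented.
 */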
Nadav Amit13e457e2014-10-13 13:04:13 +03003833static const struct gprefix pfx_0f_ae_7 = {
Nadav Amit3f6f1482014-10-13 13:04:14 +03003834 I(SrcMem | ByteOp, em_clflush), N, N, N,
Nadav Amit13e457e2014-10-13 13:04:13 +03003835};
3836
3837static const struct group_dual group15 = { {
3838 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3839}, {
3840 N, N, N, N, N, N, N, N,
3841} };
3842
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003843static const struct gprefix pfx_0f_6f_0f_7f = {
Avi Kivitye5971752012-04-09 18:40:03 +03003844 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
Avi Kivityaa97bb42010-01-20 18:09:23 +02003845};
3846
Paolo Bonzinid5b77062014-07-14 12:54:48 +02003847static const struct gprefix pfx_0f_2b = {
3848 I(0, em_mov), I(0, em_mov), N, N,
Avi Kivity3e114eb2012-04-09 18:40:01 +03003849};
3850
Igor Mammedov27ce8252014-03-15 21:01:59 +01003851static const struct gprefix pfx_0f_28_0f_29 = {
Igor Mammedov6fec27d2014-03-15 21:02:00 +01003852 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01003853};
3854
Alex Williamson0a370272014-07-11 11:56:31 -06003855static const struct gprefix pfx_0f_e7 = {
3856 N, I(Sse, em_mov), N, N,
3857};
3858
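/*
 * x87 escape tables: the first eight entries are selected by ModRM.reg for
 * memory forms, the second block by the full ModRM byte for register forms
 * (0xC0-0xFF).  Only FNSTCW (D9 /7), FNINIT (DB E3) and FNSTSW (DD /7) are
 * emulated here.
 */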
Gleb Natapov045a2822012-12-20 16:57:43 +02003859static const struct escape escape_d9 = { {
3860 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3861}, {
3862 /* 0xC0 - 0xC7 */
3863 N, N, N, N, N, N, N, N,
3864 /* 0xC8 - 0xCF */
3865 N, N, N, N, N, N, N, N,
3866	/* 0xD0 - 0xD7 */
3867 N, N, N, N, N, N, N, N,
3868 /* 0xD8 - 0xDF */
3869 N, N, N, N, N, N, N, N,
3870 /* 0xE0 - 0xE7 */
3871 N, N, N, N, N, N, N, N,
3872 /* 0xE8 - 0xEF */
3873 N, N, N, N, N, N, N, N,
3874 /* 0xF0 - 0xF7 */
3875 N, N, N, N, N, N, N, N,
3876 /* 0xF8 - 0xFF */
3877 N, N, N, N, N, N, N, N,
3878} };
3879
3880static const struct escape escape_db = { {
3881 N, N, N, N, N, N, N, N,
3882}, {
3883 /* 0xC0 - 0xC7 */
3884 N, N, N, N, N, N, N, N,
3885 /* 0xC8 - 0xCF */
3886 N, N, N, N, N, N, N, N,
3887	/* 0xD0 - 0xD7 */
3888 N, N, N, N, N, N, N, N,
3889 /* 0xD8 - 0xDF */
3890 N, N, N, N, N, N, N, N,
3891 /* 0xE0 - 0xE7 */
3892 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3893 /* 0xE8 - 0xEF */
3894 N, N, N, N, N, N, N, N,
3895 /* 0xF0 - 0xF7 */
3896 N, N, N, N, N, N, N, N,
3897 /* 0xF8 - 0xFF */
3898 N, N, N, N, N, N, N, N,
3899} };
3900
3901static const struct escape escape_dd = { {
3902 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3903}, {
3904 /* 0xC0 - 0xC7 */
3905 N, N, N, N, N, N, N, N,
3906 /* 0xC8 - 0xCF */
3907 N, N, N, N, N, N, N, N,
3908	/* 0xD0 - 0xD7 */
3909 N, N, N, N, N, N, N, N,
3910 /* 0xD8 - 0xDF */
3911 N, N, N, N, N, N, N, N,
3912 /* 0xE0 - 0xE7 */
3913 N, N, N, N, N, N, N, N,
3914 /* 0xE8 - 0xEF */
3915 N, N, N, N, N, N, N, N,
3916 /* 0xF0 - 0xF7 */
3917 N, N, N, N, N, N, N, N,
3918 /* 0xF8 - 0xFF */
3919 N, N, N, N, N, N, N, N,
3920} };
3921
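/*
 * Primary (one-byte) opcode map, indexed by the opcode byte.  Each entry
 * combines decode flags with either a handler or a pointer to one of the
 * group/prefix tables above.
 */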
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003922static const struct opcode opcode_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003923 /* 0x00 - 0x07 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003924 F6ALU(Lock, em_add),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003925 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3926 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003927 /* 0x08 - 0x0F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003928 F6ALU(Lock | PageTable, em_or),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003929 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3930 N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003931 /* 0x10 - 0x17 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003932 F6ALU(Lock, em_adc),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003933 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3934 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003935 /* 0x18 - 0x1F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003936 F6ALU(Lock, em_sbb),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003937 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3938 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003939 /* 0x20 - 0x27 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003940 F6ALU(Lock | PageTable, em_and), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003941 /* 0x28 - 0x2F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003942 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003943 /* 0x30 - 0x37 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003944 F6ALU(Lock, em_xor), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003945 /* 0x38 - 0x3F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003946 F6ALU(NoWrite, em_cmp), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003947 /* 0x40 - 0x4F */
Avi Kivity95413dc2013-01-19 19:51:53 +02003948 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003949 /* 0x50 - 0x57 */
Avi Kivity63540382010-07-29 15:11:55 +03003950 X8(I(SrcReg | Stack, em_push)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003951 /* 0x58 - 0x5F */
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09003952 X8(I(DstReg | Stack, em_pop)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003953 /* 0x60 - 0x67 */
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09003954 I(ImplicitOps | Stack | No64, em_pusha),
3955 I(ImplicitOps | Stack | No64, em_popa),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003956 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3957 N, N, N, N,
3958 /* 0x68 - 0x6F */
Avi Kivityd46164d2010-08-18 19:29:33 +03003959 I(SrcImm | Mov | Stack, em_push),
3960 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003961 I(SrcImmByte | Mov | Stack, em_push),
3962 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
Gleb Natapovb3356bf2012-09-03 15:24:29 +03003963 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
Takuya Yoshikawa2b5e97e2011-11-23 12:27:39 +09003964 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
Avi Kivity73fba5f2010-07-29 15:11:53 +03003965 /* 0x70 - 0x7F */
Nadav Amit58b70752014-10-24 11:35:09 +03003966 X16(D(SrcImmByte | NearBranch)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003967 /* 0x80 - 0x87 */
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003968 G(ByteOp | DstMem | SrcImm, group1),
3969 G(DstMem | SrcImm, group1),
3970 G(ByteOp | DstMem | SrcImm | No64, group1),
3971 G(DstMem | SrcImmByte, group1),
Avi Kivityfb864fb2013-01-04 16:18:54 +02003972 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003973 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003974 /* 0x88 - 0x8F */
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003975 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003976 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003977 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003978 D(ModRM | SrcMem | NoAccess | DstReg),
3979 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3980 G(0, group1A),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003981 /* 0x90 - 0x97 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02003982 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003983 /* 0x98 - 0x9F */
Avi Kivity61429142010-08-19 15:13:00 +03003984 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
Wei Yongjuncc4feed2010-08-25 14:10:53 +08003985 I(SrcImmFAddr | No64, em_call_far), N,
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09003986 II(ImplicitOps | Stack, em_pushf, pushf),
Paolo Bonzini98f73632013-10-31 11:19:42 +01003987 II(ImplicitOps | Stack, em_popf, popf),
3988 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003989 /* 0xA0 - 0xA7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003990 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003991 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003992 I2bv(SrcSI | DstDI | Mov | String, em_mov),
Nadav Amit5aca3722014-11-02 11:54:50 +02003993 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003994 /* 0xA8 - 0xAF */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003995 F2bv(DstAcc | SrcImm | NoWrite, em_test),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003996 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3997 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
Nadav Amit5aca3722014-11-02 11:54:50 +02003998 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003999 /* 0xB0 - 0xB7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004000 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004001 /* 0xB8 - 0xBF */
Nadav Amit5e2c6882012-12-06 21:55:10 -02004002 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004003 /* 0xC0 - 0xC7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02004004 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
Nadav Amit58b70752014-10-24 11:35:09 +03004005 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4006 I(ImplicitOps | NearBranch, em_ret),
Avi Kivityd4b43252011-09-13 10:45:50 +03004007 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4008 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03004009 G(ByteOp, group11), G(0, group11),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004010 /* 0xC8 - 0xCF */
Avi Kivity612e89f2012-06-12 20:03:23 +03004011 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
Bruce Rogers32611072013-09-09 09:40:20 -06004012 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4013 I(ImplicitOps | Stack, em_ret_far),
Avi Kivity3c6e2762011-04-04 12:39:23 +02004014 D(ImplicitOps), DI(SrcImmByte, intn),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004015 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004016 /* 0xD0 - 0xD7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02004017 G(Src2One | ByteOp, group2), G(Src2One, group2),
4018 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02004019 I(DstAcc | SrcImmUByte | No64, em_aam),
Paolo Bonzini326f5782013-05-09 11:32:51 +02004020 I(DstAcc | SrcImmUByte | No64, em_aad),
4021 F(DstAcc | ByteOp | No64, em_salc),
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004022 I(DstAcc | SrcXLat | ByteOp, em_mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004023 /* 0xD8 - 0xDF */
Gleb Natapov045a2822012-12-20 16:57:43 +02004024 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004025 /* 0xE0 - 0xE7 */
Nadav Amit58b70752014-10-24 11:35:09 +03004026 X3(I(SrcImmByte | NearBranch, em_loop)),
4027 I(SrcImmByte | NearBranch, em_jcxz),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004028 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4029 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004030 /* 0xE8 - 0xEF */
Nadav Amit58b70752014-10-24 11:35:09 +03004031 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4032 I(SrcImmFAddr | No64, em_jmp_far),
4033 D(SrcImmByte | ImplicitOps | NearBranch),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004034 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4035 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004036 /* 0xF0 - 0xF7 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02004037 N, DI(ImplicitOps, icebp), N, N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02004038 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4039 G(ByteOp, group3), G(0, group3),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004040 /* 0xF8 - 0xFF */
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09004041 D(ImplicitOps), D(ImplicitOps),
4042 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004043 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4044};
4045
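/* Two-byte (0F xx) opcode map, indexed by the second opcode byte. */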
Mathias Krausefd0a0d82012-08-30 01:30:15 +02004046static const struct opcode twobyte_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03004047 /* 0x00 - 0x0F */
Joerg Roedeldee6bb72011-04-04 12:39:30 +02004048 G(0, group6), GD(0, &group7), N, N,
Borislav Petkovb51e9742013-09-22 16:44:52 +02004049 N, I(ImplicitOps | EmulateOnUD, em_syscall),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004050 II(ImplicitOps | Priv, em_clts, clts), N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02004051 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
Nadav Amit3f6f1482014-10-13 13:04:14 +03004052 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004053 /* 0x10 - 0x1F */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02004054 N, N, N, N, N, N, N, N,
Nadav Amit3f6f1482014-10-13 13:04:14 +03004055 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4056 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004057 /* 0x20 - 0x2F */
Nadav Amit9b88ae92014-05-25 23:05:21 +03004058 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4059 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4060 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4061 check_cr_write),
4062 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4063 check_dr_write),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004064 N, N, N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01004065 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4066 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
Paolo Bonzinid5b77062014-07-14 12:54:48 +02004067 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
Avi Kivity3e114eb2012-04-09 18:40:01 +03004068 N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004069 /* 0x30 - 0x3F */
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09004070 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
Joerg Roedel80612522011-04-04 12:39:33 +02004071 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09004072 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
Avi Kivity222d21a2011-11-10 14:57:30 +02004073 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
Borislav Petkovb51e9742013-09-22 16:44:52 +02004074 I(ImplicitOps | EmulateOnUD, em_sysenter),
4075 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
Avi Kivityd8671622011-02-01 16:32:03 +02004076 N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004077 N, N, N, N, N, N, N, N,
4078 /* 0x40 - 0x4F */
Nadav Amit140bad82014-06-15 16:13:00 +03004079 X16(D(DstReg | SrcMem | ModRM)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004080 /* 0x50 - 0x5F */
4081 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4082 /* 0x60 - 0x6F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02004083 N, N, N, N,
4084 N, N, N, N,
4085 N, N, N, N,
4086 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004087 /* 0x70 - 0x7F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02004088 N, N, N, N,
4089 N, N, N, N,
4090 N, N, N, N,
4091 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004092 /* 0x80 - 0x8F */
Nadav Amit58b70752014-10-24 11:35:09 +03004093 X16(D(SrcImm | NearBranch)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004094 /* 0x90 - 0x9F */
Wei Yongjunee45b582010-08-06 17:10:07 +08004095 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004096 /* 0xA0 - 0xA7 */
Avi Kivity1cd196e2011-09-13 10:45:51 +03004097 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
Avi Kivity11c363b2013-01-19 19:51:54 +02004098 II(ImplicitOps, em_cpuid, cpuid),
4099 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
Avi Kivity0bdea062013-01-19 19:51:50 +02004100 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4101 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004102 /* 0xA8 - 0xAF */
Avi Kivity1cd196e2011-09-13 10:45:51 +03004103 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004104 DI(ImplicitOps, rsm),
Avi Kivity11c363b2013-01-19 19:51:54 +02004105 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
Avi Kivity0bdea062013-01-19 19:51:50 +02004106 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4107 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
Nadav Amit13e457e2014-10-13 13:04:13 +03004108 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004109 /* 0xB0 - 0xB7 */
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09004110 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
Avi Kivityd4b43252011-09-13 10:45:50 +03004111 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
Avi Kivity11c363b2013-01-19 19:51:54 +02004112 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
Avi Kivityd4b43252011-09-13 10:45:50 +03004113 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4114 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004115 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004116 /* 0xB8 - 0xBF */
4117 N, N,
Takuya Yoshikawace7faab2011-11-22 15:17:48 +09004118 G(BitOp, group8),
Avi Kivity11c363b2013-01-19 19:51:54 +02004119 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4120 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004121 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity92998362012-06-13 12:25:06 +03004122 /* 0xC0 - 0xC7 */
Avi Kivitye47a5f52013-02-09 11:31:51 +02004123 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
Nadav Amited9aad22014-11-02 11:55:00 +02004124 N, I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004125 N, N, N, GD(0, &group9),
Avi Kivity92998362012-06-13 12:25:06 +03004126 /* 0xC8 - 0xCF */
4127 X8(I(DstReg, em_bswap)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004128 /* 0xD0 - 0xDF */
4129 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4130 /* 0xE0 - 0xEF */
Alex Williamson0a370272014-07-11 11:56:31 -06004131 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4132 N, N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004133 /* 0xF0 - 0xFF */
4134 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4135};
4136
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004137static const struct gprefix three_byte_0f_38_f0 = {
Borislav Petkov84cffe42013-10-29 12:54:56 +01004138 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004139};
4140
4141static const struct gprefix three_byte_0f_38_f1 = {
Borislav Petkov84cffe42013-10-29 12:54:56 +01004142 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004143};
4144
4145/*
4146 * Insns below are selected by the prefix; the map is indexed by the third
4147 * opcode byte.
4148 */
4149static const struct opcode opcode_map_0f_38[256] = {
4150 /* 0x00 - 0x7f */
4151 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
Borislav Petkov84cffe42013-10-29 12:54:56 +01004152 /* 0x80 - 0xef */
4153 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4154 /* 0xf0 - 0xf1 */
4155 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4156 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4157 /* 0xf2 - 0xff */
4158 N, N, X4(N), X8(N)
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004159};
4160
Avi Kivity73fba5f2010-07-29 15:11:53 +03004161#undef D
4162#undef N
4163#undef G
4164#undef GD
4165#undef I
Avi Kivityaa97bb42010-01-20 18:09:23 +02004166#undef GP
Joerg Roedel01de8b02011-04-04 12:39:31 +02004167#undef EXT
Avi Kivity73fba5f2010-07-29 15:11:53 +03004168
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004169#undef D2bv
Joerg Roedelf6511932011-04-04 12:39:35 +02004170#undef D2bvIP
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004171#undef I2bv
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004172#undef I2bvIP
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09004173#undef F6ALU
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004174
Avi Kivity9dac77f2011-06-01 15:34:25 +03004175static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
Avi Kivity39f21ee2010-08-18 19:20:21 +03004176{
4177 unsigned size;
4178
Avi Kivity9dac77f2011-06-01 15:34:25 +03004179 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004180 if (size == 8)
4181 size = 4;
4182 return size;
4183}
4184
4185static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4186 unsigned size, bool sign_extension)
4187{
Avi Kivity39f21ee2010-08-18 19:20:21 +03004188 int rc = X86EMUL_CONTINUE;
4189
4190 op->type = OP_IMM;
4191 op->bytes = size;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004192 op->addr.mem.ea = ctxt->_eip;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004193 /* NB. Immediates are sign-extended as necessary. */
4194 switch (op->bytes) {
4195 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004196 op->val = insn_fetch(s8, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004197 break;
4198 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004199 op->val = insn_fetch(s16, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004200 break;
4201 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004202 op->val = insn_fetch(s32, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004203 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004204 case 8:
4205 op->val = insn_fetch(s64, ctxt);
4206 break;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004207 }
4208 if (!sign_extension) {
4209 switch (op->bytes) {
4210 case 1:
4211 op->val &= 0xff;
4212 break;
4213 case 2:
4214 op->val &= 0xffff;
4215 break;
4216 case 4:
4217 op->val &= 0xffffffff;
4218 break;
4219 }
4220 }
4221done:
4222 return rc;
4223}
4224
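/*
 * Decode one operand according to the OpXxx selector pulled out of the
 * decode flags.  Memory operands share the ModRM/absolute address already
 * cached in ctxt->memop by x86_decode_insn().
 */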
Avi Kivitya99455492011-09-13 10:45:41 +03004225static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4226 unsigned d)
4227{
4228 int rc = X86EMUL_CONTINUE;
4229
4230 switch (d) {
4231 case OpReg:
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004232 decode_register_operand(ctxt, op);
Avi Kivitya99455492011-09-13 10:45:41 +03004233 break;
4234 case OpImmUByte:
Avi Kivity608aabe2011-09-13 10:45:45 +03004235 rc = decode_imm(ctxt, op, 1, false);
Avi Kivitya99455492011-09-13 10:45:41 +03004236 break;
4237 case OpMem:
Avi Kivity41ddf972011-09-13 10:45:48 +03004238 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity0fe59122011-09-13 10:45:47 +03004239 mem_common:
Avi Kivitya99455492011-09-13 10:45:41 +03004240 *op = ctxt->memop;
4241 ctxt->memopp = op;
Paolo Bonzini96888972014-04-01 14:54:19 +02004242 if (ctxt->d & BitOp)
Avi Kivitya99455492011-09-13 10:45:41 +03004243 fetch_bit_operand(ctxt);
4244 op->orig_val = op->val;
4245 break;
Avi Kivity41ddf972011-09-13 10:45:48 +03004246 case OpMem64:
Nadav Amitaaa05f22014-06-02 18:34:10 +03004247 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
Avi Kivity41ddf972011-09-13 10:45:48 +03004248 goto mem_common;
Avi Kivitya99455492011-09-13 10:45:41 +03004249 case OpAcc:
4250 op->type = OP_REG;
4251 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004252 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Avi Kivitya99455492011-09-13 10:45:41 +03004253 fetch_register_operand(op);
4254 op->orig_val = op->val;
4255 break;
Avi Kivity820207c2013-02-09 11:31:45 +02004256 case OpAccLo:
4257 op->type = OP_REG;
4258 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4259 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4260 fetch_register_operand(op);
4261 op->orig_val = op->val;
4262 break;
4263 case OpAccHi:
4264 if (ctxt->d & ByteOp) {
4265 op->type = OP_NONE;
4266 break;
4267 }
4268 op->type = OP_REG;
4269 op->bytes = ctxt->op_bytes;
4270 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4271 fetch_register_operand(op);
4272 op->orig_val = op->val;
4273 break;
Avi Kivitya99455492011-09-13 10:45:41 +03004274 case OpDI:
4275 op->type = OP_MEM;
4276 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4277 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004278 register_address(ctxt, VCPU_REGS_RDI);
Avi Kivitya99455492011-09-13 10:45:41 +03004279 op->addr.mem.seg = VCPU_SREG_ES;
4280 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004281 op->count = 1;
Avi Kivitya99455492011-09-13 10:45:41 +03004282 break;
4283 case OpDX:
4284 op->type = OP_REG;
4285 op->bytes = 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004286 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivitya99455492011-09-13 10:45:41 +03004287 fetch_register_operand(op);
4288 break;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004289 case OpCL:
4290 op->bytes = 1;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004291 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004292 break;
4293 case OpImmByte:
4294 rc = decode_imm(ctxt, op, 1, true);
4295 break;
4296 case OpOne:
4297 op->bytes = 1;
4298 op->val = 1;
4299 break;
4300 case OpImm:
4301 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4302 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004303 case OpImm64:
4304 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4305 break;
Avi Kivity28867ce2012-01-16 15:08:44 +02004306 case OpMem8:
4307 ctxt->memop.bytes = 1;
Gleb Natapov660696d2013-04-24 13:38:36 +03004308 if (ctxt->memop.type == OP_REG) {
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02004309 ctxt->memop.addr.reg = decode_register(ctxt,
4310 ctxt->modrm_rm, true);
Gleb Natapov660696d2013-04-24 13:38:36 +03004311 fetch_register_operand(&ctxt->memop);
4312 }
Avi Kivity28867ce2012-01-16 15:08:44 +02004313 goto mem_common;
Avi Kivity0fe59122011-09-13 10:45:47 +03004314 case OpMem16:
4315 ctxt->memop.bytes = 2;
4316 goto mem_common;
4317 case OpMem32:
4318 ctxt->memop.bytes = 4;
4319 goto mem_common;
4320 case OpImmU16:
4321 rc = decode_imm(ctxt, op, 2, false);
4322 break;
4323 case OpImmU:
4324 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4325 break;
4326 case OpSI:
4327 op->type = OP_MEM;
4328 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4329 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004330 register_address(ctxt, VCPU_REGS_RSI);
Bandan Das573e80f2014-04-16 12:46:13 -04004331 op->addr.mem.seg = ctxt->seg_override;
Avi Kivity0fe59122011-09-13 10:45:47 +03004332 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004333 op->count = 1;
Avi Kivity0fe59122011-09-13 10:45:47 +03004334 break;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004335 case OpXLat:
4336 op->type = OP_MEM;
4337 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4338 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004339 address_mask(ctxt,
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004340 reg_read(ctxt, VCPU_REGS_RBX) +
4341 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
Bandan Das573e80f2014-04-16 12:46:13 -04004342 op->addr.mem.seg = ctxt->seg_override;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004343 op->val = 0;
4344 break;
Avi Kivity0fe59122011-09-13 10:45:47 +03004345 case OpImmFAddr:
4346 op->type = OP_IMM;
4347 op->addr.mem.ea = ctxt->_eip;
4348 op->bytes = ctxt->op_bytes + 2;
4349 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4350 break;
4351 case OpMemFAddr:
4352 ctxt->memop.bytes = ctxt->op_bytes + 2;
4353 goto mem_common;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004354 case OpES:
4355 op->val = VCPU_SREG_ES;
4356 break;
4357 case OpCS:
4358 op->val = VCPU_SREG_CS;
4359 break;
4360 case OpSS:
4361 op->val = VCPU_SREG_SS;
4362 break;
4363 case OpDS:
4364 op->val = VCPU_SREG_DS;
4365 break;
4366 case OpFS:
4367 op->val = VCPU_SREG_FS;
4368 break;
4369 case OpGS:
4370 op->val = VCPU_SREG_GS;
4371 break;
Avi Kivitya99455492011-09-13 10:45:41 +03004372 case OpImplicit:
4373 /* Special instructions do their own operand decoding. */
4374 default:
4375 op->type = OP_NONE; /* Disable writeback. */
4376 break;
4377 }
4378
4379done:
4380 return rc;
4381}
4382
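/*
 * Decode stage: consume legacy and REX prefixes, fetch one to three opcode
 * bytes, resolve group/group-dual/prefix/escape redirections, read the
 * ModRM/SIB bytes, then decode the source, second-source and destination
 * operands.
 */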
Takuya Yoshikawaef5d75c2011-05-15 00:57:43 +09004383int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004384{
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004385 int rc = X86EMUL_CONTINUE;
4386 int mode = ctxt->mode;
Avi Kivity46561642011-04-24 14:09:59 +03004387 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004388 bool op_prefix = false;
Bandan Das573e80f2014-04-16 12:46:13 -04004389 bool has_seg_override = false;
Avi Kivity46561642011-04-24 14:09:59 +03004390 struct opcode opcode;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004391
Avi Kivityf09ed832011-09-13 10:45:40 +03004392 ctxt->memop.type = OP_NONE;
4393 ctxt->memopp = NULL;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004394 ctxt->_eip = ctxt->eip;
Paolo Bonzini17052f12014-05-06 16:33:01 +02004395 ctxt->fetch.ptr = ctxt->fetch.data;
4396 ctxt->fetch.end = ctxt->fetch.data + insn_len;
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004397 ctxt->opcode_len = 1;
Andre Przywaradc25e892010-12-21 11:12:07 +01004398 if (insn_len > 0)
Avi Kivity9dac77f2011-06-01 15:34:25 +03004399 memcpy(ctxt->fetch.data, insn, insn_len);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004400 else {
Paolo Bonzini9506d572014-05-06 13:05:25 +02004401 rc = __do_insn_fetch_bytes(ctxt, 1);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004402 if (rc != X86EMUL_CONTINUE)
4403 return rc;
4404 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004405
4406 switch (mode) {
4407 case X86EMUL_MODE_REAL:
4408 case X86EMUL_MODE_VM86:
4409 case X86EMUL_MODE_PROT16:
4410 def_op_bytes = def_ad_bytes = 2;
4411 break;
4412 case X86EMUL_MODE_PROT32:
4413 def_op_bytes = def_ad_bytes = 4;
4414 break;
4415#ifdef CONFIG_X86_64
4416 case X86EMUL_MODE_PROT64:
4417 def_op_bytes = 4;
4418 def_ad_bytes = 8;
4419 break;
4420#endif
4421 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004422 return EMULATION_FAILED;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004423 }
4424
Avi Kivity9dac77f2011-06-01 15:34:25 +03004425 ctxt->op_bytes = def_op_bytes;
4426 ctxt->ad_bytes = def_ad_bytes;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004427
4428 /* Legacy prefixes. */
4429 for (;;) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004430 switch (ctxt->b = insn_fetch(u8, ctxt)) {
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004431 case 0x66: /* operand-size override */
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004432 op_prefix = true;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004433 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004434 ctxt->op_bytes = def_op_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004435 break;
4436 case 0x67: /* address-size override */
4437 if (mode == X86EMUL_MODE_PROT64)
4438 /* switch between 4/8 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004439 ctxt->ad_bytes = def_ad_bytes ^ 12;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004440 else
4441 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004442 ctxt->ad_bytes = def_ad_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004443 break;
4444 case 0x26: /* ES override */
4445 case 0x2e: /* CS override */
4446 case 0x36: /* SS override */
4447 case 0x3e: /* DS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004448 has_seg_override = true;
4449 ctxt->seg_override = (ctxt->b >> 3) & 3;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004450 break;
4451 case 0x64: /* FS override */
4452 case 0x65: /* GS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004453 has_seg_override = true;
4454 ctxt->seg_override = ctxt->b & 7;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004455 break;
4456 case 0x40 ... 0x4f: /* REX */
4457 if (mode != X86EMUL_MODE_PROT64)
4458 goto done_prefixes;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004459 ctxt->rex_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004460 continue;
4461 case 0xf0: /* LOCK */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004462 ctxt->lock_prefix = 1;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004463 break;
4464 case 0xf2: /* REPNE/REPNZ */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004465 case 0xf3: /* REP/REPE/REPZ */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004466 ctxt->rep_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004467 break;
4468 default:
4469 goto done_prefixes;
4470 }
4471
4472 /* Any legacy prefix after a REX prefix nullifies its effect. */
4473
Avi Kivity9dac77f2011-06-01 15:34:25 +03004474 ctxt->rex_prefix = 0;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004475 }
4476
4477done_prefixes:
4478
4479 /* REX prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004480 if (ctxt->rex_prefix & 8)
4481 ctxt->op_bytes = 8; /* REX.W */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004482
4483 /* Opcode byte(s). */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004484 opcode = opcode_table[ctxt->b];
Wei Yongjund3ad6242010-08-05 16:34:39 +08004485 /* Two-byte opcode? */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004486 if (ctxt->b == 0x0f) {
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004487 ctxt->opcode_len = 2;
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004488 ctxt->b = insn_fetch(u8, ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004489 opcode = twobyte_table[ctxt->b];
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004490
4491 /* 0F_38 opcode map */
4492 if (ctxt->b == 0x38) {
4493 ctxt->opcode_len = 3;
4494 ctxt->b = insn_fetch(u8, ctxt);
4495 opcode = opcode_map_0f_38[ctxt->b];
4496 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004497 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004498 ctxt->d = opcode.flags;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004499
Takuya Yoshikawa9f4260e2012-04-30 17:48:25 +09004500 if (ctxt->d & ModRM)
4501 ctxt->modrm = insn_fetch(u8, ctxt);
4502
Nadav Amit7fe864d2014-06-02 18:34:03 +03004503	/* VEX-prefixed instructions are not implemented */
4504 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
Nadav Amitd14cb5d2014-11-02 11:54:58 +02004505 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
Nadav Amit7fe864d2014-06-02 18:34:03 +03004506 ctxt->d = NotImpl;
4507 }
4508
Avi Kivity9dac77f2011-06-01 15:34:25 +03004509 while (ctxt->d & GroupMask) {
4510 switch (ctxt->d & GroupMask) {
Avi Kivity46561642011-04-24 14:09:59 +03004511 case Group:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004512 goffset = (ctxt->modrm >> 3) & 7;
Avi Kivity46561642011-04-24 14:09:59 +03004513 opcode = opcode.u.group[goffset];
4514 break;
4515 case GroupDual:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004516 goffset = (ctxt->modrm >> 3) & 7;
4517 if ((ctxt->modrm >> 6) == 3)
Avi Kivity46561642011-04-24 14:09:59 +03004518 opcode = opcode.u.gdual->mod3[goffset];
4519 else
4520 opcode = opcode.u.gdual->mod012[goffset];
4521 break;
4522 case RMExt:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004523 goffset = ctxt->modrm & 7;
Joerg Roedel01de8b02011-04-04 12:39:31 +02004524 opcode = opcode.u.group[goffset];
Avi Kivity46561642011-04-24 14:09:59 +03004525 break;
4526 case Prefix:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004527 if (ctxt->rep_prefix && op_prefix)
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004528 return EMULATION_FAILED;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004529 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
Avi Kivity46561642011-04-24 14:09:59 +03004530 switch (simd_prefix) {
4531 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4532 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4533 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4534 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4535 }
4536 break;
Gleb Natapov045a2822012-12-20 16:57:43 +02004537 case Escape:
4538 if (ctxt->modrm > 0xbf)
4539 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4540 else
4541 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4542 break;
Avi Kivity46561642011-04-24 14:09:59 +03004543 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004544 return EMULATION_FAILED;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004545 }
Avi Kivity46561642011-04-24 14:09:59 +03004546
Avi Kivityb1ea50b2011-09-13 10:45:42 +03004547 ctxt->d &= ~(u64)GroupMask;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004548 ctxt->d |= opcode.flags;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004549 }
4550
Paolo Bonzinie24186e2014-03-27 12:00:57 +01004551 /* Unrecognised? */
4552 if (ctxt->d == 0)
4553 return EMULATION_FAILED;
4554
Avi Kivity9dac77f2011-06-01 15:34:25 +03004555 ctxt->execute = opcode.u.execute;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004556
Nadav Amit3a6095a2014-08-13 16:50:13 +03004557 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4558 return EMULATION_FAILED;
4559
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004560 if (unlikely(ctxt->d &
Nadav Amited9aad22014-11-02 11:55:00 +02004561 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4562 No16))) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004563 /*
4564 * These are copied unconditionally here, and checked unconditionally
4565 * in x86_emulate_insn.
4566 */
4567 ctxt->check_perm = opcode.check_perm;
4568 ctxt->intercept = opcode.intercept;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004569
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004570 if (ctxt->d & NotImpl)
4571 return EMULATION_FAILED;
Avi Kivityd8671622011-02-01 16:32:03 +02004572
Nadav Amit58b70752014-10-24 11:35:09 +03004573 if (mode == X86EMUL_MODE_PROT64) {
4574 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4575 ctxt->op_bytes = 8;
4576 else if (ctxt->d & NearBranch)
4577 ctxt->op_bytes = 8;
4578 }
Avi Kivity7f9b4b72010-08-01 14:46:54 +03004579
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004580 if (ctxt->d & Op3264) {
4581 if (mode == X86EMUL_MODE_PROT64)
4582 ctxt->op_bytes = 8;
4583 else
4584 ctxt->op_bytes = 4;
4585 }
4586
Nadav Amited9aad22014-11-02 11:55:00 +02004587 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4588 ctxt->op_bytes = 4;
4589
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004590 if (ctxt->d & Sse)
4591 ctxt->op_bytes = 16;
4592 else if (ctxt->d & Mmx)
4593 ctxt->op_bytes = 8;
4594 }
Avi Kivity1253791d2011-03-29 11:41:27 +02004595
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004596 /* ModRM and SIB bytes. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004597 if (ctxt->d & ModRM) {
Avi Kivityf09ed832011-09-13 10:45:40 +03004598 rc = decode_modrm(ctxt, &ctxt->memop);
Bandan Das573e80f2014-04-16 12:46:13 -04004599 if (!has_seg_override) {
4600 has_seg_override = true;
4601 ctxt->seg_override = ctxt->modrm_seg;
4602 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004603 } else if (ctxt->d & MemAbs)
Avi Kivityf09ed832011-09-13 10:45:40 +03004604 rc = decode_abs(ctxt, &ctxt->memop);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004605 if (rc != X86EMUL_CONTINUE)
4606 goto done;
4607
Bandan Das573e80f2014-04-16 12:46:13 -04004608 if (!has_seg_override)
4609 ctxt->seg_override = VCPU_SREG_DS;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004610
Bandan Das573e80f2014-04-16 12:46:13 -04004611 ctxt->memop.addr.mem.seg = ctxt->seg_override;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004612
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004613 /*
4614 * Decode and fetch the source operand: register, memory
4615 * or immediate.
4616 */
Avi Kivity0fe59122011-09-13 10:45:47 +03004617 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004618 if (rc != X86EMUL_CONTINUE)
4619 goto done;
4620
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004621 /*
4622 * Decode and fetch the second source operand: register, memory
4623 * or immediate.
4624 */
Avi Kivity4dd6a572011-09-13 10:45:43 +03004625 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004626 if (rc != X86EMUL_CONTINUE)
4627 goto done;
4628
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004629 /* Decode and fetch the destination operand: register or memory. */
Avi Kivitya99455492011-09-13 10:45:41 +03004630 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004631
Bandan Das41061cd2014-04-16 12:46:14 -04004632 if (ctxt->rip_relative)
Nadav Amit1c1c35a2014-11-19 17:43:09 +02004633 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4634 ctxt->memopp->addr.mem.ea + ctxt->_eip);
Avi Kivitycb16c342011-06-19 19:21:11 +03004635
Paolo Bonzinia430c912014-10-23 14:54:14 +02004636done:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004637 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004638}
4639
Xiao Guangrong1cb3f3a2011-09-22 17:02:48 +08004640bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4641{
4642 return ctxt->d & PageTable;
4643}
4644
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004645static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4646{
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004647	/* The second termination condition only applies to REPE
4648	 * and REPNE.  If the repeat string operation prefix is
4649	 * REPE/REPZ or REPNE/REPNZ, check the corresponding
4650	 * termination condition:
4651 * - if REPE/REPZ and ZF = 0 then done
4652 * - if REPNE/REPNZ and ZF = 1 then done
4653 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004654 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4655 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4656 && (((ctxt->rep_prefix == REPE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004657 ((ctxt->eflags & EFLG_ZF) == 0))
Avi Kivity9dac77f2011-06-01 15:34:25 +03004658 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004659 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4660 return true;
4661
4662 return false;
4663}
4664
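/*
 * Run FWAIT under an exception-table fixup: if the guest FPU state has a
 * pending x87 exception, the resulting fault is caught here and reported
 * to the guest as #MF instead of being taken unhandled by the host.
 */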
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004665static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4666{
4667 bool fault = false;
4668
4669 ctxt->ops->get_fpu(ctxt);
4670 asm volatile("1: fwait \n\t"
4671 "2: \n\t"
4672 ".pushsection .fixup,\"ax\" \n\t"
4673 "3: \n\t"
4674 "movb $1, %[fault] \n\t"
4675 "jmp 2b \n\t"
4676 ".popsection \n\t"
4677 _ASM_EXTABLE(1b, 3b)
Avi Kivity38e8a2d2012-04-22 15:12:50 +03004678 : [fault]"+qm"(fault));
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004679 ctxt->ops->put_fpu(ctxt);
4680
4681 if (unlikely(fault))
4682 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4683
4684 return X86EMUL_CONTINUE;
4685}
4686
4687static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4688 struct operand *op)
4689{
4690 if (op->type == OP_MM)
4691 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4692}
4693
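/*
 * Dispatch to a fastop stub.  Stubs live FASTOP_SIZE bytes apart, one per
 * operand size, so the base pointer is advanced by log2(dst.bytes) slots
 * for non-byte operations.  Register convention (from the asm constraints):
 * dst in RAX, src in RDX, src2 in RCX, the flags image in RDI and the stub
 * address in RSI.  A stub reports a divide error by clearing the stub
 * pointer, which is checked on return.
 */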
Avi Kivitye28bbd42013-01-04 16:18:48 +02004694static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4695{
4696 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
Avi Kivityb9fa4092013-02-09 11:31:48 +02004697 if (!(ctxt->d & ByteOp))
4698 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
Avi Kivitye28bbd42013-01-04 16:18:48 +02004699 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004700 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4701 [fastop]"+S"(fop)
4702 : "c"(ctxt->src2.val));
Avi Kivitye28bbd42013-01-04 16:18:48 +02004703 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004704 if (!fop) /* exception is returned in fop variable */
4705 return emulate_de(ctxt);
Avi Kivitye28bbd42013-01-04 16:18:48 +02004706 return X86EMUL_CONTINUE;
4707}
Avi Kivitydd856ef2012-08-27 23:46:17 +03004708
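/*
 * Reset the per-instruction decode state.  The single memset assumes that
 * the fields from ->rip_relative up to (but not including) ->modrm are laid
 * out contiguously in struct x86_emulate_ctxt.
 */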
Bandan Das14985072014-04-16 12:46:09 -04004709void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4710{
Bandan Das573e80f2014-04-16 12:46:13 -04004711 memset(&ctxt->rip_relative, 0,
4712 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
Bandan Das14985072014-04-16 12:46:09 -04004713
Bandan Das14985072014-04-16 12:46:09 -04004714 ctxt->io_read.pos = 0;
4715 ctxt->io_read.end = 0;
Bandan Das14985072014-04-16 12:46:09 -04004716 ctxt->mem_read.end = 0;
4717}
4718
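/*
 * Execute stage: enforce LOCK/privilege/mode restrictions and intercept
 * checks, fetch memory operands, then dispatch to ->execute(), a fastop
 * stub, or the opcode switch below before writing results back.
 */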
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09004719int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004720{
Mathias Krause0225fb52012-08-30 01:30:16 +02004721 const struct x86_emulate_ops *ops = ctxt->ops;
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09004722 int rc = X86EMUL_CONTINUE;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004723 int saved_dst_type = ctxt->dst.type;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004724
Avi Kivity9dac77f2011-06-01 15:34:25 +03004725 ctxt->mem_read.pos = 0;
Glauber Costa310b5d32009-05-12 16:21:06 -04004726
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004727 /* LOCK prefix is allowed only with some instructions */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004728 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004729 rc = emulate_ud(ctxt);
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004730 goto done;
4731 }
4732
Avi Kivity9dac77f2011-06-01 15:34:25 +03004733 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004734 rc = emulate_ud(ctxt);
Avi Kivity081bca02010-08-26 11:06:15 +03004735 goto done;
4736 }
4737
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004738 if (unlikely(ctxt->d &
4739 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4740 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4741 (ctxt->d & Undefined)) {
4742 rc = emulate_ud(ctxt);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004743 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004744 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004745
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004746 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4747 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4748 rc = emulate_ud(ctxt);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004749 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004750 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004751
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004752 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4753 rc = emulate_nm(ctxt);
Joerg Roedeld09beab2011-04-04 12:39:25 +02004754 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004755 }
Joerg Roedeld09beab2011-04-04 12:39:25 +02004756
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004757 if (ctxt->d & Mmx) {
4758 rc = flush_pending_x87_faults(ctxt);
4759 if (rc != X86EMUL_CONTINUE)
4760 goto done;
4761 /*
4762 * Now that we know the fpu is exception safe, we can fetch
4763 * operands from it.
4764 */
4765 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4766 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4767 if (!(ctxt->d & Mov))
4768 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4769 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004770
Bandan Das685bbf42014-04-16 12:46:10 -04004771 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004772 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4773 X86_ICPT_PRE_EXCEPT);
4774 if (rc != X86EMUL_CONTINUE)
4775 goto done;
4776 }
4777
4778 /* Privileged instruction can be executed only in CPL=0 */
4779 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
Nadav Amit68efa762014-06-18 17:19:35 +03004780 if (ctxt->d & PrivUD)
4781 rc = emulate_ud(ctxt);
4782 else
4783 rc = emulate_gp(ctxt, 0);
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004784 goto done;
4785 }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004786
4787 /* Instruction can only be executed in protected mode */
4788 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4789 rc = emulate_ud(ctxt);
4790 goto done;
4791 }
4792
4793 /* Do instruction specific permission checks */
Bandan Das685bbf42014-04-16 12:46:10 -04004794 if (ctxt->d & CheckPerm) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004795 rc = ctxt->check_perm(ctxt);
4796 if (rc != X86EMUL_CONTINUE)
4797 goto done;
4798 }
4799
Bandan Das685bbf42014-04-16 12:46:10 -04004800 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004801 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4802 X86_ICPT_POST_EXCEPT);
4803 if (rc != X86EMUL_CONTINUE)
4804 goto done;
4805 }
4806
4807 if (ctxt->rep_prefix && (ctxt->d & String)) {
4808 /* All REP prefixes have the same first termination condition */
4809 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4810 ctxt->eip = ctxt->_eip;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004811 ctxt->eflags &= ~EFLG_RF;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004812 goto done;
4813 }
4814 }
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004815 }
4816
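	/*
	 * Read memory-based source operands, unless the instruction only
	 * needs their address (NoAccess).
	 */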
Avi Kivity9dac77f2011-06-01 15:34:25 +03004817 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4818 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4819 ctxt->src.valptr, ctxt->src.bytes);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09004820 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004821 goto done;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004822 ctxt->src.orig_val64 = ctxt->src.val64;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004823 }
4824
Avi Kivity9dac77f2011-06-01 15:34:25 +03004825 if (ctxt->src2.type == OP_MEM) {
4826 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4827 &ctxt->src2.val, ctxt->src2.bytes);
Gleb Natapove35b7b92010-02-25 16:36:42 +02004828 if (rc != X86EMUL_CONTINUE)
4829 goto done;
4830 }
4831
Avi Kivity9dac77f2011-06-01 15:34:25 +03004832 if ((ctxt->d & DstMask) == ImplicitOps)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004833 goto special_insn;
4834
4835
Avi Kivity9dac77f2011-06-01 15:34:25 +03004836 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004837		/* optimisation - skip the slow emulated read when the destination is only written (Mov) */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004838 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4839 &ctxt->dst.val, ctxt->dst.bytes);
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004840 if (rc != X86EMUL_CONTINUE)
4841 goto done;
Avi Kivity038e51d2007-01-22 20:40:40 -08004842 }
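	/*
	 * Keep the original destination value; a locked writeback uses it
	 * as the cmpxchg comparand.
	 */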
Avi Kivity9dac77f2011-06-01 15:34:25 +03004843 ctxt->dst.orig_val = ctxt->dst.val;
Avi Kivity038e51d2007-01-22 20:40:40 -08004844
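	/*
	 * All operands are loaded at this point; what follows dispatches to
	 * the instruction handler.
	 */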
Avi Kivity018a98d2007-11-27 19:30:56 +02004845special_insn:
4846
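	/*
	 * Final intercept checkpoint (X86_ICPT_POST_MEMACCESS): the memory
	 * operands have been read by now.
	 */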
Bandan Das685bbf42014-04-16 12:46:10 -04004847 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03004848 rc = emulator_check_intercept(ctxt, ctxt->intercept,
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004849 X86_ICPT_POST_MEMACCESS);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004850 if (rc != X86EMUL_CONTINUE)
4851 goto done;
4852 }
4853
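	/*
	 * Mirror the architectural behaviour of RF: it is set while a
	 * repeated string instruction is in progress so that an instruction
	 * breakpoint does not re-trigger when a partially completed
	 * iteration is restarted, and cleared otherwise.
	 */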
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03004854 if (ctxt->rep_prefix && (ctxt->d & String))
4855 ctxt->eflags |= EFLG_RF;
4856 else
4857 ctxt->eflags &= ~EFLG_RF;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004858
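	/*
	 * Fastop handlers are assembly stubs invoked through the fastop()
	 * trampoline, which loads the decoded operands into registers and
	 * collects the resulting flags; everything else goes through the
	 * regular ->execute() callback.
	 */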
Avi Kivity9dac77f2011-06-01 15:34:25 +03004859 if (ctxt->execute) {
Avi Kivitye28bbd42013-01-04 16:18:48 +02004860 if (ctxt->d & Fastop) {
4861 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4862 rc = fastop(ctxt, fop);
4863 if (rc != X86EMUL_CONTINUE)
4864 goto done;
4865 goto writeback;
4866 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004867 rc = ctxt->execute(ctxt);
Avi Kivityef65c882010-07-29 15:11:51 +03004868 if (rc != X86EMUL_CONTINUE)
4869 goto done;
4870 goto writeback;
4871 }
4872
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004873 if (ctxt->opcode_len == 2)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004874 goto twobyte_insn;
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004875 else if (ctxt->opcode_len == 3)
4876 goto threebyte_insn;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004877
Avi Kivity9dac77f2011-06-01 15:34:25 +03004878 switch (ctxt->b) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08004879 case 0x63: /* movsxd */
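		/* Outside of 64-bit mode, 0x63 is ARPL, which is not emulated. */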
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004880 if (ctxt->mode != X86EMUL_MODE_PROT64)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004881 goto cannot_emulate;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004882 ctxt->dst.val = (s32) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004883 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03004884 case 0x70 ... 0x7f: /* jcc (short) */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004885 if (test_cc(ctxt->b, ctxt->eflags))
Nadav Amit234f3ce2014-09-18 22:39:38 +03004886 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02004887 break;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004888 case 0x8d: /* lea r16/r32, m */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004889 ctxt->dst.val = ctxt->src.addr.mem.ea;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004890 break;
Avi Kivity3d9e77d2010-08-01 12:41:59 +03004891 case 0x90 ... 0x97: /* nop / xchg reg, rax */
Avi Kivitydd856ef2012-08-27 23:46:17 +03004892 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
Nadav Amita825f5c2014-06-15 16:13:01 +03004893 ctxt->dst.type = OP_NONE;
4894 else
4895 rc = em_xchg(ctxt);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09004896 break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004897 case 0x98: /* cbw/cwde/cdqe */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004898 switch (ctxt->op_bytes) {
4899 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4900 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4901 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004902 }
4903 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004904 case 0xcc: /* int3 */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004905 rc = emulate_int(ctxt, 3);
4906 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004907 case 0xcd: /* int n */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004908 rc = emulate_int(ctxt, ctxt->src.val);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004909 break;
4910 case 0xce: /* into */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004911 if (ctxt->eflags & EFLG_OF)
4912 rc = emulate_int(ctxt, 4);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004913 break;
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004914 case 0xe9: /* jmp rel */
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004915 case 0xeb: /* jmp rel short */
Nadav Amit234f3ce2014-09-18 22:39:38 +03004916 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004917 ctxt->dst.type = OP_NONE; /* Disable writeback. */
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004918 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02004919 case 0xf4: /* hlt */
Avi Kivity6c3287f2011-04-20 15:43:05 +03004920 ctxt->ops->halt(ctxt);
Mohammed Gamal19fdfa02008-07-06 16:51:26 +03004921 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02004922 case 0xf5: /* cmc */
4923		/* complement the carry flag in the eflags register */
4924 ctxt->eflags ^= EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02004925 break;
4926 case 0xf8: /* clc */
4927 ctxt->eflags &= ~EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02004928 break;
Mohammed Gamal8744aa92010-08-05 15:42:49 +03004929 case 0xf9: /* stc */
4930 ctxt->eflags |= EFLG_CF;
4931 break;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004932 case 0xfc: /* cld */
4933 ctxt->eflags &= ~EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004934 break;
4935 case 0xfd: /* std */
4936 ctxt->eflags |= EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004937 break;
Avi Kivity91269b82010-07-25 14:51:16 +03004938 default:
4939 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004940 }
Avi Kivity018a98d2007-11-27 19:30:56 +02004941
Avi Kivity7d9ddae2010-08-30 17:12:28 +03004942 if (rc != X86EMUL_CONTINUE)
4943 goto done;
4944
Avi Kivity018a98d2007-11-27 19:30:56 +02004945writeback:
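	/*
	 * Instructions flagged SrcWrite also modify their source operand;
	 * write it back before the destination.
	 */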
Avi Kivityfb32b1e2013-02-09 11:31:44 +02004946 if (ctxt->d & SrcWrite) {
4947 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
4948 rc = writeback(ctxt, &ctxt->src);
4949 if (rc != X86EMUL_CONTINUE)
4950 goto done;
4951 }
Nadav Amitee212292014-06-15 16:12:58 +03004952 if (!(ctxt->d & NoWrite)) {
4953 rc = writeback(ctxt, &ctxt->dst);
4954 if (rc != X86EMUL_CONTINUE)
4955 goto done;
4956 }
Avi Kivity018a98d2007-11-27 19:30:56 +02004957
Gleb Natapov5cd21912010-03-18 15:20:26 +02004958 /*
4959	 * restore the destination type in case the decode is reused
4960	 * (happens for string instructions)
4961 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004962 ctxt->dst.type = saved_dst_type;
Gleb Natapov5cd21912010-03-18 15:20:26 +02004963
Avi Kivity9dac77f2011-06-01 15:34:25 +03004964 if ((ctxt->d & SrcMask) == SrcSI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03004965 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
Gleb Natapova682e352010-03-18 15:20:21 +02004966
Avi Kivity9dac77f2011-06-01 15:34:25 +03004967 if ((ctxt->d & DstMask) == DstDI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03004968 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
Gleb Natapovd9271122010-03-18 15:20:22 +02004969
Avi Kivity9dac77f2011-06-01 15:34:25 +03004970 if (ctxt->rep_prefix && (ctxt->d & String)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004971 unsigned int count;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004972 struct read_cache *r = &ctxt->io_read;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004973 if ((ctxt->d & SrcMask) == SrcSI)
4974 count = ctxt->src.count;
4975 else
4976 count = ctxt->dst.count;
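		/*
		 * count is the number of elements processed in this pass;
		 * string I/O may complete several iterations at once through
		 * the read-ahead buffer.
		 */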
Paolo Bonzini01485a22014-11-19 18:25:08 +01004977 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004978
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004979 if (!string_insn_completed(ctxt)) {
4980 /*
4981			 * Re-enter the guest when the pio read-ahead buffer is empty
4982			 * or, if it is not used, after every 1024 iterations.
4983 */
Avi Kivitydd856ef2012-08-27 23:46:17 +03004984 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004985 (r->end == 0 || r->end != r->pos)) {
4986 /*
4987				 * Reset the read cache. This usually happens before
4988				 * decode, but since the instruction is restarted
4989				 * we have to do it here.
4990 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004991 ctxt->mem_read.end = 0;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004992 writeback_registers(ctxt);
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004993 return EMULATION_RESTART;
4994 }
4995 goto done; /* skip rip writeback */
Avi Kivity0fa6ccb2010-08-17 11:22:17 +03004996 }
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03004997 ctxt->eflags &= ~EFLG_RF;
Gleb Natapov5cd21912010-03-18 15:20:26 +02004998 }
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004999
Avi Kivity9dac77f2011-06-01 15:34:25 +03005000 ctxt->eip = ctxt->_eip;
Avi Kivity018a98d2007-11-27 19:30:56 +02005001
5002done:
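	/* Only hardware exception vectors (0-31) should be propagated as faults. */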
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02005003 if (rc == X86EMUL_PROPAGATE_FAULT) {
5004 WARN_ON(ctxt->exception.vector > 0x1f);
Avi Kivityda9cb572010-11-22 17:53:21 +02005005 ctxt->have_exception = true;
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02005006 }
Joerg Roedel775fde82011-04-04 12:39:24 +02005007 if (rc == X86EMUL_INTERCEPTED)
5008 return EMULATION_INTERCEPTED;
5009
Avi Kivitydd856ef2012-08-27 23:46:17 +03005010 if (rc == X86EMUL_CONTINUE)
5011 writeback_registers(ctxt);
5012
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005013 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005014
5015twobyte_insn:
Avi Kivity9dac77f2011-06-01 15:34:25 +03005016 switch (ctxt->b) {
Avi Kivity018a98d2007-11-27 19:30:56 +02005017 case 0x09: /* wbinvd */
Clemens Nosscfb22372011-04-21 21:16:05 +02005018 (ctxt->ops->wbinvd)(ctxt);
Sheng Yangf5f48ee2010-06-30 12:25:15 +08005019 break;
5020 case 0x08: /* invd */
Avi Kivity018a98d2007-11-27 19:30:56 +02005021 case 0x0d: /* GrpP (prefetch) */
5022 case 0x18: /* Grp16 (prefetch/nop) */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02005023 case 0x1f: /* nop */
Avi Kivity018a98d2007-11-27 19:30:56 +02005024 break;
5025 case 0x20: /* mov cr, reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005026 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
Avi Kivity018a98d2007-11-27 19:30:56 +02005027 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005028 case 0x21: /* mov from dr to reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005029 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005030 break;
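	/*
	 * cmov: with a 32-bit operand in 64-bit mode the destination is
	 * written back even when the condition is false, since the upper
	 * 32 bits of the register must still be cleared.
	 */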
Avi Kivity6aa8b732006-12-10 02:21:36 -08005031 case 0x40 ... 0x4f: /* cmov */
Nadav Amit140bad82014-06-15 16:13:00 +03005032 if (test_cc(ctxt->b, ctxt->eflags))
5033 ctxt->dst.val = ctxt->src.val;
5034 else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
5035 ctxt->op_bytes != 4)
Avi Kivity9dac77f2011-06-01 15:34:25 +03005036 ctxt->dst.type = OP_NONE; /* no writeback */
Avi Kivity6aa8b732006-12-10 02:21:36 -08005037 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03005038 case 0x80 ... 0x8f: /* jnz rel, etc*/
Avi Kivity9dac77f2011-06-01 15:34:25 +03005039 if (test_cc(ctxt->b, ctxt->eflags))
Nadav Amit234f3ce2014-09-18 22:39:38 +03005040 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02005041 break;
Wei Yongjunee45b582010-08-06 17:10:07 +08005042 case 0x90 ... 0x9f: /* setcc r/m8 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005043 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
Wei Yongjunee45b582010-08-06 17:10:07 +08005044 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005045 case 0xb6 ... 0xb7: /* movzx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005046 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03005047 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
Avi Kivity9dac77f2011-06-01 15:34:25 +03005048 : (u16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005049 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005050 case 0xbe ... 0xbf: /* movsx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005051 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03005052 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
Avi Kivity9dac77f2011-06-01 15:34:25 +03005053 (s16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005054 break;
Avi Kivity91269b82010-07-25 14:51:16 +03005055 default:
5056 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005057 }
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005058
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01005059threebyte_insn:
5060
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005061 if (rc != X86EMUL_CONTINUE)
5062 goto done;
5063
Avi Kivity6aa8b732006-12-10 02:21:36 -08005064 goto writeback;
5065
5066cannot_emulate:
Gleb Natapova0c0ab22011-03-28 16:57:49 +02005067 return EMULATION_FAILED;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005068}
Avi Kivitydd856ef2012-08-27 23:46:17 +03005069
5070void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5071{
5072 invalidate_registers(ctxt);
5073}
5074
5075void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5076{
5077 writeback_registers(ctxt);
5078}