/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
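/*
 * Each operand slot above is an OpXXX value packed into the 56-bit flags
 * word at its own shift; the decoder (x86_decode_insn(), later in the
 * full file) recovers each one with, e.g.:
 *
 *	decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
 */
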
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

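/*
 * A sketch of that calculation (the dispatcher, fastop(), is only
 * forward-declared below and defined later in the full file): starting
 * from the byte-size entry em_<op>, the word/long/quad variants live at
 * consecutive FASTOP_SIZE offsets, so something like
 *
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * picks the entry for the operand size (bytes 1/2/4/8 -> slot 0/1/2/3).
 */
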
struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

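/*
 * Together these form a small write-back cache over the guest GPRs:
 * reg_read() lazily pulls a register into _regs[] on first use,
 * reg_write()/reg_rmw() mark it dirty, writeback_registers() flushes
 * only the dirty ones back through ->write_gpr(), and
 * invalidate_registers() resets the cache for the next instruction.
 */
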
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

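/*
 * As an illustration, FASTOP1(not) emits an em_not table roughly
 * equivalent to:
 *
 *	.align 8; em_not: notb %al;  ret
 *	.align 8;         notw %ax;  ret
 *	.align 8;         notl %eax; ret
 *	.align 8;         notq %rax; ret	(64-bit only)
 *
 * i.e. one FASTOP_SIZE'd entry per operand size.
 */
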
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

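/*
 * Each FOP_SETCC() entry is exactly 4 bytes: a 3-byte setcc plus a
 * 1-byte ret, aligned to 4.  test_cc() below relies on that stride to
 * index the table as em_setcc + 4 * (condition & 0xf).
 */
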
asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

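/*
 * The G-bit scaling above: e.g. a flat descriptor with limit 0xfffff and
 * G=1 scales to (0xfffff << 12) | 0xfff == 0xffffffff, covering the full
 * 4GiB address space.
 */
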
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

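/*
 * Consequently a 16-byte access by an Aligned instruction (e.g. MOVDQA)
 * whose linear address has (la & 15) != 0 takes the emulate_gp(ctxt, 0)
 * path in __linearize() below, while MOVDQU (Unaligned) never does.
 */
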
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		if (size > *max_size)
			goto bad;
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			mode = X86EMUL_MODE_PROT64;
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

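/*
 * Typical decoder usage, e.g. to pull an 8-bit displacement:
 *
 *	modrm_ea += insn_fetch(s8, ctxt);
 *
 * Note the macro assumes a local "rc" and a "done:" label in the caller,
 * as in decode_modrm() below.
 */
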
#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

/*
 * Given the 'reg' portion of a ModRM byte, return a pointer into the
 * emulator's register block (ctxt->_regs) that addresses the relevant
 * register.  @byteop selects byte-sized registers; without a REX prefix
 * this includes the legacy high-byte registers AH, CH, DH and BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

1255static int decode_abs(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001256 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001257{
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001258 int rc = X86EMUL_CONTINUE;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001259
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001260 op->type = OP_MEM;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001261 switch (ctxt->ad_bytes) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001262 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001263 op->addr.mem.ea = insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001264 break;
1265 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001266 op->addr.mem.ea = insn_fetch(u32, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001267 break;
1268 case 8:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001269 op->addr.mem.ea = insn_fetch(u64, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001270 break;
1271 }
1272done:
1273 return rc;
1274}
1275
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

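/*
 * Memory reads go through a per-instruction cache; if emulation of the
 * same instruction is restarted (e.g. after an exit to userspace), the
 * cached bytes are replayed instead of re-reading guest memory.
 */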
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

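/*
 * IN/INS reads are batched: for a REP INS, up to a page's worth of data
 * (bounded by the cache size and the remaining count in RCX) is read in
 * one call, and later iterations are served from the io_read cache.
 */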
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

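/*
 * Bit 2 of a segment selector is the table indicator: if set, the
 * selector refers to the LDT, otherwise to the GDT.
 */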
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

/* allowed only for 8-byte segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed only for 8-byte segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					    ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

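/*
 * ENTER: push the old frame pointer, point RBP at it, and reserve
 * frame_size bytes of stack.  A nonzero nesting level would require
 * copying the enclosing frame pointers and is not handled here.
 */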
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

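/*
 * Real-mode interrupt dispatch: push FLAGS, CS and IP, clear IF/TF/AC,
 * then load CS:IP from the 4-byte interrupt vector table entry at
 * IDT base + vector * 4.
 */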
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

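/*
 * Far JMP: load the new CS first, then assign RIP.  RIP assignment can
 * only fail in 64-bit mode (non-canonical target), in which case the
 * old CS is restored so the fault is raised with consistent state.
 */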
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

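/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand.  On a
 * match, ZF is set and ECX:EBX is stored to memory; otherwise ZF is
 * cleared and the memory operand is loaded into EDX:EAX.  The 16-byte
 * CMPXCHG16B form is not handled here.
 */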
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

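/*
 * CMPXCHG: compare the accumulator with the destination.  On a match
 * (ZF set) the source is stored to the destination; otherwise the
 * destination's old value is loaded into the accumulator.
 */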
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in long mode - so only become
	 * vendor specific (via cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * Intel CPUs only support "syscall" in 64-bit long mode, so a
	 * 64-bit guest running a 32-bit compat app will #UD.  This could
	 * be fixed by emulating the AMD response instead, but AMD CPUs
	 * cannot be made to behave like Intel ones.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}

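/*
 * SYSCALL: the new CS selector comes from STAR[47:32] and SS from the
 * next GDT slot (CS + 8).  In long mode the return RIP goes to RCX and
 * the saved flags to R11; RIP is loaded from LSTAR (or CSTAR for a
 * 32-bit caller) and the flag bits named in SYSCALL_MASK are cleared.
 */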
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
}

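/*
 * SYSENTER: CS comes from the SYSENTER_CS MSR (SS is the next GDT
 * slot), and RIP/RSP are loaded from SYSENTER_EIP and SYSENTER_ESP;
 * VM and IF are cleared.
 */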
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;

	return X86EMUL_CONTINUE;
}

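/*
 * SYSEXIT: the target CS/SS pair sits at a fixed offset from
 * SYSENTER_CS (+16/+24 for a 32-bit return, +32/+40 for a 64-bit one);
 * RIP and RSP are restored from RDX and RCX respectively.
 */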
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002386static int em_sysexit(struct x86_emulate_ctxt *ctxt)
Andre Przywara4668f052009-06-18 12:56:02 +02002387{
Mathias Krause0225fb52012-08-30 01:30:16 +02002388 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002389 struct desc_struct cs, ss;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002390 u64 msr_data, rcx, rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002391 int usermode;
Xiao Guangrong1249b962011-05-15 23:25:10 +08002392 u16 cs_sel = 0, ss_sel = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002393
Gleb Natapova0044752010-02-10 14:21:31 +02002394 /* inject #GP if in real mode or Virtual 8086 mode */
2395 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002396 ctxt->mode == X86EMUL_MODE_VM86)
2397 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002398
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002399 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara4668f052009-06-18 12:56:02 +02002400
Avi Kivity9dac77f2011-06-01 15:34:25 +03002401 if ((ctxt->rex_prefix & 0x8) != 0x0)
Andre Przywara4668f052009-06-18 12:56:02 +02002402 usermode = X86EMUL_MODE_PROT64;
2403 else
2404 usermode = X86EMUL_MODE_PROT32;
2405
Nadav Amit234f3ce2014-09-18 22:39:38 +03002406 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2407 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2408
Andre Przywara4668f052009-06-18 12:56:02 +02002409 cs.dpl = 3;
2410 ss.dpl = 3;
Avi Kivity717746e2011-04-20 13:37:53 +03002411 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara4668f052009-06-18 12:56:02 +02002412 switch (usermode) {
2413 case X86EMUL_MODE_PROT32:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002414 cs_sel = (u16)(msr_data + 16);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002415 if ((msr_data & 0xfffc) == 0x0)
2416 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002417 ss_sel = (u16)(msr_data + 24);
Nadav Amitbf0b6822014-09-18 22:39:45 +03002418 rcx = (u32)rcx;
2419 rdx = (u32)rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002420 break;
2421 case X86EMUL_MODE_PROT64:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002422 cs_sel = (u16)(msr_data + 32);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002423 if (msr_data == 0x0)
2424 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002425 ss_sel = cs_sel + 8;
2426 cs.d = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002427 cs.l = 1;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002428 if (is_noncanonical_address(rcx) ||
2429 is_noncanonical_address(rdx))
2430 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002431 break;
2432 }
Gleb Natapov79168fd2010-04-28 19:15:30 +03002433 cs_sel |= SELECTOR_RPL_MASK;
2434 ss_sel |= SELECTOR_RPL_MASK;
Andre Przywara4668f052009-06-18 12:56:02 +02002435
Avi Kivity1aa36612011-04-27 13:20:30 +03002436 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2437 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara4668f052009-06-18 12:56:02 +02002438
Nadav Amit234f3ce2014-09-18 22:39:38 +03002439 ctxt->_eip = rdx;
2440 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
Andre Przywara4668f052009-06-18 12:56:02 +02002441
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002442 return X86EMUL_CONTINUE;
Andre Przywara4668f052009-06-18 12:56:02 +02002443}
2444
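/*
 * IOPL rules assumed by the helper below: real mode never faults on
 * I/O, VM86 mode always defers to the TSS I/O bitmap, and protected
 * mode allows direct access only while CPL <= IOPL.
 */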
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002445static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002446{
2447 int iopl;
2448 if (ctxt->mode == X86EMUL_MODE_REAL)
2449 return false;
2450 if (ctxt->mode == X86EMUL_MODE_VM86)
2451 return true;
2452 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002453 return ctxt->ops->cpl(ctxt) > iopl;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002454}
2455
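/*
 * TSS I/O bitmap layout relied on below: byte offset 102 of the
 * 32-bit TSS holds the 16-bit offset of the I/O permission bitmap,
 * which stores one bit per port; port N is bit (N & 7) of byte
 * (bitmap + N/8), and an access of 'len' bytes is allowed only if
 * all 'len' consecutive bits are clear.
 */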
2456static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002457 u16 port, u16 len)
2458{
Mathias Krause0225fb52012-08-30 01:30:16 +02002459 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002460 struct desc_struct tr_seg;
Gleb Natapov5601d052011-03-07 14:55:06 +02002461 u32 base3;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002462 int r;
Avi Kivity1aa36612011-04-27 13:20:30 +03002463 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002464 unsigned mask = (1 << len) - 1;
Gleb Natapov5601d052011-03-07 14:55:06 +02002465 unsigned long base;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002466
Avi Kivity1aa36612011-04-27 13:20:30 +03002467 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002468 if (!tr_seg.p)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002469 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002470 if (desc_limit_scaled(&tr_seg) < 103)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002471 return false;
Gleb Natapov5601d052011-03-07 14:55:06 +02002472 base = get_desc_base(&tr_seg);
2473#ifdef CONFIG_X86_64
2474 base |= ((u64)base3) << 32;
2475#endif
Avi Kivity0f65dd72011-04-20 13:37:53 +03002476 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002477 if (r != X86EMUL_CONTINUE)
2478 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002479 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002480 return false;
Avi Kivity0f65dd72011-04-20 13:37:53 +03002481 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002482 if (r != X86EMUL_CONTINUE)
2483 return false;
2484 if ((perm >> bit_idx) & mask)
2485 return false;
2486 return true;
2487}
2488
2489static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002490 u16 port, u16 len)
2491{
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002492 if (ctxt->perm_ok)
2493 return true;
2494
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002495 if (emulator_bad_iopl(ctxt))
2496 if (!emulator_io_port_access_allowed(ctxt, port, len))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002497 return false;
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002498
2499 ctxt->perm_ok = true;
2500
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002501 return true;
2502}
2503
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002504static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002505 struct tss_segment_16 *tss)
2506{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002507 tss->ip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002508 tss->flag = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002509 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2510 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2511 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2512 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2513 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2514 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2515 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2516 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002517
Avi Kivity1aa36612011-04-27 13:20:30 +03002518 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2519 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2520 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2521 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2522 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002523}
2524
2525static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002526 struct tss_segment_16 *tss)
2527{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002528 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002529 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002530
Avi Kivity9dac77f2011-06-01 15:34:25 +03002531 ctxt->_eip = tss->ip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002532 ctxt->eflags = tss->flag | 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002533 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2534 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2535 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2536 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2537 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2538 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2539 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2540 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002541
2542 /*
2543 * SDM says that segment selectors are loaded before segment
2544 * descriptors
2545 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002546 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2547 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2548 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2549 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2550 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002551
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002552 cpl = tss->cs & 3;
2553
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002554 /*
Guo Chaofc058682012-06-28 15:19:51 +08002555	 * Now load segment descriptors. If a fault happens at this stage,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002556	 * it is handled in the context of the new task.
2557 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002558 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2559 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002560 if (ret != X86EMUL_CONTINUE)
2561 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002562 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2563 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002564 if (ret != X86EMUL_CONTINUE)
2565 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002566 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2567 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002568 if (ret != X86EMUL_CONTINUE)
2569 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002570 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2571 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002572 if (ret != X86EMUL_CONTINUE)
2573 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002574 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2575 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002576 if (ret != X86EMUL_CONTINUE)
2577 return ret;
2578
2579 return X86EMUL_CONTINUE;
2580}
2581
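/*
 * The 16-bit task switch below follows the hardware sequence: read
 * the old TSS, refresh it with the current CPU state, write it back,
 * read the new TSS, optionally chain old_tss_sel into its
 * prev_task_link field, and finally load state from the new image.
 */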
2582static int task_switch_16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002583 u16 tss_selector, u16 old_tss_sel,
2584 ulong old_tss_base, struct desc_struct *new_desc)
2585{
Mathias Krause0225fb52012-08-30 01:30:16 +02002586 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002587 struct tss_segment_16 tss_seg;
2588 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002589 u32 new_tss_base = get_desc_base(new_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002590
Avi Kivity0f65dd72011-04-20 13:37:53 +03002591 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002592 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002593 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002594 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002595
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002596 save_state_to_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002597
Avi Kivity0f65dd72011-04-20 13:37:53 +03002598 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002599 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002600 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002601 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002602
Avi Kivity0f65dd72011-04-20 13:37:53 +03002603 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002604 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002605 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002606 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002607
2608 if (old_tss_sel != 0xffff) {
2609 tss_seg.prev_task_link = old_tss_sel;
2610
Avi Kivity0f65dd72011-04-20 13:37:53 +03002611 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002612 &tss_seg.prev_task_link,
2613 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002614 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002615 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002616 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002617 }
2618
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002619 return load_state_from_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002620}
2621
2622static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002623 struct tss_segment_32 *tss)
2624{
Nadav Amit5c7411e2014-04-07 18:37:47 +03002625	/* CR3 and the LDT selector are intentionally not saved */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002626 tss->eip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002627 tss->eflags = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002628 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2629 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2630 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2631 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2632 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2633 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2634 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2635 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002636
Avi Kivity1aa36612011-04-27 13:20:30 +03002637 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2638 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2639 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2640 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2641 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2642 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002643}
2644
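/*
 * Unlike the 16-bit path, restoring a 32-bit TSS also reloads CR3,
 * and bit 1 of eflags (always-set) is forced on. The VM flag of the
 * incoming eflags decides whether we resume in VM86 mode (CPL forced
 * to 3) or in protected mode (CPL taken from CS.RPL).
 */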
2645static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002646 struct tss_segment_32 *tss)
2647{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002648 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002649 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002650
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002651 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002652 return emulate_gp(ctxt, 0);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002653 ctxt->_eip = tss->eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002654 ctxt->eflags = tss->eflags | 2;
Kevin Wolf4cee4792012-02-08 14:34:41 +01002655
2656 /* General purpose registers */
Avi Kivitydd856ef2012-08-27 23:46:17 +03002657 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2658 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2659 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2660 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2661 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2662 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2663 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2664 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002665
2666 /*
2667 * SDM says that segment selectors are loaded before segment
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002668 * descriptors. This is important because CPL checks will
2669 * use CS.RPL.
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002670 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002671 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2672 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2673 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2674 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2675 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2676 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2677 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002678
2679 /*
Kevin Wolf4cee4792012-02-08 14:34:41 +01002680 * If we're switching between Protected Mode and VM86, we need to make
2681 * sure to update the mode before loading the segment descriptors so
2682 * that the selectors are interpreted correctly.
Kevin Wolf4cee4792012-02-08 14:34:41 +01002683 */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002684 if (ctxt->eflags & X86_EFLAGS_VM) {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002685 ctxt->mode = X86EMUL_MODE_VM86;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002686 cpl = 3;
2687 } else {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002688 ctxt->mode = X86EMUL_MODE_PROT32;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002689 cpl = tss->cs & 3;
2690 }
Kevin Wolf4cee4792012-02-08 14:34:41 +01002691
2692 /*
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002693	 * Now load segment descriptors. If a fault happens at this stage,
 2694	 * it is handled in the context of the new task.
2695 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002696 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2697 cpl, true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002698 if (ret != X86EMUL_CONTINUE)
2699 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002700 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2701 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002702 if (ret != X86EMUL_CONTINUE)
2703 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002704 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2705 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002706 if (ret != X86EMUL_CONTINUE)
2707 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002708 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2709 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002710 if (ret != X86EMUL_CONTINUE)
2711 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002712 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2713 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002714 if (ret != X86EMUL_CONTINUE)
2715 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002716 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2717 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002718 if (ret != X86EMUL_CONTINUE)
2719 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002720 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2721 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002722 if (ret != X86EMUL_CONTINUE)
2723 return ret;
2724
2725 return X86EMUL_CONTINUE;
2726}
2727
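/*
 * Mirrors task_switch_16, with one wrinkle: when refreshing the old
 * 32-bit TSS we only write back the slice from 'eip' up to (but not
 * including) 'ldt_selector', i.e. the GP registers and segment
 * selectors, leaving CR3 and the other static fields untouched.
 */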
2728static int task_switch_32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002729 u16 tss_selector, u16 old_tss_sel,
2730 ulong old_tss_base, struct desc_struct *new_desc)
2731{
Mathias Krause0225fb52012-08-30 01:30:16 +02002732 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002733 struct tss_segment_32 tss_seg;
2734 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002735 u32 new_tss_base = get_desc_base(new_desc);
Nadav Amit5c7411e2014-04-07 18:37:47 +03002736 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2737 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002738
Avi Kivity0f65dd72011-04-20 13:37:53 +03002739 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002740 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002741 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002742 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002743 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002744
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002745 save_state_to_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002746
Nadav Amit5c7411e2014-04-07 18:37:47 +03002747 /* Only GP registers and segment selectors are saved */
2748 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2749 ldt_sel_offset - eip_offset, &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002750 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002751 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002752 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002753
Avi Kivity0f65dd72011-04-20 13:37:53 +03002754 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002755 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002756 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002757 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002758 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002759
2760 if (old_tss_sel != 0xffff) {
2761 tss_seg.prev_task_link = old_tss_sel;
2762
Avi Kivity0f65dd72011-04-20 13:37:53 +03002763 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002764 &tss_seg.prev_task_link,
2765 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002766 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002767 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002768 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002769 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002770 }
2771
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002772 return load_state_from_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002773}
2774
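/*
 * High-level flow of the dispatcher below: fetch the old and new TSS
 * descriptors, apply the privilege rules commented inside, check that
 * the new descriptor is present with a sufficient limit (at least
 * 0x67 for a 32-bit TSS, 0x2b otherwise), handle the busy-bit and
 * NT/back-link plumbing, hand off to the 16- or 32-bit worker, set
 * CR0.TS, and push the error code for exceptions that carry one.
 */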
2775static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002776 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002777 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002778{
Mathias Krause0225fb52012-08-30 01:30:16 +02002779 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002780 struct desc_struct curr_tss_desc, next_tss_desc;
2781 int ret;
Avi Kivity1aa36612011-04-27 13:20:30 +03002782 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002783 ulong old_tss_base =
Avi Kivity4bff1e862011-04-20 13:37:53 +03002784 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
Gleb Natapovceffb452010-03-18 15:20:19 +02002785 u32 desc_limit;
Avi Kivitye9194642012-06-13 16:29:39 +03002786 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002787
2788 /* FIXME: old_tss_base == ~0 ? */
2789
Avi Kivitye9194642012-06-13 16:29:39 +03002790 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002791 if (ret != X86EMUL_CONTINUE)
2792 return ret;
Avi Kivitye9194642012-06-13 16:29:39 +03002793 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002794 if (ret != X86EMUL_CONTINUE)
2795 return ret;
2796
2797 /* FIXME: check that next_tss_desc is tss */
2798
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002799 /*
2800 * Check privileges. The three cases are task switch caused by...
2801 *
2802 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2803 * 2. Exception/IRQ/iret: No check is performed
Nadav Amit2c2ca2d2014-11-02 11:54:57 +02002804 * 3. jmp/call to TSS/task-gate: No check is performed since the
2805 * hardware checks it before exiting.
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002806 */
2807 if (reason == TASK_SWITCH_GATE) {
2808 if (idt_index != -1) {
2809 /* Software interrupts */
2810 struct desc_struct task_gate_desc;
2811 int dpl;
2812
2813 ret = read_interrupt_descriptor(ctxt, idt_index,
2814 &task_gate_desc);
2815 if (ret != X86EMUL_CONTINUE)
2816 return ret;
2817
2818 dpl = task_gate_desc.dpl;
2819 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2820 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2821 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002822 }
2823
Gleb Natapovceffb452010-03-18 15:20:19 +02002824 desc_limit = desc_limit_scaled(&next_tss_desc);
2825 if (!next_tss_desc.p ||
2826 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2827 desc_limit < 0x2b)) {
Paolo Bonzini592f0852014-08-20 10:05:08 +02002828 return emulate_ts(ctxt, tss_selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002829 }
2830
2831 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2832 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002833 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002834 }
2835
2836 if (reason == TASK_SWITCH_IRET)
2837 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2838
 2839	 /* Set the back link to the previous task only if the NT bit is set
Guo Chaofc058682012-06-28 15:19:51 +08002840	 in eflags; note that old_tss_sel is not used after this point */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002841 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2842 old_tss_sel = 0xffff;
2843
2844 if (next_tss_desc.type & 8)
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002845 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002846 old_tss_base, &next_tss_desc);
2847 else
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002848 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002849 old_tss_base, &next_tss_desc);
Jan Kiszka0760d442010-04-14 15:50:57 +02002850 if (ret != X86EMUL_CONTINUE)
2851 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002852
2853 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2854 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2855
2856 if (reason != TASK_SWITCH_IRET) {
2857 next_tss_desc.type |= (1 << 1); /* set busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002858 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002859 }
2860
Avi Kivity717746e2011-04-20 13:37:53 +03002861 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
Avi Kivity1aa36612011-04-27 13:20:30 +03002862 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002863
Jan Kiszkae269fb22010-04-14 15:51:09 +02002864 if (has_error_code) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002865 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2866 ctxt->lock_prefix = 0;
2867 ctxt->src.val = (unsigned long) error_code;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002868 ret = em_push(ctxt);
Jan Kiszkae269fb22010-04-14 15:51:09 +02002869 }
2870
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002871 return ret;
2872}
2873
2874int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002875 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002876 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002877{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002878 int rc;
2879
Avi Kivitydd856ef2012-08-27 23:46:17 +03002880 invalidate_registers(ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002881 ctxt->_eip = ctxt->eip;
2882 ctxt->dst.type = OP_NONE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002883
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002884 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002885 has_error_code, error_code);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002886
Avi Kivitydd856ef2012-08-27 23:46:17 +03002887 if (rc == X86EMUL_CONTINUE) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002888 ctxt->eip = ctxt->_eip;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002889 writeback_registers(ctxt);
2890 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002891
Gleb Natapova0c0ab22011-03-28 16:57:49 +02002892 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002893}
2894
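/*
 * Helper for string instructions: DF in eflags picks the direction,
 * and op->count (greater than one for batched REP iterations) scales
 * the stride so SI/DI advance over the whole batch in one step.
 */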
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03002895static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2896 struct operand *op)
Gleb Natapova682e352010-03-18 15:20:21 +02002897{
Gleb Natapovb3356bf2012-09-03 15:24:29 +03002898 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
Gleb Natapova682e352010-03-18 15:20:21 +02002899
Paolo Bonzini01485a22014-11-19 18:25:08 +01002900 register_address_increment(ctxt, reg, df * op->bytes);
2901 op->addr.mem.ea = register_address(ctxt, reg);
Gleb Natapova682e352010-03-18 15:20:21 +02002902}
2903
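/*
 * DAS in a nutshell (paraphrasing the SDM pseudocode): if the low
 * nibble of AL exceeds 9 or AF is set, subtract 6 from AL and update
 * AF/CF; if the original AL was above 0x99 or CF was set, subtract a
 * further 0x60 and set CF. The fastop OR against 0 at the end exists
 * only to recompute PF/ZF/SF.
 */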
Avi Kivity7af04fc2010-08-18 14:16:35 +03002904static int em_das(struct x86_emulate_ctxt *ctxt)
2905{
Avi Kivity7af04fc2010-08-18 14:16:35 +03002906 u8 al, old_al;
2907 bool af, cf, old_cf;
2908
2909 cf = ctxt->eflags & X86_EFLAGS_CF;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002910 al = ctxt->dst.val;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002911
2912 old_al = al;
2913 old_cf = cf;
2914 cf = false;
2915 af = ctxt->eflags & X86_EFLAGS_AF;
2916 if ((al & 0x0f) > 9 || af) {
2917 al -= 6;
2918 cf = old_cf | (al >= 250);
2919 af = true;
2920 } else {
2921 af = false;
2922 }
2923 if (old_al > 0x99 || old_cf) {
2924 al -= 0x60;
2925 cf = true;
2926 }
2927
Avi Kivity9dac77f2011-06-01 15:34:25 +03002928 ctxt->dst.val = al;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002929 /* Set PF, ZF, SF */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002930 ctxt->src.type = OP_IMM;
2931 ctxt->src.val = 0;
2932 ctxt->src.bytes = 1;
Avi Kivity158de572013-01-19 19:51:57 +02002933 fastop(ctxt, em_or);
Avi Kivity7af04fc2010-08-18 14:16:35 +03002934 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2935 if (cf)
2936 ctxt->eflags |= X86_EFLAGS_CF;
2937 if (af)
2938 ctxt->eflags |= X86_EFLAGS_AF;
2939 return X86EMUL_CONTINUE;
2940}
2941
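/*
 * AAM divides AL by the immediate: AH = AL / imm8, AL = AL % imm8.
 * An immediate of zero raises #DE, hence the explicit emulate_de()
 * check before the division.
 */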
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02002942static int em_aam(struct x86_emulate_ctxt *ctxt)
2943{
2944 u8 al, ah;
2945
2946 if (ctxt->src.val == 0)
2947 return emulate_de(ctxt);
2948
2949 al = ctxt->dst.val & 0xff;
2950 ah = al / ctxt->src.val;
2951 al %= ctxt->src.val;
2952
2953 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2954
2955 /* Set PF, ZF, SF */
2956 ctxt->src.type = OP_IMM;
2957 ctxt->src.val = 0;
2958 ctxt->src.bytes = 1;
2959 fastop(ctxt, em_or);
2960
2961 return X86EMUL_CONTINUE;
2962}
2963
Gleb Natapov7f662272012-12-10 11:42:30 +02002964static int em_aad(struct x86_emulate_ctxt *ctxt)
2965{
2966 u8 al = ctxt->dst.val & 0xff;
2967 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2968
2969 al = (al + (ah * ctxt->src.val)) & 0xff;
2970
2971 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2972
Gleb Natapovf583c292013-02-13 17:50:39 +02002973 /* Set PF, ZF, SF */
2974 ctxt->src.type = OP_IMM;
2975 ctxt->src.val = 0;
2976 ctxt->src.bytes = 1;
2977 fastop(ctxt, em_or);
Gleb Natapov7f662272012-12-10 11:42:30 +02002978
2979 return X86EMUL_CONTINUE;
2980}
2981
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09002982static int em_call(struct x86_emulate_ctxt *ctxt)
2983{
Nadav Amit234f3ce2014-09-18 22:39:38 +03002984 int rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09002985 long rel = ctxt->src.val;
2986
2987 ctxt->src.val = (unsigned long)ctxt->_eip;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002988 rc = jmp_rel(ctxt, rel);
2989 if (rc != X86EMUL_CONTINUE)
2990 return rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09002991 return em_push(ctxt);
2992}
2993
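/*
 * Far call: the new CS descriptor is loaded first, then the old CS
 * and EIP are pushed. If a push faults after CS has already been
 * switched, the fail path restores the saved CS descriptor so the
 * guest never observes a half-committed transfer.
 */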
Avi Kivity0ef753b2010-08-18 14:51:45 +03002994static int em_call_far(struct x86_emulate_ctxt *ctxt)
2995{
Avi Kivity0ef753b2010-08-18 14:51:45 +03002996 u16 sel, old_cs;
2997 ulong old_eip;
2998 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002999 struct desc_struct old_desc, new_desc;
3000 const struct x86_emulate_ops *ops = ctxt->ops;
3001 int cpl = ctxt->ops->cpl(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003002
Avi Kivity9dac77f2011-06-01 15:34:25 +03003003 old_eip = ctxt->_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003004 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003005
Avi Kivity9dac77f2011-06-01 15:34:25 +03003006 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Nadav Amitd1442d82014-09-18 22:39:39 +03003007 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3008 &new_desc);
3009 if (rc != X86EMUL_CONTINUE)
Avi Kivity0ef753b2010-08-18 14:51:45 +03003010 return X86EMUL_CONTINUE;
3011
Nadav Amitd50eaa12014-11-19 17:43:11 +02003012 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03003013 if (rc != X86EMUL_CONTINUE)
3014 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003015
Avi Kivity9dac77f2011-06-01 15:34:25 +03003016 ctxt->src.val = old_cs;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09003017 rc = em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003018 if (rc != X86EMUL_CONTINUE)
Nadav Amitd1442d82014-09-18 22:39:39 +03003019 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003020
Avi Kivity9dac77f2011-06-01 15:34:25 +03003021 ctxt->src.val = old_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003022 rc = em_push(ctxt);
3023 /* If we failed, we tainted the memory, but the very least we should
3024 restore cs */
3025 if (rc != X86EMUL_CONTINUE)
3026 goto fail;
3027 return rc;
3028fail:
3029 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3030 return rc;
3031
Avi Kivity0ef753b2010-08-18 14:51:45 +03003032}
3033
Avi Kivity40ece7c2010-08-18 15:12:09 +03003034static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3035{
Avi Kivity40ece7c2010-08-18 15:12:09 +03003036 int rc;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003037 unsigned long eip;
Avi Kivity40ece7c2010-08-18 15:12:09 +03003038
Nadav Amit234f3ce2014-09-18 22:39:38 +03003039 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3040 if (rc != X86EMUL_CONTINUE)
3041 return rc;
3042 rc = assign_eip_near(ctxt, eip);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003043 if (rc != X86EMUL_CONTINUE)
3044 return rc;
Avi Kivity5ad105e2012-08-19 14:34:31 +03003045 rsp_increment(ctxt, ctxt->src.val);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003046 return X86EMUL_CONTINUE;
3047}
3048
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003049static int em_xchg(struct x86_emulate_ctxt *ctxt)
3050{
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003051 /* Write back the register source. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003052 ctxt->src.val = ctxt->dst.val;
3053 write_register_operand(&ctxt->src);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003054
3055 /* Write back the memory destination with implicit LOCK prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003056 ctxt->dst.val = ctxt->src.orig_val;
3057 ctxt->lock_prefix = 1;
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003058 return X86EMUL_CONTINUE;
3059}
3060
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003061static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3062{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003063 ctxt->dst.val = ctxt->src2.val;
Avi Kivity4d758342013-01-19 19:51:55 +02003064 return fastop(ctxt, em_imul);
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003065}
3066
Avi Kivity61429142010-08-19 15:13:00 +03003067static int em_cwd(struct x86_emulate_ctxt *ctxt)
3068{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003069 ctxt->dst.type = OP_REG;
3070 ctxt->dst.bytes = ctxt->src.bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03003071 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivity9dac77f2011-06-01 15:34:25 +03003072 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
Avi Kivity61429142010-08-19 15:13:00 +03003073
3074 return X86EMUL_CONTINUE;
3075}
3076
Avi Kivity48bb5d32010-08-18 18:54:34 +03003077static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3078{
Avi Kivity48bb5d32010-08-18 18:54:34 +03003079 u64 tsc = 0;
3080
Avi Kivity717746e2011-04-20 13:37:53 +03003081 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003082 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3083 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
Avi Kivity48bb5d32010-08-18 18:54:34 +03003084 return X86EMUL_CONTINUE;
3085}
3086
Avi Kivity222d21a2011-11-10 14:57:30 +02003087static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3088{
3089 u64 pmc;
3090
Avi Kivitydd856ef2012-08-27 23:46:17 +03003091 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
Avi Kivity222d21a2011-11-10 14:57:30 +02003092 return emulate_gp(ctxt, 0);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003093 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3094 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
Avi Kivity222d21a2011-11-10 14:57:30 +02003095 return X86EMUL_CONTINUE;
3096}
3097
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003098static int em_mov(struct x86_emulate_ctxt *ctxt)
3099{
Paolo Bonzini54cfdb32014-03-27 11:36:25 +01003100 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003101 return X86EMUL_CONTINUE;
3102}
3103
Borislav Petkov84cffe42013-10-29 12:54:56 +01003104#define FFL(x) bit(X86_FEATURE_##x)
3105
3106static int em_movbe(struct x86_emulate_ctxt *ctxt)
3107{
3108 u32 ebx, ecx, edx, eax = 1;
3109 u16 tmp;
3110
3111 /*
 3112	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3113 */
3114 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3115 if (!(ecx & FFL(MOVBE)))
3116 return emulate_ud(ctxt);
3117
3118 switch (ctxt->op_bytes) {
3119 case 2:
3120 /*
3121 * From MOVBE definition: "...When the operand size is 16 bits,
3122 * the upper word of the destination register remains unchanged
3123 * ..."
3124 *
 3125	 * Casting either ->valptr or ->val to u16 breaks strict aliasing
 3126	 * rules, so we have to do the operation almost by hand.
3127 */
3128 tmp = (u16)ctxt->src.val;
3129 ctxt->dst.val &= ~0xffffUL;
3130 ctxt->dst.val |= (unsigned long)swab16(tmp);
3131 break;
3132 case 4:
3133 ctxt->dst.val = swab32((u32)ctxt->src.val);
3134 break;
3135 case 8:
3136 ctxt->dst.val = swab64(ctxt->src.val);
3137 break;
3138 default:
Paolo Bonzini592f0852014-08-20 10:05:08 +02003139 BUG();
Borislav Petkov84cffe42013-10-29 12:54:56 +01003140 }
3141 return X86EMUL_CONTINUE;
3142}
3143
Takuya Yoshikawabc00f8d2011-11-22 15:19:19 +09003144static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3145{
3146 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3147 return emulate_gp(ctxt, 0);
3148
3149 /* Disable writeback. */
3150 ctxt->dst.type = OP_NONE;
3151 return X86EMUL_CONTINUE;
3152}
3153
3154static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3155{
3156 unsigned long val;
3157
3158 if (ctxt->mode == X86EMUL_MODE_PROT64)
3159 val = ctxt->src.val & ~0ULL;
3160 else
3161 val = ctxt->src.val & ~0U;
3162
3163 /* #UD condition is already handled. */
3164 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3165 return emulate_gp(ctxt, 0);
3166
3167 /* Disable writeback. */
3168 ctxt->dst.type = OP_NONE;
3169 return X86EMUL_CONTINUE;
3170}
3171
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003172static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3173{
3174 u64 msr_data;
3175
Avi Kivitydd856ef2012-08-27 23:46:17 +03003176 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3177 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3178 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003179 return emulate_gp(ctxt, 0);
3180
3181 return X86EMUL_CONTINUE;
3182}
3183
3184static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3185{
3186 u64 msr_data;
3187
Avi Kivitydd856ef2012-08-27 23:46:17 +03003188 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003189 return emulate_gp(ctxt, 0);
3190
Avi Kivitydd856ef2012-08-27 23:46:17 +03003191 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3192 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003193 return X86EMUL_CONTINUE;
3194}
3195
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003196static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3197{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003198 if (ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003199 return emulate_ud(ctxt);
3200
Avi Kivity9dac77f2011-06-01 15:34:25 +03003201 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
Nadav Amitb5bbf102014-11-02 11:54:46 +02003202 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3203 ctxt->dst.bytes = 2;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003204 return X86EMUL_CONTINUE;
3205}
3206
3207static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3208{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003209 u16 sel = ctxt->src.val;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003210
Avi Kivity9dac77f2011-06-01 15:34:25 +03003211 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003212 return emulate_ud(ctxt);
3213
Avi Kivity9dac77f2011-06-01 15:34:25 +03003214 if (ctxt->modrm_reg == VCPU_SREG_SS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003215 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3216
3217 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003218 ctxt->dst.type = OP_NONE;
3219 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003220}
3221
Avi Kivitya14e5792012-06-13 12:28:33 +03003222static int em_lldt(struct x86_emulate_ctxt *ctxt)
3223{
3224 u16 sel = ctxt->src.val;
3225
3226 /* Disable writeback. */
3227 ctxt->dst.type = OP_NONE;
3228 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3229}
3230
Avi Kivity80890002012-06-13 16:33:29 +03003231static int em_ltr(struct x86_emulate_ctxt *ctxt)
3232{
3233 u16 sel = ctxt->src.val;
3234
3235 /* Disable writeback. */
3236 ctxt->dst.type = OP_NONE;
3237 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3238}
3239
Avi Kivity38503912011-03-31 18:48:09 +02003240static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3241{
Avi Kivity9fa088f2011-03-31 18:54:30 +02003242 int rc;
3243 ulong linear;
3244
Avi Kivity9dac77f2011-06-01 15:34:25 +03003245 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02003246 if (rc == X86EMUL_CONTINUE)
Avi Kivity3cb16fe2011-04-20 15:38:44 +03003247 ctxt->ops->invlpg(ctxt, linear);
Avi Kivity38503912011-03-31 18:48:09 +02003248 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003249 ctxt->dst.type = OP_NONE;
Avi Kivity38503912011-03-31 18:48:09 +02003250 return X86EMUL_CONTINUE;
3251}
3252
Avi Kivity2d04a052011-04-20 15:32:49 +03003253static int em_clts(struct x86_emulate_ctxt *ctxt)
3254{
3255 ulong cr0;
3256
3257 cr0 = ctxt->ops->get_cr(ctxt, 0);
3258 cr0 &= ~X86_CR0_TS;
3259 ctxt->ops->set_cr(ctxt, 0, cr0);
3260 return X86EMUL_CONTINUE;
3261}
3262
Avi Kivity26d05cc2011-04-21 12:07:59 +03003263static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3264{
Nadav Amit0f54a322014-08-29 11:26:55 +03003265 int rc = ctxt->ops->fix_hypercall(ctxt);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003266
Avi Kivity26d05cc2011-04-21 12:07:59 +03003267 if (rc != X86EMUL_CONTINUE)
3268 return rc;
3269
3270 /* Let the processor re-execute the fixed hypercall */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003271 ctxt->_eip = ctxt->eip;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003272 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003273 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003274 return X86EMUL_CONTINUE;
3275}
3276
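/*
 * SGDT/SIDT store a 2-byte limit followed by the base address. Two
 * quirks preserved below: with a 16-bit operand size the base is
 * truncated to 24 bits, and in 64-bit mode the operand size is
 * forced to 8 bytes.
 */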
Avi Kivity96051572012-06-10 17:21:18 +03003277static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3278 void (*get)(struct x86_emulate_ctxt *ctxt,
3279 struct desc_ptr *ptr))
3280{
3281 struct desc_ptr desc_ptr;
3282
3283 if (ctxt->mode == X86EMUL_MODE_PROT64)
3284 ctxt->op_bytes = 8;
3285 get(ctxt, &desc_ptr);
3286 if (ctxt->op_bytes == 2) {
3287 ctxt->op_bytes = 4;
3288 desc_ptr.address &= 0x00ffffff;
3289 }
3290 /* Disable writeback. */
3291 ctxt->dst.type = OP_NONE;
3292 return segmented_write(ctxt, ctxt->dst.addr.mem,
3293 &desc_ptr, 2 + ctxt->op_bytes);
3294}
3295
3296static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3297{
3298 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3299}
3300
3301static int em_sidt(struct x86_emulate_ctxt *ctxt)
3302{
3303 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3304}
3305
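/*
 * Shared LGDT/LIDT worker: reads the pseudo-descriptor from memory
 * and, in 64-bit mode, rejects a non-canonical base with #GP before
 * committing it to the chosen table register.
 */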
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003306static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003307{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003308 struct desc_ptr desc_ptr;
3309 int rc;
3310
Avi Kivity510425f2012-06-07 17:04:36 +03003311 if (ctxt->mode == X86EMUL_MODE_PROT64)
3312 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003313 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Avi Kivity26d05cc2011-04-21 12:07:59 +03003314 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003315 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003316 if (rc != X86EMUL_CONTINUE)
3317 return rc;
Nadav Amit9a9abf62014-11-02 11:54:56 +02003318 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3319 is_noncanonical_address(desc_ptr.address))
3320 return emulate_gp(ctxt, 0);
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003321 if (lgdt)
3322 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3323 else
3324 ctxt->ops->set_idt(ctxt, &desc_ptr);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003325 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003326 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003327 return X86EMUL_CONTINUE;
3328}
3329
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003330static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3331{
3332 return em_lgdt_lidt(ctxt, true);
3333}
3334
Avi Kivity5ef39c72011-04-21 12:21:50 +03003335static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003336{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003337 int rc;
3338
Avi Kivity5ef39c72011-04-21 12:21:50 +03003339 rc = ctxt->ops->fix_hypercall(ctxt);
3340
Avi Kivity26d05cc2011-04-21 12:07:59 +03003341 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003342 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003343 return rc;
3344}
3345
3346static int em_lidt(struct x86_emulate_ctxt *ctxt)
3347{
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003348 return em_lgdt_lidt(ctxt, false);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003349}
3350
3351static int em_smsw(struct x86_emulate_ctxt *ctxt)
3352{
Nadav Amit32e94d02014-06-02 18:34:11 +03003353 if (ctxt->dst.type == OP_MEM)
3354 ctxt->dst.bytes = 2;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003355 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003356 return X86EMUL_CONTINUE;
3357}
3358
3359static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3360{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003361 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003362 | (ctxt->src.val & 0x0f));
3363 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003364 return X86EMUL_CONTINUE;
3365}
3366
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003367static int em_loop(struct x86_emulate_ctxt *ctxt)
3368{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003369 int rc = X86EMUL_CONTINUE;
3370
Paolo Bonzini01485a22014-11-19 18:25:08 +01003371 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003372 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
Avi Kivity9dac77f2011-06-01 15:34:25 +03003373 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
Nadav Amit234f3ce2014-09-18 22:39:38 +03003374 rc = jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003375
Nadav Amit234f3ce2014-09-18 22:39:38 +03003376 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003377}
3378
3379static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3380{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003381 int rc = X86EMUL_CONTINUE;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003382
Nadav Amit234f3ce2014-09-18 22:39:38 +03003383 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3384 rc = jmp_rel(ctxt, ctxt->src.val);
3385
3386 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003387}
3388
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003389static int em_in(struct x86_emulate_ctxt *ctxt)
3390{
3391 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3392 &ctxt->dst.val))
3393 return X86EMUL_IO_NEEDED;
3394
3395 return X86EMUL_CONTINUE;
3396}
3397
3398static int em_out(struct x86_emulate_ctxt *ctxt)
3399{
3400 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3401 &ctxt->src.val, 1);
3402 /* Disable writeback. */
3403 ctxt->dst.type = OP_NONE;
3404 return X86EMUL_CONTINUE;
3405}
3406
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003407static int em_cli(struct x86_emulate_ctxt *ctxt)
3408{
3409 if (emulator_bad_iopl(ctxt))
3410 return emulate_gp(ctxt, 0);
3411
3412 ctxt->eflags &= ~X86_EFLAGS_IF;
3413 return X86EMUL_CONTINUE;
3414}
3415
3416static int em_sti(struct x86_emulate_ctxt *ctxt)
3417{
3418 if (emulator_bad_iopl(ctxt))
3419 return emulate_gp(ctxt, 0);
3420
3421 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3422 ctxt->eflags |= X86_EFLAGS_IF;
3423 return X86EMUL_CONTINUE;
3424}
3425
Avi Kivity6d6eede2012-06-07 14:11:36 +03003426static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3427{
3428 u32 eax, ebx, ecx, edx;
3429
Avi Kivitydd856ef2012-08-27 23:46:17 +03003430 eax = reg_read(ctxt, VCPU_REGS_RAX);
3431 ecx = reg_read(ctxt, VCPU_REGS_RCX);
Avi Kivity6d6eede2012-06-07 14:11:36 +03003432 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003433 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3434 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3435 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3436 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
Avi Kivity6d6eede2012-06-07 14:11:36 +03003437 return X86EMUL_CONTINUE;
3438}
3439
Paolo Bonzini98f73632013-10-31 11:19:42 +01003440static int em_sahf(struct x86_emulate_ctxt *ctxt)
3441{
3442 u32 flags;
3443
3444 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3445 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3446
3447 ctxt->eflags &= ~0xffUL;
3448 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3449 return X86EMUL_CONTINUE;
3450}
3451
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003452static int em_lahf(struct x86_emulate_ctxt *ctxt)
3453{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003454 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3455 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003456 return X86EMUL_CONTINUE;
3457}
3458
Avi Kivity92998362012-06-13 12:25:06 +03003459static int em_bswap(struct x86_emulate_ctxt *ctxt)
3460{
3461 switch (ctxt->op_bytes) {
3462#ifdef CONFIG_X86_64
3463 case 8:
3464 asm("bswap %0" : "+r"(ctxt->dst.val));
3465 break;
3466#endif
3467 default:
3468 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3469 break;
3470 }
3471 return X86EMUL_CONTINUE;
3472}
3473
Nadav Amit13e457e2014-10-13 13:04:13 +03003474static int em_clflush(struct x86_emulate_ctxt *ctxt)
3475{
 3476	 /* emulate clflush regardless of the CPUID feature bit */
3477 return X86EMUL_CONTINUE;
3478}
3479
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003480static bool valid_cr(int nr)
3481{
3482 switch (nr) {
3483 case 0:
3484 case 2 ... 4:
3485 case 8:
3486 return true;
3487 default:
3488 return false;
3489 }
3490}
3491
3492static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3493{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003494 if (!valid_cr(ctxt->modrm_reg))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003495 return emulate_ud(ctxt);
3496
3497 return X86EMUL_CONTINUE;
3498}
3499
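/*
 * CR writes are vetted against per-register reserved-bit masks plus
 * the cross-register rules below: CR0.PG requires CR0.PE and CR0.NW
 * requires CR0.CD, enabling paging with EFER.LME set requires
 * CR4.PAE, CR3 honours the long-mode reserved-bit mask, and CR4.PAE
 * cannot be cleared while EFER.LMA is set.
 */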
3500static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3501{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003502 u64 new_val = ctxt->src.val64;
3503 int cr = ctxt->modrm_reg;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003504 u64 efer = 0;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003505
3506 static u64 cr_reserved_bits[] = {
3507 0xffffffff00000000ULL,
3508 0, 0, 0, /* CR3 checked later */
3509 CR4_RESERVED_BITS,
3510 0, 0, 0,
3511 CR8_RESERVED_BITS,
3512 };
3513
3514 if (!valid_cr(cr))
3515 return emulate_ud(ctxt);
3516
3517 if (new_val & cr_reserved_bits[cr])
3518 return emulate_gp(ctxt, 0);
3519
3520 switch (cr) {
3521 case 0: {
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003522 u64 cr4;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003523 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3524 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3525 return emulate_gp(ctxt, 0);
3526
Avi Kivity717746e2011-04-20 13:37:53 +03003527 cr4 = ctxt->ops->get_cr(ctxt, 4);
3528 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003529
3530 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3531 !(cr4 & X86_CR4_PAE))
3532 return emulate_gp(ctxt, 0);
3533
3534 break;
3535 }
3536 case 3: {
3537 u64 rsvd = 0;
3538
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003539 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3540 if (efer & EFER_LMA)
Nadav Amit9d88fca2014-11-02 11:54:52 +02003541 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003542
3543 if (new_val & rsvd)
3544 return emulate_gp(ctxt, 0);
3545
3546 break;
3547 }
3548 case 4: {
Avi Kivity717746e2011-04-20 13:37:53 +03003549 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003550
3551 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3552 return emulate_gp(ctxt, 0);
3553
3554 break;
3555 }
3556 }
3557
3558 return X86EMUL_CONTINUE;
3559}
3560
Joerg Roedel3b88e412011-04-04 12:39:29 +02003561static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3562{
3563 unsigned long dr7;
3564
Avi Kivity717746e2011-04-20 13:37:53 +03003565 ctxt->ops->get_dr(ctxt, 7, &dr7);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003566
3567 /* Check if DR7.Global_Enable is set */
3568 return dr7 & (1 << 13);
3569}
3570
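/*
 * DR access rules enforced below: DR4/DR5 alias DR6/DR7 only while
 * CR4.DE is clear (#UD otherwise), and a set DR7.GD turns any debug
 * register access into #DB with DR6.BD reported.
 */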
3571static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3572{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003573 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003574 u64 cr4;
3575
3576 if (dr > 7)
3577 return emulate_ud(ctxt);
3578
Avi Kivity717746e2011-04-20 13:37:53 +03003579 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003580 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3581 return emulate_ud(ctxt);
3582
Nadav Amit6d2a0522014-11-02 11:54:43 +02003583 if (check_dr7_gd(ctxt)) {
3584 ulong dr6;
3585
3586 ctxt->ops->get_dr(ctxt, 6, &dr6);
3587 dr6 &= ~15;
3588 dr6 |= DR6_BD | DR6_RTM;
3589 ctxt->ops->set_dr(ctxt, 6, dr6);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003590 return emulate_db(ctxt);
Nadav Amit6d2a0522014-11-02 11:54:43 +02003591 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02003592
3593 return X86EMUL_CONTINUE;
3594}
3595
3596static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3597{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003598 u64 new_val = ctxt->src.val64;
3599 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003600
3601 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3602 return emulate_gp(ctxt, 0);
3603
3604 return check_dr_read(ctxt);
3605}
3606
Joerg Roedel01de8b02011-04-04 12:39:31 +02003607static int check_svme(struct x86_emulate_ctxt *ctxt)
3608{
3609 u64 efer;
3610
Avi Kivity717746e2011-04-20 13:37:53 +03003611 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003612
3613 if (!(efer & EFER_SVME))
3614 return emulate_ud(ctxt);
3615
3616 return X86EMUL_CONTINUE;
3617}
3618
3619static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3620{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003621 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003622
3623 /* Valid physical address? */
Randy Dunlapd4224442011-04-21 09:09:22 -07003624 if (rax & 0xffff000000000000ULL)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003625 return emulate_gp(ctxt, 0);
3626
3627 return check_svme(ctxt);
3628}
3629
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003630static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3631{
Avi Kivity717746e2011-04-20 13:37:53 +03003632 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003633
Avi Kivity717746e2011-04-20 13:37:53 +03003634 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003635 return emulate_ud(ctxt);
3636
3637 return X86EMUL_CONTINUE;
3638}
3639
Joerg Roedel80612522011-04-04 12:39:33 +02003640static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3641{
Avi Kivity717746e2011-04-20 13:37:53 +03003642 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003643 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
Joerg Roedel80612522011-04-04 12:39:33 +02003644
Avi Kivity717746e2011-04-20 13:37:53 +03003645 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
Nadav Amit67f4d422014-06-02 18:34:09 +03003646 ctxt->ops->check_pmc(ctxt, rcx))
Joerg Roedel80612522011-04-04 12:39:33 +02003647 return emulate_gp(ctxt, 0);
3648
3649 return X86EMUL_CONTINUE;
3650}
3651
Joerg Roedelf6511932011-04-04 12:39:35 +02003652static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3653{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003654 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3655 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003656 return emulate_gp(ctxt, 0);
3657
3658 return X86EMUL_CONTINUE;
3659}
3660
3661static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3662{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003663 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3664 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003665 return emulate_gp(ctxt, 0);
3666
3667 return X86EMUL_CONTINUE;
3668}
3669
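/*
 * Shorthand used to build the decode tables that follow: D() sets
 * flags only, N marks an unimplemented slot, I()/F() attach an em_*
 * handler (F via the fastop calling convention), II()/IIP() add an
 * intercept and optional permission check, and G/GD/E/GP route
 * through group, group-dual, escape and prefix sub-tables.
 */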
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

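/*
 * For example, F6ALU(Lock, em_add) expands to the six classic ALU entries
 * for one operation: r/m,reg and reg,r/m (each in byte and word/dword form
 * via F2bv) plus the AL/eAX,imm forms, with Lock dropped where it cannot
 * apply.
 */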
static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD, em_vmcall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
	DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

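/*
 * Group tables: when an opcode is marked Group, the reg field of the ModRM
 * byte (bits 5:3) selects one of the eight entries below; group1, for
 * instance, backs the 0x80-0x83 immediate ALU opcodes.
 */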
static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock, em_inc),
	F(DstMem | SrcNone | Lock, em_dec),
	I(SrcMem | NearBranch, em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
	I(SrcMem | NearBranch, em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps, em_jmp_far),
	I(SrcMem | Stack, em_push), D(Undefined),
};

static const struct opcode group6[] = {
	DI(Prot | DstMem, sldt),
	DI(Prot | DstMem, str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem, em_sgdt, sgdt),
	II(Mov | DstMem, em_sidt, sidt),
	II(SrcMem | Priv, em_lgdt, lgdt),
	II(SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite, em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
	F(DstMem | SrcImmByte | Lock, em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
};

static const struct group_dual group15 = { {
	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

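/*
 * Primary (one-byte) opcode dispatch table, indexed by the first opcode
 * byte. N marks undecodable entries; D entries carry decode flags only,
 * while I/F entries also name an execute or fastop handler.
 */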
static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

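/*
 * Two-byte (0x0F-prefixed) opcode table, indexed by the byte that follows
 * the 0x0F escape.
 */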
static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	DI(ImplicitOps, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * The insns below are selected by the mandatory (SIMD) prefix; the table
 * itself is indexed by the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};
4169
Avi Kivity73fba5f2010-07-29 15:11:53 +03004170#undef D
4171#undef N
4172#undef G
4173#undef GD
4174#undef I
Avi Kivityaa97bb42010-01-20 18:09:23 +02004175#undef GP
Joerg Roedel01de8b02011-04-04 12:39:31 +02004176#undef EXT
Avi Kivity73fba5f2010-07-29 15:11:53 +03004177
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004178#undef D2bv
Joerg Roedelf6511932011-04-04 12:39:35 +02004179#undef D2bvIP
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004180#undef I2bv
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004181#undef I2bvIP
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09004182#undef I6ALU
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004183
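/*
 * Immediates wider than 32 bits do not exist in most encodings: a 64-bit
 * operand size still fetches a 4-byte immediate (sign-extended later),
 * except for the explicit OpImm64 forms such as MOV r64, imm64.
 */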
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

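/*
 * Decode one operand according to the Op* selector extracted from ctxt->d.
 * Memory-like operands funnel through mem_common so they all share the
 * ModRM/absolute address previously computed into ctxt->memop.
 */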
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

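/*
 * Decode a single instruction: legacy and REX prefixes first, then the
 * opcode byte(s), then group/prefix/escape table resolution, and finally
 * the ModRM/SIB bytes and up to three operands (src, src2, dst).
 */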
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
		     (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
		      No16))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies for REPE
	 * and REPNE. If the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ, check the corresponding
	 * termination condition:
	 * - if REPE/REPZ and ZF = 0 then done
	 * - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

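/*
 * Execute FWAIT with an exception fixup so that a pending x87 fault is
 * raised (and reported as #MF) before any MMX operands are touched.
 */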
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

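/*
 * fastop stubs take dst in RAX, src in RDX, src2 in RCX and the flags to
 * load in RDI, with the stub pointer itself in RSI, as wired up by the
 * inline asm constraints below. Non-byte ops index into a family of stubs
 * sized FASTOP_SIZE apart, one per operand width; a stub that returns a
 * NULL pointer signals #DE (used by the division helpers).
 */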
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop)
	    : "c"(ctxt->src2.val));
	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

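/*
 * Reset the decode-time state; the memset relies on the fields from
 * rip_relative through modrm being laid out contiguously in
 * struct x86_emulate_ctxt.
 */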
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

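/*
 * Execute one previously decoded instruction: run the unlikely-path checks
 * (LOCK validity, UD/NM conditions, intercepts, privilege, permission
 * hooks, REP termination), fetch memory operands, then dispatch.
 */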
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09004742int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004743{
Mathias Krause0225fb52012-08-30 01:30:16 +02004744 const struct x86_emulate_ops *ops = ctxt->ops;
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09004745 int rc = X86EMUL_CONTINUE;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004746 int saved_dst_type = ctxt->dst.type;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004747
Avi Kivity9dac77f2011-06-01 15:34:25 +03004748 ctxt->mem_read.pos = 0;
Glauber Costa310b5d32009-05-12 16:21:06 -04004749
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004750 /* LOCK prefix is allowed only with some instructions */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004751 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004752 rc = emulate_ud(ctxt);
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004753 goto done;
4754 }
4755
Avi Kivity9dac77f2011-06-01 15:34:25 +03004756 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004757 rc = emulate_ud(ctxt);
Avi Kivity081bca02010-08-26 11:06:15 +03004758 goto done;
4759 }
4760
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the FPU is exception-safe, we can
			 * fetch operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instructions marked Prot can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instructions can be executed only at CPL 0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction-specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}

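	/*
	 * Fetch memory operands.  NoAccess suppresses the read for
	 * instructions such as lea that only use the effective address.
	 */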
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;

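	/*
	 * Dispatch.  Opcodes with an ->execute callback (including the
	 * flag-computing fastop stubs) are handled here; the rest fall
	 * through to the opcode-byte switches below.
	 */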
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

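	/* Remaining one-byte opcodes, handled inline. */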
	switch (ctxt->b) {
	case 0x63: /* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement the carry flag in EFLAGS */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

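	/*
	 * Commit results.  A few instructions also modify their source
	 * operand (SrcWrite); the destination is written back unless the
	 * handler suppressed it via NoWrite or OP_NONE.
	 */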
writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

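	/*
	 * For REP-prefixed string instructions, account for the
	 * iterations just performed and decide whether to restart the
	 * instruction or fall through to the final rIP update.
	 */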
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

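	/*
	 * Common exit.  Latch any pending exception for the caller, then
	 * map the internal X86EMUL_* status onto an EMULATION_* result.
	 */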
done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

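	/* Two-byte (0x0f-prefixed) opcodes handled inline. */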
twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc. */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

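	/*
	 * No three-byte opcodes are handled inline here; the label just
	 * routes them to the common status check and writeback below.
	 */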
threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

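/*
 * The emulator works on a local copy of the guest GPRs (see reg_read()
 * and reg_rmw()); these wrappers let the rest of KVM drop or flush that
 * cache when it touches the registers directly.
 */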
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}