/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

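/*
 * Worked example: the decode flags pack three 5-bit operand-type fields
 * (OpBits == 5) plus assorted one-bit attributes into a single u64.  A
 * minimal sketch of pulling the destination field back out, using only
 * the definitions above (example_dst_type is hypothetical and not used
 * in this file):
 *
 *	static inline u64 example_dst_type(u64 flags)
 *	{
 *		return (flags & DstMask) >> DstShift;
 *	}
 *
 * An entry declared as (DstReg | SrcMem | ModRM) therefore decodes to a
 * destination type of OpReg and a source type of OpMem, with the generic
 * ModRM bit set.
 */
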
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

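/*
 * Usage sketch: the helpers above implement a lazy GPR cache keyed by the
 * regs_valid/regs_dirty bitmasks.  A minimal (hypothetical) emulation
 * step that read-modify-writes RAX without touching the vcpu until
 * writeback looks like:
 *
 *	ulong *rax = reg_rmw(ctxt, VCPU_REGS_RAX);
 *	*rax += 1;
 *	writeback_registers(ctxt);
 *
 * reg_read() fills the cache and sets regs_valid on first use;
 * reg_write() additionally sets regs_dirty, so writeback_registers()
 * flushes only the registers that actually changed.
 */
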
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

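/*
 * Expansion sketch (assuming CONFIG_X86_64, so ON64 emits its argument):
 * FASTOP2(add) produces four FASTOP_SIZE-byte stubs, one per operand
 * size, roughly:
 *
 *	.align 8; em_add:
 *	.align 8; addb %dl, %al; ret
 *	.align 8; addw %dx, %ax; ret
 *	.align 8; addl %edx, %eax; ret
 *	.align 8; addq %rdx, %rax; ret
 *
 * so a caller can select the right stub by adding 0/8/16/24 to em_add
 * instead of indexing a jump table.
 */
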
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

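/*
 * Worked example: with a 16-bit address size (ctxt->ad_bytes == 2, so
 * ad_mask() == 0xffff), incrementing SI past the wrap point must wrap
 * within the low 16 bits while leaving the upper bits of RSI intact.
 * For *reg == 0x1234ffff and inc == 1, assign_masked() computes
 *
 *	(0x1234ffff & ~0xffff) | (0x12350000 & 0xffff) == 0x12340000
 *
 * i.e. SI wraps 0xffff -> 0x0000 and the high half of the register is
 * preserved, matching 16-bit string-instruction behaviour.
 */
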
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		if (size > *max_size)
			goto bad;
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

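/*
 * Worked example for the expand-down branch in __linearize(): take a
 * 16-bit expand-down data segment (desc.type has bit 3 clear and bit 2
 * set) with limit 0x0fff and desc.d == 0.  Valid offsets are the range
 * (limit, 0xffff]:
 *
 *	addr.ea == 0x0800: 0x0800 <= 0x0fff, so "goto bad" (#GP or #SS)
 *	addr.ea == 0x2000: above the limit; lim becomes 0xffff, the
 *	    second check passes, and *max_size = 0xffff + 1 - 0x2000
 *	    = 0xe000
 *
 * An expand-down segment inverts the usual limit check, which is why the
 * code first rejects offsets at or below the limit and only then
 * re-checks against the segment's maximum offset.
 */
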
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			mode = X86EMUL_MODE_PROT64;
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

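/*
 * How test_cc() works: FOP_SETCC aligns each "setcc %al; ret" stub to 4
 * bytes, and the FOP_SETCC list above follows the hardware encoding of
 * the condition nibble.  For a condition code of 0x4 (that of JZ/JE,
 * picked here purely as an example):
 *
 *	fop = (void *)em_setcc + 4 * 0x4;
 *
 * lands on the "setz %al; ret" stub.  The asm restores the guest's saved
 * flags with popf and lets the CPU evaluate the condition directly,
 * instead of re-implementing all 16 predicates in C.
 */
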
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

Avi Kivity1c73ef6652007-11-01 06:31:28 +02001134static int decode_modrm(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001135 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001136{
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001137 u8 sib;
Bandan Das02357bd2014-04-16 12:46:11 -04001138 int index_reg, base_reg, scale;
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001139 int rc = X86EMUL_CONTINUE;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001140 ulong modrm_ea = 0;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001141
Bandan Das02357bd2014-04-16 12:46:11 -04001142 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1143 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1144 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001145
Bandan Das02357bd2014-04-16 12:46:11 -04001146 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001147 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
Bandan Das02357bd2014-04-16 12:46:11 -04001148 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001149 ctxt->modrm_seg = VCPU_SREG_DS;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001150
Nadav Amit9b88ae92014-05-25 23:05:21 +03001151 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001152 op->type = OP_REG;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001153 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Paolo Bonzini8acb42072013-05-30 16:35:55 +02001154 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02001155 ctxt->d & ByteOp);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001156 if (ctxt->d & Sse) {
Avi Kivity1253791d2011-03-29 11:41:27 +02001157 op->type = OP_XMM;
1158 op->bytes = 16;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001159 op->addr.xmm = ctxt->modrm_rm;
1160 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
Avi Kivity1253791d2011-03-29 11:41:27 +02001161 return rc;
1162 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001163 if (ctxt->d & Mmx) {
1164 op->type = OP_MM;
1165 op->bytes = 8;
Paolo Bonzinibdc90722014-05-06 14:03:29 +02001166 op->addr.mm = ctxt->modrm_rm & 7;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001167 return rc;
1168 }
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001169 fetch_register_operand(op);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001170 return rc;
1171 }
1172
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001173 op->type = OP_MEM;
1174
Avi Kivity9dac77f2011-06-01 15:34:25 +03001175 if (ctxt->ad_bytes == 2) {
Avi Kivitydd856ef2012-08-27 23:46:17 +03001176 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1177 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1178 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1179 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001180
1181 /* 16-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001182 switch (ctxt->modrm_mod) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001183 case 0:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001184 if (ctxt->modrm_rm == 6)
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001185 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001186 break;
1187 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001188 modrm_ea += insn_fetch(s8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001189 break;
1190 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001191 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001192 break;
1193 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001194 switch (ctxt->modrm_rm) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001195 case 0:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001196 modrm_ea += bx + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001197 break;
1198 case 1:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001199 modrm_ea += bx + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001200 break;
1201 case 2:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001202 modrm_ea += bp + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001203 break;
1204 case 3:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001205 modrm_ea += bp + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001206 break;
1207 case 4:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001208 modrm_ea += si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001209 break;
1210 case 5:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001211 modrm_ea += di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001212 break;
1213 case 6:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001214 if (ctxt->modrm_mod != 0)
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001215 modrm_ea += bp;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001216 break;
1217 case 7:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001218 modrm_ea += bx;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001219 break;
1220 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001221 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1222 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1223 ctxt->modrm_seg = VCPU_SREG_SS;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001224 modrm_ea = (u16)modrm_ea;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001225 } else {
1226 /* 32/64-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001227 if ((ctxt->modrm_rm & 7) == 4) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001228 sib = insn_fetch(u8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001229 index_reg |= (sib >> 3) & 7;
1230 base_reg |= sib & 7;
1231 scale = sib >> 6;
1232
			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

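/*
 * Decode the direct (moffs) addressing form, where the instruction
 * carries an absolute offset of ad_bytes length, e.g. "mov eax,
 * [moffs32]" (opcode A1).
 */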
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

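/*
 * For bit instructions (BT/BTS/BTR/BTC) with a register bit offset and
 * a memory destination, the offset selects a bit anywhere relative to
 * the operand address: e.g. "bt [mem], ax" with ax == 17 on a 16-bit
 * operand tests bit 1 of the word at mem + 2.  The address is adjusted
 * by the sign-extended byte offset (sv >> 3) and the source is reduced
 * to the remaining subword bit index.
 */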
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

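/*
 * Read guest memory through a small per-instruction cache, so that an
 * instruction which is restarted (e.g. after an exit to userspace for
 * MMIO) replays the same bytes instead of re-issuing the access.
 */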
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

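/*
 * Port input with read-ahead: for a rep-prefixed IN/INS, fetch as many
 * items from the port in one go as fit both in the cache and in the
 * current page (at most RCX items), then hand them out one iteration
 * at a time, so e.g. "rep insb" costs a handful of batched pio
 * transactions instead of one per byte.
 */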
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof(*desc),
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

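/*
 * A segment selector is "index:TI:RPL" - bits 15:3 index into a
 * descriptor table, bit 2 (TI) picks the LDT over the GDT, and bits
 * 1:0 are the requested privilege level.  E.g. selector 0x2b is GDT
 * entry 5 with RPL 3.
 */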
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}

/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof(*desc),
				    &ctxt->exception);
}

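/*
 * Load a segment register roughly as a CPU would: fetch the
 * descriptor, run the privilege and type checks appropriate to the
 * target register, set the accessed bit (or the TSS busy bit), and
 * only then commit the selector and cached descriptor.  Real mode and
 * VM86 skip the checks and just derive the base from the selector.
 */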
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof(seg_desc));

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

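	/*
	 * Per-register checks, roughly following the SDM's rules for
	 * loads of segment registers: SS wants a writable data segment
	 * at the current privilege level, CS a code segment
	 * (conforming, or nonconforming at matching privilege), TR an
	 * available TSS, LDTR an LDT descriptor, and the plain data
	 * segments anything readable whose DPL admits the caller.
	 */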
	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
					  sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					    ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

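/*
 * Push to the stack: decrement RSP (masked to the stack segment's
 * address size) and write the item at SS:RSP.  E.g. a 16-bit push in
 * a 32-bit stack segment moves ESP down by 2 and stores 2 bytes.
 */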
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

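/*
 * POPF may only change a subset of EFLAGS, depending on mode and
 * privilege: IOPL is writable only at CPL 0, IF only when CPL <= IOPL,
 * and a VM86 task with IOPL < 3 faults instead.  E.g. a CPL 3 task
 * popping a value with IF clear under IOPL 0 leaves IF untouched.
 */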
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

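/*
 * ENTER with a zero nesting level builds a stack frame: push RBP,
 * copy RSP into RBP, then reserve frame_size bytes - equivalent to
 * "push rbp; mov rbp, rsp; sub rsp, frame_size".  Non-zero nesting
 * levels are rare and left unhandled here.
 */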
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

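/*
 * A 32-bit "push sreg" decrements ESP by 4 but, at least on current
 * CPUs, writes only the low 2 bytes, leaving the upper bytes of the
 * stack slot unchanged; this is mimicked below by pre-adjusting RSP
 * by 2 and then pushing a 2-byte operand.
 */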
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
				old_esp : reg_read(ctxt, reg);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

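/*
 * Real-mode software interrupt: push FLAGS, CS and IP, clear
 * IF/TF/AC, then load CS:IP from the 4-byte IVT entry at irq * 4
 * (IP at offset 0, CS at offset 2).
 */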
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

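/*
 * Real-mode IRET pops IP, CS and FLAGS in that order.  Only the bits
 * in "mask" below may change; the VM86-related bits (VM/VIF/VIP) are
 * always preserved from the old EFLAGS.
 */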
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

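/*
 * CMPXCHG8B compares EDX:EAX with the 64-bit memory operand: on a
 * match, ZF is set and ECX:EBX is stored; otherwise ZF is cleared
 * and the memory value is loaded into EDX:EAX.
 */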
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

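/*
 * CMPXCHG performs a write cycle on the destination even when the
 * comparison fails (the old value is simply written back).  The
 * failure path below therefore rewrites the destination with its
 * original value and routes the loaded value to EAX through the
 * repurposed source operand, which the common writeback path is
 * expected to commit.
 */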
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode,
	 * so a 64-bit guest running a 32-bit compat app will #UD.  While
	 * this behaviour could be papered over by emulating the AMD
	 * response, AMD CPUs cannot be made to behave like Intel ones,
	 * so mimic whatever the guest's CPU vendor would do.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}

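/*
 * SYSCALL derives its target selectors from MSR_STAR: bits 47:32 hold
 * the kernel CS, and SS is defined as that value + 8.  E.g. with
 * STAR[47:32] == 0x10, CS becomes 0x10 and SS 0x18.  In long mode RIP
 * comes from MSR_LSTAR (or MSR_CSTAR for compat mode) and the RFLAGS
 * bits listed in MSR_SYSCALL_MASK are cleared; in legacy mode EIP
 * comes from STAR[31:0] instead.
 */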
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
}

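/*
 * SYSENTER loads CS from MSR_IA32_SYSENTER_CS (SS is that value + 8)
 * and takes the new RIP and RSP from MSR_IA32_SYSENTER_EIP and
 * MSR_IA32_SYSENTER_ESP.  Unlike SYSCALL it saves no return state;
 * OS and user code agree on a convention instead (SYSEXIT later
 * takes the return RIP and RSP from RDX and RCX).
 */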
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002387static int em_sysenter(struct x86_emulate_ctxt *ctxt)
Andre Przywara8c604352009-06-18 12:56:01 +02002388{
Mathias Krause0225fb52012-08-30 01:30:16 +02002389 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002390 struct desc_struct cs, ss;
Andre Przywara8c604352009-06-18 12:56:01 +02002391 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002392 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002393 u64 efer = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002394
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002395 ops->get_msr(ctxt, MSR_EFER, &efer);
Gleb Natapova0044752010-02-10 14:21:31 +02002396 /* inject #GP if in real mode */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002397 if (ctxt->mode == X86EMUL_MODE_REAL)
2398 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002399
Avi Kivity1a18a692012-02-01 12:23:21 +02002400 /*
2401 * Not recognized on AMD in compat mode (but is recognized in legacy
2402 * mode).
2403 */
2404 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2405 && !vendor_intel(ctxt))
2406 return emulate_ud(ctxt);
2407
Nadav Amitb2c9d432014-11-02 11:55:01 +02002408 /* sysenter/sysexit have not been tested in 64bit mode. */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002409 if (ctxt->mode == X86EMUL_MODE_PROT64)
Nadav Amitb2c9d432014-11-02 11:55:01 +02002410 return X86EMUL_UNHANDLEABLE;
Andre Przywara8c604352009-06-18 12:56:01 +02002411
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002412 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara8c604352009-06-18 12:56:01 +02002413
Avi Kivity717746e2011-04-20 13:37:53 +03002414 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara8c604352009-06-18 12:56:01 +02002415 switch (ctxt->mode) {
2416 case X86EMUL_MODE_PROT32:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002417 if ((msr_data & 0xfffc) == 0x0)
2418 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002419 break;
2420 case X86EMUL_MODE_PROT64:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002421 if (msr_data == 0x0)
2422 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002423 break;
Gleb Natapov9d1b39a2012-09-03 15:24:27 +03002424 default:
2425 break;
Andre Przywara8c604352009-06-18 12:56:01 +02002426 }
2427
Nadav Amit6c6cb692014-07-21 14:37:30 +03002428 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002429 cs_sel = (u16)msr_data;
2430 cs_sel &= ~SELECTOR_RPL_MASK;
2431 ss_sel = cs_sel + 8;
2432 ss_sel &= ~SELECTOR_RPL_MASK;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002433 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002434 cs.d = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002435 cs.l = 1;
2436 }
2437
Avi Kivity1aa36612011-04-27 13:20:30 +03002438 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2439 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara8c604352009-06-18 12:56:01 +02002440
Avi Kivity717746e2011-04-20 13:37:53 +03002441 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002442 ctxt->_eip = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002443
Avi Kivity717746e2011-04-20 13:37:53 +03002444 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002445 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002446
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002447 return X86EMUL_CONTINUE;
Andre Przywara8c604352009-06-18 12:56:01 +02002448}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}
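
/*
 * Illustrative sketch (exposition only): IOPL occupies EFLAGS bits
 * 12-13, so the check above reduces to comparing the current privilege
 * level against those two bits.  E.g. with eflags = 0x3202 (IOPL = 3),
 * any CPL may touch I/O ports without consulting the TSS bitmap.
 */
static inline bool iopl_allows_io_sketch(unsigned long eflags, int cpl)
{
	int iopl = (eflags >> 12) & 3;	/* X86_EFLAGS_IOPL >> IOPL_SHIFT */

	return cpl <= iopl;
}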

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
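
/*
 * Illustrative sketch (exposition only, reading a flat byte copy of a
 * TSS rather than going through ->read_std): the I/O permission bitmap
 * begins at the 16-bit offset stored at byte 102 of the TSS, and a port
 * range is permitted only when every bit covering [port, port + len)
 * is clear, which is what the function above checks 16 bits at a time.
 */
static inline bool io_bitmap_allows_sketch(const u8 *tss, u16 port, u16 len)
{
	u16 iobase = tss[102] | (tss[103] << 8);
	u16 perm = tss[iobase + port / 8] | (tss[iobase + port / 8 + 1] << 8);

	return !((perm >> (port & 0x7)) & ((1 << len) - 1));
}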

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and the LDT selector are intentionally not saved */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if the NT bit is set
	 * in EFLAGS; note that old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
2954
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03002955static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2956 struct operand *op)
Gleb Natapova682e352010-03-18 15:20:21 +02002957{
Gleb Natapovb3356bf2012-09-03 15:24:29 +03002958 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
Gleb Natapova682e352010-03-18 15:20:21 +02002959
Paolo Bonzini01485a22014-11-19 18:25:08 +01002960 register_address_increment(ctxt, reg, df * op->bytes);
2961 op->addr.mem.ea = register_address(ctxt, reg);
Gleb Natapova682e352010-03-18 15:20:21 +02002962}
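
/*
 * Illustrative sketch (exposition only): string instructions advance
 * SI/DI by the operand size per iteration, moving backwards when
 * EFLAGS.DF is set, which is the df computation used above.  E.g. one
 * 4-byte MOVSD iteration with DF = 1 steps the index register by -4.
 */
static inline long string_step_sketch(unsigned long eflags,
				      int count, int op_bytes)
{
	int df = (eflags & EFLG_DF) ? -count : count;

	return (long)df * op_bytes;	/* signed byte delta applied to SI/DI */
}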
2963
Avi Kivity7af04fc2010-08-18 14:16:35 +03002964static int em_das(struct x86_emulate_ctxt *ctxt)
2965{
Avi Kivity7af04fc2010-08-18 14:16:35 +03002966 u8 al, old_al;
2967 bool af, cf, old_cf;
2968
2969 cf = ctxt->eflags & X86_EFLAGS_CF;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002970 al = ctxt->dst.val;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002971
2972 old_al = al;
2973 old_cf = cf;
2974 cf = false;
2975 af = ctxt->eflags & X86_EFLAGS_AF;
2976 if ((al & 0x0f) > 9 || af) {
2977 al -= 6;
2978 cf = old_cf | (al >= 250);
2979 af = true;
2980 } else {
2981 af = false;
2982 }
2983 if (old_al > 0x99 || old_cf) {
2984 al -= 0x60;
2985 cf = true;
2986 }
2987
Avi Kivity9dac77f2011-06-01 15:34:25 +03002988 ctxt->dst.val = al;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002989 /* Set PF, ZF, SF */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002990 ctxt->src.type = OP_IMM;
2991 ctxt->src.val = 0;
2992 ctxt->src.bytes = 1;
Avi Kivity158de572013-01-19 19:51:57 +02002993 fastop(ctxt, em_or);
Avi Kivity7af04fc2010-08-18 14:16:35 +03002994 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2995 if (cf)
2996 ctxt->eflags |= X86_EFLAGS_CF;
2997 if (af)
2998 ctxt->eflags |= X86_EFLAGS_AF;
2999 return X86EMUL_CONTINUE;
3000}
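
/*
 * Illustrative worked example (exposition only): after SUB of the
 * packed-BCD values 0x34 - 0x05, the CPU holds AL = 0x2f with AF set
 * and CF clear.  Running the adjustment above on those raw values
 * subtracts 6 from the invalid low nibble and yields the correct BCD
 * result 0x29 (34 - 5 = 29).
 */
static inline u8 das_example(void)
{
	u8 al = 0x2f, old_al = al;	/* AL after SUB 0x34 - 0x05 */
	bool af = true, cf = false;	/* flags as left by that SUB */

	if ((al & 0x0f) > 9 || af)	/* low nibble 0xf is not valid BCD */
		al -= 6;		/* 0x2f -> 0x29 */
	if (old_al > 0x99 || cf)	/* high nibble needs no fixup here */
		al -= 0x60;		/* not taken in this example */
	return al;			/* packed BCD 0x29 */
}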

static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
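
/*
 * Illustrative sketch (exposition only): AAM splits AL by the immediate
 * base, 10 for the plain "aam" encoding, so AL = 53 becomes AH = 5,
 * AL = 3 (AX = 0x0503), exactly the division done above.  A zero
 * immediate raises #DE, which em_aam checks first.
 */
static inline u16 aam_sketch(u8 al, u8 base)	/* base != 0, as checked above */
{
	return ((al / base) << 8) | (al % base);	/* new AH:AL */
}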

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
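
/*
 * Illustrative sketch (exposition only): AAD is the inverse of AAM; it
 * folds AH back into AL as AL + AH * base (truncated to 8 bits) and
 * clears AH, so AX = 0x0503 with the default base 10 becomes
 * AX = 0x0035 (53), matching the computation above.
 */
static inline u16 aad_sketch(u8 al, u8 ah, u8 base)
{
	return (u8)(al + ah * base);	/* new AX: AH = 0 */
}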

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/*
	 * If we failed, we tainted the memory, but at the very least we
	 * should restore CS.
	 */
	if (rc != X86EMUL_CONTINUE)
		goto fail;
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	return rc;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}
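
/*
 * Illustrative sketch (exposition only): the expression in em_cwd
 * computes the sign extension of the source into DX/EDX/RDX without a
 * branch.  Shifting the sign bit down leaves 0 or 1; subtracting 1 and
 * complementing turns that into all-zeros or all-ones.  Assuming val
 * holds only the operand's bytes, a 16-bit 0x8000 yields an all-ones
 * mask (truncated to DX = 0xffff on writeback) and 0x7fff yields 0.
 */
static inline unsigned long sign_fill_sketch(unsigned long val, int bytes)
{
	return ~((val >> (bytes * 8 - 1)) - 1);
}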

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From the MOVBE definition: "...When the operand size is 16
		 * bits, the upper word of the destination register remains
		 * unchanged ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict
		 * aliasing rules, so we have to do the operation almost by
		 * hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}
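
/*
 * Illustrative sketch (exposition only): the 16-bit MOVBE case above
 * must merge the swapped word into the low half of the destination
 * while preserving the upper bits, e.g. dst = 0xdeadbeef with
 * src = 0x1234 becomes 0xdead3412.
 */
static inline unsigned long movbe16_sketch(unsigned long dst, u16 src)
{
	return (dst & ~0xffffUL) | swab16(src);
}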

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, ctxt->dst.addr.mem,
			       &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    is_noncanonical_address(desc_ptr.address))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}
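
/*
 * Illustrative sketch (exposition only): BSWAP reverses byte order in
 * place; the inline asm above defers to the CPU instruction, but the
 * pure-C equivalent for the 32-bit case is the usual shift-and-mask,
 * e.g. 0x12345678 -> 0x78563412.  (The SDM leaves 16-bit BSWAP
 * undefined; the default: case above would treat it as a 32-bit swap
 * of the low dword.)
 */
static inline u32 bswap32_sketch(u32 x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}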
3533
Nadav Amit13e457e2014-10-13 13:04:13 +03003534static int em_clflush(struct x86_emulate_ctxt *ctxt)
3535{
3536 /* emulating clflush regardless of cpuid */
3537 return X86EMUL_CONTINUE;
3538}
3539
Nadav Amit2276b512015-01-26 09:32:24 +02003540static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3541{
3542 ctxt->dst.val = (s32) ctxt->src.val;
3543 return X86EMUL_CONTINUE;
3544}
3545
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003546static bool valid_cr(int nr)
3547{
3548 switch (nr) {
3549 case 0:
3550 case 2 ... 4:
3551 case 8:
3552 return true;
3553 default:
3554 return false;
3555 }
3556}
3557
3558static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3559{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003560 if (!valid_cr(ctxt->modrm_reg))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003561 return emulate_ud(ctxt);
3562
3563 return X86EMUL_CONTINUE;
3564}
3565
3566static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3567{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003568 u64 new_val = ctxt->src.val64;
3569 int cr = ctxt->modrm_reg;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003570 u64 efer = 0;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003571
3572 static u64 cr_reserved_bits[] = {
3573 0xffffffff00000000ULL,
3574 0, 0, 0, /* CR3 checked later */
3575 CR4_RESERVED_BITS,
3576 0, 0, 0,
3577 CR8_RESERVED_BITS,
3578 };
3579
3580 if (!valid_cr(cr))
3581 return emulate_ud(ctxt);
3582
3583 if (new_val & cr_reserved_bits[cr])
3584 return emulate_gp(ctxt, 0);
3585
3586 switch (cr) {
3587 case 0: {
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003588 u64 cr4;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003589 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3590 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3591 return emulate_gp(ctxt, 0);
3592
Avi Kivity717746e2011-04-20 13:37:53 +03003593 cr4 = ctxt->ops->get_cr(ctxt, 4);
3594 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003595
3596 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3597 !(cr4 & X86_CR4_PAE))
3598 return emulate_gp(ctxt, 0);
3599
3600 break;
3601 }
3602 case 3: {
3603 u64 rsvd = 0;
3604
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003605 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3606 if (efer & EFER_LMA)
Nadav Amit9d88fca2014-11-02 11:54:52 +02003607 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003608
3609 if (new_val & rsvd)
3610 return emulate_gp(ctxt, 0);
3611
3612 break;
3613 }
3614 case 4: {
Avi Kivity717746e2011-04-20 13:37:53 +03003615 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003616
3617 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3618 return emulate_gp(ctxt, 0);
3619
3620 break;
3621 }
3622 }
3623
3624 return X86EMUL_CONTINUE;
3625}
3626
Joerg Roedel3b88e412011-04-04 12:39:29 +02003627static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3628{
3629 unsigned long dr7;
3630
Avi Kivity717746e2011-04-20 13:37:53 +03003631 ctxt->ops->get_dr(ctxt, 7, &dr7);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003632
3633 /* Check if DR7.Global_Enable is set */
3634 return dr7 & (1 << 13);
3635}
3636
3637static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3638{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003639 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003640 u64 cr4;
3641
3642 if (dr > 7)
3643 return emulate_ud(ctxt);
3644
Avi Kivity717746e2011-04-20 13:37:53 +03003645 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003646 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3647 return emulate_ud(ctxt);
3648
Nadav Amit6d2a0522014-11-02 11:54:43 +02003649 if (check_dr7_gd(ctxt)) {
3650 ulong dr6;
3651
3652 ctxt->ops->get_dr(ctxt, 6, &dr6);
3653 dr6 &= ~15;
3654 dr6 |= DR6_BD | DR6_RTM;
3655 ctxt->ops->set_dr(ctxt, 6, dr6);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003656 return emulate_db(ctxt);
Nadav Amit6d2a0522014-11-02 11:54:43 +02003657 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02003658
3659 return X86EMUL_CONTINUE;
3660}
3661
3662static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3663{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003664 u64 new_val = ctxt->src.val64;
3665 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003666
3667 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3668 return emulate_gp(ctxt, 0);
3669
3670 return check_dr_read(ctxt);
3671}
3672
Joerg Roedel01de8b02011-04-04 12:39:31 +02003673static int check_svme(struct x86_emulate_ctxt *ctxt)
3674{
3675 u64 efer;
3676
Avi Kivity717746e2011-04-20 13:37:53 +03003677 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003678
3679 if (!(efer & EFER_SVME))
3680 return emulate_ud(ctxt);
3681
3682 return X86EMUL_CONTINUE;
3683}
3684
3685static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3686{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003687 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003688
3689 /* Valid physical address? */
Randy Dunlapd4224442011-04-21 09:09:22 -07003690 if (rax & 0xffff000000000000ULL)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003691 return emulate_gp(ctxt, 0);
3692
3693 return check_svme(ctxt);
3694}
3695
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

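/*
 * Shorthand used to build the opcode tables below.  Each macro expands
 * to one or more struct opcode initializers; the suffixed forms attach
 * an intercept code (DI/II), a permission callback (DIP/IIP) or a
 * fastop entry point (F).  The 2bv variants emit an entry pair, ByteOp
 * first, matching one-byte opcode pairs that encode operand size in
 * bit 0 - e.g. I2bv(DstMem | SrcReg | ModRM | Mov, em_mov) covers the
 * 0x88/0x89 forms of MOV in one line.
 */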
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

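/*
 * F6ALU() thus emits the six classic ALU encodings in opcode order;
 * for ADD that is 0x00 (r/m8,r8), 0x01 (r/m,r), 0x02 (r8,r/m8),
 * 0x03 (r,r/m), 0x04 (AL,imm8) and 0x05 (eAX,imm).  Only the
 * memory-destination pair keeps the Lock flag, since LOCK is legal
 * only with a memory destination.
 */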
static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD, em_vmcall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
	DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
};

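/*
 * Rotate/shift group (C0/C1 and D0-D3, selected by ModRM.reg).  The
 * duplicate em_shl entry is deliberate: /6 is SAL, which x86 defines
 * as an alias encoding of SHL (/4).
 */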
static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock, em_inc),
	F(DstMem | SrcNone | Lock, em_dec),
	I(SrcMem | NearBranch, em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
	I(SrcMem | NearBranch, em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps, em_jmp_far),
	I(SrcMem | Stack, em_push), D(Undefined),
};

static const struct opcode group6[] = {
	DI(Prot, sldt),
	DI(Prot, str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

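/*
 * A group_dual holds two rows of eight entries: mod012[] is used when
 * ModRM encodes a memory operand, mod3[] when it encodes a register.
 * group7 (0F 01) leans on this heavily; 0F 01 /3 is LIDT in its memory
 * form but dispatches to the SVM block (group7_rm3) when mod == 3.
 */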
static const struct group_dual group7 = { {
	II(Mov | DstMem, em_sgdt, sgdt),
	II(Mov | DstMem, em_sidt, sidt),
	II(SrcMem | Priv, em_lgdt, lgdt),
	II(SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite, em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
	F(DstMem | SrcImmByte | Lock, em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
};

static const struct group_dual group15 = { {
	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

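/*
 * x87 escape opcodes (D8-DF).  The first array covers memory forms and
 * is indexed by ModRM.reg; register forms (modrm >= 0xc0) use the
 * second array, indexed by modrm - 0xc0.  Only the few control
 * instructions the emulator cares about are wired up, e.g. D9 /7
 * (FNSTCW), DD /7 (FNSTSW m16) and DB E3 (FNINIT).
 */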
static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};

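/*
 * Main one-byte opcode map, in strict opcode order.  The X2..X16
 * helpers repeat an entry, so the row at 0x50 expands to eight
 * PUSH reg entries for opcodes 0x50-0x57.
 */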
static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

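/*
 * Two-byte (0F-prefixed) opcode map.  SIMD entries dispatch through a
 * gprefix, which selects the handler by the mandatory prefix in
 * effect: none, 66, F2 or F3.
 */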
static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	DI(ImplicitOps, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * Insns below are selected by the mandatory prefix and are indexed by
 * the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU

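/*
 * Immediates are fetched from the instruction stream at _eip and are
 * sign-extended by default.  imm_size() caps the fetch at four bytes
 * because 64-bit instructions normally take an imm32; the one true
 * imm64 case (MOV r64, imm64 at B8-BF) goes through OpImm64 instead.
 */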
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

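/*
 * Translate one OpXxx operand encoding (a field of ctxt->d) into a
 * concrete struct operand.  For instance OpDI, used by string stores,
 * becomes a memory operand at ES:(E/R)DI, while OpES..OpGS simply
 * materialise a segment register number as an immediate for
 * em_push_sreg() and friends.
 */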
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

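/*
 * Full decode pass: legacy prefixes, an optional REX byte, one to
 * three opcode bytes, group resolution, then the three operands.
 * For example "66 83 c8 01" (or ax, 1) in 32-bit mode first flips
 * op_bytes from 4 to 2 for the 0x66 prefix, looks up opcode 0x83
 * (group1 with an imm8), and resolves ModRM.reg == 1 to em_or.
 */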
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies to REPE
	 * and REPNE.  Test whether the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and, if so, check the corresponding
	 * termination condition:
	 * - if REPE/REPZ and ZF = 0 then done
	 * - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

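/*
 * Poke the FPU with an fwait so that any pending x87 exception is
 * raised (and fixed up) here, where it can be converted into a guest
 * #MF, instead of firing at an arbitrary later point.
 */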
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

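/*
 * Trampoline into a fastop handler.  The handlers are tiny asm stubs
 * spaced FASTOP_SIZE bytes apart, one per operand size, so the entry
 * point is fop plus __ffs(dst.bytes) * FASTOP_SIZE, with the ByteOp
 * variant at offset 0.  Guest flags are installed around the call and
 * captured afterwards; a stub that faults (e.g. on a division error)
 * comes back with fop zeroed by the fixup, which is turned into #DE
 * below.
 */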
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop)
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

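/*
 * Reset the decode cache between instructions.  The memset relies on
 * the decode-time fields being laid out contiguously in the context
 * structure, from rip_relative up to (but not including) modrm.
 */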
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

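/*
 * Execute one previously decoded instruction.  Returns EMULATION_OK on
 * success, EMULATION_RESTART if a string instruction should be re-run
 * without re-decoding, EMULATION_INTERCEPTED if a nested-guest
 * intercept fired, or EMULATION_FAILED.
 */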
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* The LOCK prefix is allowed only with certain instructions,
	 * and only with a memory destination. */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

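	/*
	 * All the rarely needed checks (No64, SSE/MMX state, intercepts,
	 * privilege and permission tests, REP termination) sit behind one
	 * unlikely() test of the combined flag mask, so the common path
	 * pays for a single branch.
	 */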
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the FPU is exception safe, we can
			 * fetch operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instructions can be executed only at CPL 0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction-specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

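		/*
		 * e.g. "REP MOVSB" with RCX == 0 performs no iterations at
		 * all; the instruction simply retires, so advance EIP and
		 * clear RF without touching the operands.
		 */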
		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;

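	/*
	 * Most opcodes are dispatched through ctxt->execute or a fastop
	 * stub taken from the opcode tables; only a handful of one-byte
	 * legacy cases remain in the switch below.
	 */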
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

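	/*
	 * Commit results only after the instruction has fully succeeded;
	 * a faulting instruction must not leave partial register or
	 * memory updates behind.  SrcWrite marks the few instructions
	 * (XADD, for example) that also write back to their source.
	 */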
writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

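	/*
	 * A repeating string instruction is restarted from here rather
	 * than re-decoded: decrement RCX by the iterations already done
	 * and, unless the REP termination conditions are met, hand back
	 * EMULATION_RESTART (batched, so pending guest events still get
	 * a chance to be injected).
	 */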
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc. */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
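
/*
 * How the entry points above fit together, as a hedged sketch of the
 * caller (the surrounding kvm code, not anything defined in this file).
 * After decode, the caller runs x86_emulate_insn() and interprets the
 * result roughly as:
 *
 *	switch (x86_emulate_insn(ctxt)) {
 *	case EMULATION_OK:		// rip advanced, state written back
 *	case EMULATION_RESTART:		// string insn: rip unchanged, so
 *					// re-entering the guest re-runs it
 *	case EMULATION_INTERCEPTED:	// nested-guest intercept took over
 *		break;			// resume the guest
 *	case EMULATION_FAILED:
 *		break;			// e.g. punt to userspace
 *	}
 */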