/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)      /* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)     /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)     /* Emulate if unsupported by the host */
#define NoAccess    (1<<23)     /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)     /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)     /* No Such Instruction */
#define Lock        (1<<26)     /* lock prefix is allowed for the instruction */
#define Priv        (1<<27)     /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
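
/*
 * Sketch (assumed; the decode call sites are outside this excerpt): each
 * packed operand field can be recovered from the 56-bit flags value with
 * a shift and a mask, e.g.
 *
 *	unsigned dst_type  = (ctxt->d >> DstShift)  & OpMask;
 *	unsigned src_type  = (ctxt->d >> SrcShift)  & OpMask;
 *	unsigned src2_type = (ctxt->d >> Src2Shift) & OpMask;
 *
 * each yielding one of the Op* codes defined above.
 */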

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;
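
/*
 * Layout sketch (assumed from the size rule above; fastop() itself is
 * outside this excerpt): within one FASTOP* group the byte/word/long/quad
 * variants sit at fixed offsets, so the entry for an operand of 'bytes'
 * bytes is roughly
 *
 *	(void *)em_op + __ffs(bytes) * FASTOP_SIZE
 *
 * i.e. +0, +8, +16, +24 -- which is why no dispatch table is needed.
 */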

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}
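
/*
 * Together these form a lazy GPR cache: reg_read() pulls a register into
 * ctxt->_regs on first use, reg_write() marks it dirty, and reg_rmw() does
 * both; writeback_registers() below flushes the dirty values back through
 * ctxt->ops->write_gpr().
 */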

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
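
/*
 * The SETcc stubs above are each padded to 4 bytes and laid out in the
 * architectural condition-code order (o, no, b/c, nb/nc, z, nz, be, nbe,
 * s, ns, p, np, l, nl, le, nle), so the stub for condition cc can be
 * reached at em_setcc + 4 * (cc & 0xf); see test_cc() below.
 */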

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
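
/*
 * Example: a 16-bit SS (ss.d == 0) gives stack_mask 0xffff and hence
 * stack_size 2; a 32-bit SS gives 0xffffffff and 4; 64-bit mode gives
 * ~0UL and 8.
 */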

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
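
/*
 * Concretely: MOVDQA (flagged Aligned) is subject to the alignment check
 * in __linearize() below, MOVDQU (Unaligned) and Avx-flagged encodings
 * are exempt, and any other operand of 16 bytes or more defaults to
 * requiring alignment.
 */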

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
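
/*
 * Expand-down example: with desc.d == 0 and a scaled limit of 0x0fff,
 * valid offsets are 0x1000..0xffff -- addr.ea <= lim faults, and the
 * effective upper bound is re-set to 0xffff (0xffffffff when d == 1)
 * before the final addr.ea > lim check above.
 */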

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}
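
/*
 * Worked example (sketch): if eip sits 10 bytes before the end of a page,
 * the first prefetch is capped at those 10 bytes by the PAGE_SIZE clamp;
 * a follow-up call fetches from the next page, and anything pushing the
 * total past the 15-byte instruction limit is rejected with #GP above.
 */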

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})
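
/*
 * Usage sketch (caller assumed; the decoder proper is outside this
 * excerpt): a local "int rc" and a "done:" label must be in scope, and
 * the context variable must literally be named ctxt since the macro body
 * references it directly, e.g.
 *
 *	ctxt->b = insn_fetch(u8, ctxt);
 *
 * which bails out with "goto done" on a failed fetch.
 */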

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}
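
/*
 * Example: with no REX prefix, a byte operand with modrm_reg 4..7 selects
 * AH/CH/DH/BH -- byte 1 of RAX/RCX/RDX/RBX -- which is exactly what the
 * "(modrm_reg & 3) + 1" pointer arithmetic above yields.
 */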

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
1178
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001179static int decode_modrm(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001180 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001181{
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001182 u8 sib;
Bandan Das02357bd2014-04-16 12:46:11 -04001183 int index_reg, base_reg, scale;
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001184 int rc = X86EMUL_CONTINUE;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001185 ulong modrm_ea = 0;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001186
Bandan Das02357bd2014-04-16 12:46:11 -04001187 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1188 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1189 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001190
Bandan Das02357bd2014-04-16 12:46:11 -04001191 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001192 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
Bandan Das02357bd2014-04-16 12:46:11 -04001193 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001194 ctxt->modrm_seg = VCPU_SREG_DS;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001195
Nadav Amit9b88ae92014-05-25 23:05:21 +03001196 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001197 op->type = OP_REG;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001198 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Paolo Bonzini8acb42072013-05-30 16:35:55 +02001199 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02001200 ctxt->d & ByteOp);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001201 if (ctxt->d & Sse) {
Avi Kivity12537912011-03-29 11:41:27 +02001202 op->type = OP_XMM;
1203 op->bytes = 16;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001204 op->addr.xmm = ctxt->modrm_rm;
1205 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
Avi Kivity12537912011-03-29 11:41:27 +02001206 return rc;
1207 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001208 if (ctxt->d & Mmx) {
1209 op->type = OP_MM;
1210 op->bytes = 8;
Paolo Bonzinibdc90722014-05-06 14:03:29 +02001211 op->addr.mm = ctxt->modrm_rm & 7;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001212 return rc;
1213 }
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001214 fetch_register_operand(op);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001215 return rc;
1216 }
1217
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001218 op->type = OP_MEM;
1219
Avi Kivity9dac77f2011-06-01 15:34:25 +03001220 if (ctxt->ad_bytes == 2) {
Avi Kivitydd856ef2012-08-27 23:46:17 +03001221 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1222 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1223 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1224 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001225
1226 /* 16-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001227 switch (ctxt->modrm_mod) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001228 case 0:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001229 if (ctxt->modrm_rm == 6)
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001230 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001231 break;
1232 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001233 modrm_ea += insn_fetch(s8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001234 break;
1235 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001236 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001237 break;
1238 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001239 switch (ctxt->modrm_rm) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001240 case 0:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001241 modrm_ea += bx + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001242 break;
1243 case 1:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001244 modrm_ea += bx + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001245 break;
1246 case 2:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001247 modrm_ea += bp + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001248 break;
1249 case 3:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001250 modrm_ea += bp + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001251 break;
1252 case 4:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001253 modrm_ea += si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001254 break;
1255 case 5:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001256 modrm_ea += di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001257 break;
1258 case 6:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001259 if (ctxt->modrm_mod != 0)
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001260 modrm_ea += bp;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001261 break;
1262 case 7:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001263 modrm_ea += bx;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001264 break;
1265 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001266 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1267 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1268 ctxt->modrm_seg = VCPU_SREG_SS;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001269 modrm_ea = (u16)modrm_ea;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001270 } else {
1271 /* 32/64-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001272 if ((ctxt->modrm_rm & 7) == 4) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001273 sib = insn_fetch(u8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001274 index_reg |= (sib >> 3) & 7;
1275 base_reg |= sib & 7;
1276 scale = sib >> 6;
1277
Avi Kivity9dac77f2011-06-01 15:34:25 +03001278 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001279 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivitya6e34072012-06-10 17:15:39 +03001280 else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03001281 modrm_ea += reg_read(ctxt, base_reg);
Avi Kivitya6e34072012-06-10 17:15:39 +03001282 adjust_modrm_seg(ctxt, base_reg);
Nadav Amitab708092014-12-25 02:52:21 +02001283 /* Increment ESP on POP [ESP] */
1284 if ((ctxt->d & IncSP) &&
1285 base_reg == VCPU_REGS_RSP)
1286 modrm_ea += ctxt->op_bytes;
Avi Kivitya6e34072012-06-10 17:15:39 +03001287 }
Avi Kivitydc71d0f2008-06-15 21:23:17 -07001288 if (index_reg != 4)
Avi Kivitydd856ef2012-08-27 23:46:17 +03001289 modrm_ea += reg_read(ctxt, index_reg) << scale;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001290 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
Nadav Amit5b38ab82014-11-02 11:54:41 +02001291 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivity84411d82008-06-15 21:53:26 -07001292 if (ctxt->mode == X86EMUL_MODE_PROT64)
Avi Kivity9dac77f2011-06-01 15:34:25 +03001293 ctxt->rip_relative = 1;
Avi Kivitya6e34072012-06-10 17:15:39 +03001294 } else {
1295 base_reg = ctxt->modrm_rm;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001296 modrm_ea += reg_read(ctxt, base_reg);
Avi Kivitya6e34072012-06-10 17:15:39 +03001297 adjust_modrm_seg(ctxt, base_reg);
1298 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001299 switch (ctxt->modrm_mod) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001300 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001301 modrm_ea += insn_fetch(s8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001302 break;
1303 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001304 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001305 break;
1306 }
1307 }
Avi Kivity90de84f2010-11-17 15:28:21 +02001308 op->addr.mem.ea = modrm_ea;
Bandan Das41061cd2014-04-16 12:46:14 -04001309 if (ctxt->ad_bytes != 8)
1310 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1311
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001312done:
1313 return rc;
1314}
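
/*
 * For reference, a sketch of the 16-bit ModR/M effective-address bases
 * decoded above (the mod-field displacement is added on top):
 *
 *	rm=0: BX+SI	rm=1: BX+DI	rm=2: BP+SI	rm=3: BP+DI
 *	rm=4: SI	rm=5: DI	rm=6: BP (disp16 when mod=0)
 *	rm=7: BX
 *
 * BP-based forms (rm=2, 3, and 6 with mod != 0) default to the SS
 * segment, which is why modrm_seg is switched to VCPU_SREG_SS there.
 */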
1315
1316static int decode_abs(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001317 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001318{
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001319 int rc = X86EMUL_CONTINUE;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001320
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001321 op->type = OP_MEM;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001322 switch (ctxt->ad_bytes) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001323 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001324 op->addr.mem.ea = insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001325 break;
1326 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001327 op->addr.mem.ea = insn_fetch(u32, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001328 break;
1329 case 8:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001330 op->addr.mem.ea = insn_fetch(u64, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001331 break;
1332 }
1333done:
1334 return rc;
1335}
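
/*
 * decode_abs() covers instructions whose memory operand is a bare
 * offset of ad_bytes length rather than a ModR/M form, e.g. the
 * moffs variants of MOV (opcodes 0xa0-0xa3): "mov al, [0x1234]"
 * under a 16-bit address size fetches a u16 offset directly from
 * the instruction stream.
 */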
1336
Avi Kivity9dac77f2011-06-01 15:34:25 +03001337static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
Wei Yongjun35c843c2010-08-09 11:34:56 +08001338{
Sheng Yang7129eec2010-09-28 16:33:32 +08001339 long sv = 0, mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001340
Avi Kivity9dac77f2011-06-01 15:34:25 +03001341 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
Nadav Amit7dec5602014-06-15 16:12:57 +03001342 mask = ~((long)ctxt->dst.bytes * 8 - 1);
Wei Yongjun35c843c2010-08-09 11:34:56 +08001343
Avi Kivity9dac77f2011-06-01 15:34:25 +03001344 if (ctxt->src.bytes == 2)
1345 sv = (s16)ctxt->src.val & (s16)mask;
1346 else if (ctxt->src.bytes == 4)
1347 sv = (s32)ctxt->src.val & (s32)mask;
Nadav Amit7dec5602014-06-15 16:12:57 +03001348 else
1349 sv = (s64)ctxt->src.val & (s64)mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001350
Nadav Amit1c1c35a2014-11-19 17:43:09 +02001351 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1352 ctxt->dst.addr.mem.ea + (sv >> 3));
Wei Yongjun35c843c2010-08-09 11:34:56 +08001353 }
Wei Yongjunba7ff2b2010-08-09 11:39:14 +08001354
1355	/* keep only the bit offset within the operand */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001356 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001357}
1358
Gleb Natapov9de41572010-04-28 19:15:22 +03001359static int read_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov9de41572010-04-28 19:15:22 +03001360 unsigned long addr, void *dest, unsigned size)
1361{
1362 int rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001363 struct read_cache *mc = &ctxt->mem_read;
Gleb Natapov9de41572010-04-28 19:15:22 +03001364
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001365 if (mc->pos < mc->end)
1366 goto read_cached;
Gleb Natapov9de41572010-04-28 19:15:22 +03001367
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001368 WARN_ON((mc->end + size) >= sizeof(mc->data));
Gleb Natapov9de41572010-04-28 19:15:22 +03001369
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001370 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1371 &ctxt->exception);
1372 if (rc != X86EMUL_CONTINUE)
1373 return rc;
1374
1375 mc->end += size;
1376
1377read_cached:
1378 memcpy(dest, mc->data + mc->pos, size);
1379 mc->pos += size;
Gleb Natapov9de41572010-04-28 19:15:22 +03001380 return X86EMUL_CONTINUE;
1381}
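
/*
 * mem_read acts as a small per-instruction read cache: a multi-part
 * fetch (e.g. the two halves of a far pointer) or a re-executed
 * instruction hits the read_cached path instead of re-issuing the
 * access, so reads stay stable when emulation is restarted after an
 * MMIO exit.
 */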
1382
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001383static int segmented_read(struct x86_emulate_ctxt *ctxt,
1384 struct segmented_address addr,
1385 void *data,
1386 unsigned size)
1387{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001388 int rc;
1389 ulong linear;
1390
Avi Kivity83b87952011-04-03 11:31:19 +03001391 rc = linearize(ctxt, addr, size, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001392 if (rc != X86EMUL_CONTINUE)
1393 return rc;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001394 return read_emulated(ctxt, linear, data, size);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001395}
1396
1397static int segmented_write(struct x86_emulate_ctxt *ctxt,
1398 struct segmented_address addr,
1399 const void *data,
1400 unsigned size)
1401{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001402 int rc;
1403 ulong linear;
1404
Avi Kivity83b87952011-04-03 11:31:19 +03001405 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001406 if (rc != X86EMUL_CONTINUE)
1407 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001408 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1409 &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001410}
1411
1412static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1413 struct segmented_address addr,
1414 const void *orig_data, const void *data,
1415 unsigned size)
1416{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001417 int rc;
1418 ulong linear;
1419
Avi Kivity83b87952011-04-03 11:31:19 +03001420 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001421 if (rc != X86EMUL_CONTINUE)
1422 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001423 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1424 size, &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001425}
1426
Gleb Natapov7b262e92010-03-18 15:20:27 +02001427static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov7b262e92010-03-18 15:20:27 +02001428 unsigned int size, unsigned short port,
1429 void *dest)
1430{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001431 struct read_cache *rc = &ctxt->io_read;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001432
1433 if (rc->pos == rc->end) { /* refill pio read ahead */
Gleb Natapov7b262e92010-03-18 15:20:27 +02001434 unsigned int in_page, n;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001435 unsigned int count = ctxt->rep_prefix ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001436 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001437 in_page = (ctxt->eflags & EFLG_DF) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001438 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1439 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
Mark Rustadb55a8142014-07-25 06:27:05 -07001440 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
Gleb Natapov7b262e92010-03-18 15:20:27 +02001441 if (n == 0)
1442 n = 1;
1443 rc->pos = rc->end = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001444 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
Gleb Natapov7b262e92010-03-18 15:20:27 +02001445 return 0;
1446 rc->end = n * size;
1447 }
1448
Nadav Amite6e39f02014-04-18 03:35:10 +03001449 if (ctxt->rep_prefix && (ctxt->d & String) &&
1450 !(ctxt->eflags & EFLG_DF)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001451 ctxt->dst.data = rc->data + rc->pos;
1452 ctxt->dst.type = OP_MEM_STR;
1453 ctxt->dst.count = (rc->end - rc->pos) / size;
1454 rc->pos = rc->end;
1455 } else {
1456 memcpy(dest, rc->data + rc->pos, size);
1457 rc->pos += size;
1458 }
Gleb Natapov7b262e92010-03-18 15:20:27 +02001459 return 1;
1460}
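
/*
 * Read-ahead example for the refill above (assuming the 1024-byte
 * rc->data buffer): "rep insb" with RCX = 100 and RDI 40 bytes below
 * a page boundary gives n = min3(40, 1024 / 1, 100) = 40, so one
 * ->pio_in_emulated() call fetches 40 bytes and later iterations are
 * served from the cache instead of exiting once per byte.
 */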
1461
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01001462static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1463 u16 index, struct desc_struct *desc)
1464{
1465 struct desc_ptr dt;
1466 ulong addr;
1467
1468 ctxt->ops->get_idt(ctxt, &dt);
1469
1470 if (dt.size < index * 8 + 7)
1471 return emulate_gp(ctxt, index << 3 | 0x2);
1472
1473 addr = dt.address + index * 8;
1474 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1475 &ctxt->exception);
1476}
1477
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001478static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001479 u16 selector, struct desc_ptr *dt)
1480{
Mathias Krause0225fb52012-08-30 01:30:16 +02001481 const struct x86_emulate_ops *ops = ctxt->ops;
Nadav Amit2eedcac2014-06-02 18:34:05 +03001482 u32 base3 = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001483
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001484	if (selector & (1 << 2)) {
1485 struct desc_struct desc;
Avi Kivity1aa36612011-04-27 13:20:30 +03001486 u16 sel;
1487
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001488	memset(dt, 0, sizeof *dt);
Nadav Amit2eedcac2014-06-02 18:34:05 +03001489 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1490 VCPU_SREG_LDTR))
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001491 return;
1492
1493 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
Nadav Amit2eedcac2014-06-02 18:34:05 +03001494 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001495 } else
Avi Kivity4bff1e862011-04-20 13:37:53 +03001496 ops->get_gdt(ctxt, dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001497}
1498
Nadav Amitedccda72014-12-25 02:52:23 +02001499static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1500 u16 selector, ulong *desc_addr_p)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001501{
1502 struct desc_ptr dt;
1503 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001504 ulong addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001505
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001506 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001507
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001508 if (dt.size < index * 8 + 7)
1509 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001510
1511 addr = dt.address + index * 8;
Nadav Amitedccda72014-12-25 02:52:23 +02001512
1513#ifdef CONFIG_X86_64
1514 if (addr >> 32 != 0) {
1515 u64 efer = 0;
1516
1517 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1518 if (!(efer & EFER_LMA))
1519 addr &= (u32)-1;
1520 }
1521#endif
1522
1523 *desc_addr_p = addr;
1524 return X86EMUL_CONTINUE;
1525}
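
/*
 * Selector layout assumed by the lookup above:
 *
 *	bits 15..3: descriptor index
 *	bit  2:     table indicator (0 = GDT, 1 = LDT)
 *	bits 1..0:  RPL
 *
 * e.g. selector 0x2b has index 5, TI = 0, RPL = 3; its descriptor
 * sits at gdt.address + 5 * 8, and a failed limit check raises #GP
 * with error code 0x28 (the selector with its RPL bits cleared).
 */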
1526
1527/* allowed just for 8 bytes segments */
1528static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1529 u16 selector, struct desc_struct *desc,
1530 ulong *desc_addr_p)
1531{
1532 int rc;
1533
1534 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1535 if (rc != X86EMUL_CONTINUE)
1536 return rc;
1537
1538 return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
1539 &ctxt->exception);
1540}
1541
1542/* allowed just for 8 bytes segments */
1543static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1544 u16 selector, struct desc_struct *desc)
1545{
1546 int rc;
1547 ulong addr;
1548
1549 rc = get_descriptor_ptr(ctxt, selector, &addr);
1550 if (rc != X86EMUL_CONTINUE)
1551 return rc;
1552
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001553 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1554 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001555}
1556
Gleb Natapov5601d052011-03-07 14:55:06 +02001557/* Long-mode segmentation rules are only partially implemented */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001558static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Nadav Amitd1442d82014-09-18 22:39:39 +03001559 u16 selector, int seg, u8 cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001560 enum x86_transfer_type transfer,
Nadav Amitd1442d82014-09-18 22:39:39 +03001561 struct desc_struct *desc)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001562{
Avi Kivity869be992012-06-13 16:30:53 +03001563 struct desc_struct seg_desc, old_desc;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001564 u8 dpl, rpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001565 unsigned err_vec = GP_VECTOR;
1566 u32 err_code = 0;
1567 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
Avi Kivitye9194642012-06-13 16:29:39 +03001568 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001569 int ret;
Avi Kivity03ebebe2012-08-21 17:07:04 +03001570 u16 dummy;
Nadav Amite37a75a2014-06-02 18:34:04 +03001571 u32 base3 = 0;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001572
1573 memset(&seg_desc, 0, sizeof seg_desc);
1574
Kevin Wolff8da94e2013-04-11 14:06:03 +02001575 if (ctxt->mode == X86EMUL_MODE_REAL) {
1576 /* set real mode segment descriptor (keep limit etc. for
1577 * unreal mode) */
Avi Kivity03ebebe2012-08-21 17:07:04 +03001578 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001579 set_desc_base(&seg_desc, selector << 4);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001580 goto load;
Kevin Wolff8da94e2013-04-11 14:06:03 +02001581 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1582 /* VM86 needs a clean new segment descriptor */
1583 set_desc_base(&seg_desc, selector << 4);
1584 set_desc_limit(&seg_desc, 0xffff);
1585 seg_desc.type = 3;
1586 seg_desc.p = 1;
1587 seg_desc.s = 1;
1588 seg_desc.dpl = 3;
1589 goto load;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001590 }
1591
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001592 rpl = selector & 3;
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001593
1594	/* NULL selector is not valid for TR and CS, nor for SS
	 * (except in 64-bit mode when RPL == CPL) */
1595 if ((seg == VCPU_SREG_CS
1596 || (seg == VCPU_SREG_SS
1597 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1598 || seg == VCPU_SREG_TR)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001599 && null_selector)
1600 goto exception;
1601
1602 /* TR should be in GDT only */
1603 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1604 goto exception;
1605
1606 if (null_selector) /* for NULL selector skip all following checks */
1607 goto load;
1608
Avi Kivitye9194642012-06-13 16:29:39 +03001609 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001610 if (ret != X86EMUL_CONTINUE)
1611 return ret;
1612
1613 err_code = selector & 0xfffc;
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001614 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1615 GP_VECTOR;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001616
Guo Chaofc058682012-06-28 15:19:51 +08001617	/* can't load a system descriptor into an ordinary segment register */
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001618 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1619 if (transfer == X86_TRANSFER_CALL_JMP)
1620 return X86EMUL_UNHANDLEABLE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001621 goto exception;
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001622 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001623
1624 if (!seg_desc.p) {
1625 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1626 goto exception;
1627 }
1628
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001629 dpl = seg_desc.dpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001630
1631 switch (seg) {
1632 case VCPU_SREG_SS:
1633 /*
1634	 * segment is not a writable data segment, or the segment
1635	 * selector's RPL != CPL, or the descriptor's DPL != CPL
1636 */
1637 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1638 goto exception;
1639 break;
1640 case VCPU_SREG_CS:
1641 if (!(seg_desc.type & 8))
1642 goto exception;
1643
1644 if (seg_desc.type & 4) {
1645 /* conforming */
1646 if (dpl > cpl)
1647 goto exception;
1648 } else {
1649 /* nonconforming */
1650 if (rpl > cpl || dpl != cpl)
1651 goto exception;
1652 }
Nadav Amit040c8dc2014-09-18 22:39:43 +03001653 /* in long-mode d/b must be clear if l is set */
1654 if (seg_desc.d && seg_desc.l) {
1655 u64 efer = 0;
1656
1657 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1658 if (efer & EFER_LMA)
1659 goto exception;
1660 }
1661
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001662 /* CS(RPL) <- CPL */
1663 selector = (selector & 0xfffc) | cpl;
1664 break;
1665 case VCPU_SREG_TR:
1666 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1667 goto exception;
Avi Kivity869be992012-06-13 16:30:53 +03001668 old_desc = seg_desc;
1669 seg_desc.type |= 2; /* busy */
1670 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1671 sizeof(seg_desc), &ctxt->exception);
1672 if (ret != X86EMUL_CONTINUE)
1673 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001674 break;
1675 case VCPU_SREG_LDTR:
1676 if (seg_desc.s || seg_desc.type != 2)
1677 goto exception;
1678 break;
1679 default: /* DS, ES, FS, or GS */
1680 /*
1681 * segment is not a data or readable code segment or
1682 * ((segment is a data or nonconforming code segment)
1683 * and (both RPL and CPL > DPL))
1684 */
1685 if ((seg_desc.type & 0xa) == 0x8 ||
1686 (((seg_desc.type & 0xc) != 0xc) &&
1687 (rpl > dpl && cpl > dpl)))
1688 goto exception;
1689 break;
1690 }
1691
1692 if (seg_desc.s) {
1693 /* mark segment as accessed */
Nadav Amite2cefa72014-12-25 02:52:22 +02001694 if (!(seg_desc.type & 1)) {
1695 seg_desc.type |= 1;
1696 ret = write_segment_descriptor(ctxt, selector,
1697 &seg_desc);
1698 if (ret != X86EMUL_CONTINUE)
1699 return ret;
1700 }
Nadav Amite37a75a2014-06-02 18:34:04 +03001701 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1702 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1703 sizeof(base3), &ctxt->exception);
1704 if (ret != X86EMUL_CONTINUE)
1705 return ret;
Nadav Amit9a9abf62014-11-02 11:54:56 +02001706 if (is_noncanonical_address(get_desc_base(&seg_desc) |
1707 ((u64)base3 << 32)))
1708 return emulate_gp(ctxt, 0);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001709 }
1710load:
Nadav Amite37a75a2014-06-02 18:34:04 +03001711 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
Nadav Amitd1442d82014-09-18 22:39:39 +03001712 if (desc)
1713 *desc = seg_desc;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001714 return X86EMUL_CONTINUE;
1715exception:
Paolo Bonzini592f0852014-08-20 10:05:08 +02001716 return emulate_exception(ctxt, err_vec, err_code, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001717}
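
/*
 * In outline, the checks above follow the SDM ordering: real mode and
 * VM86 skip descriptor validation entirely; NULL selectors are rejected
 * for CS and TR, and for SS outside 64-bit mode (or when RPL != CPL);
 * the descriptor is then fetched and validated per register (S bit,
 * type, DPL/RPL/CPL) before the accessed or busy bit is written back
 * and the register is finally loaded.
 */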
1718
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001719static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1720 u16 selector, int seg)
1721{
1722 u8 cpl = ctxt->ops->cpl(ctxt);
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001723 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1724 X86_TRANSFER_NONE, NULL);
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001725}
1726
Wei Yongjun31be40b2010-08-17 09:17:30 +08001727static void write_register_operand(struct operand *op)
1728{
Nadav Amit6fd8e122015-03-30 15:39:20 +03001729	assign_register(op->addr.reg, op->val, op->bytes);
Wei Yongjun31be40b2010-08-17 09:17:30 +08001730}
1731
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001732static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
Wei Yongjunc37eda12010-06-15 09:03:33 +08001733{
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001734 switch (op->type) {
Wei Yongjunc37eda12010-06-15 09:03:33 +08001735 case OP_REG:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001736 write_register_operand(op);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001737 break;
1738 case OP_MEM:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001739 if (ctxt->lock_prefix)
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001740 return segmented_cmpxchg(ctxt,
1741 op->addr.mem,
1742 &op->orig_val,
1743 &op->val,
1744 op->bytes);
1745 else
1746 return segmented_write(ctxt,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001747 op->addr.mem,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001748 &op->val,
1749 op->bytes);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001750 break;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001751 case OP_MEM_STR:
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001752 return segmented_write(ctxt,
1753 op->addr.mem,
1754 op->data,
1755 op->bytes * op->count);
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001756 break;
Avi Kivity12537912011-03-29 11:41:27 +02001757 case OP_XMM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001758 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
Avi Kivity12537912011-03-29 11:41:27 +02001759 break;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001760 case OP_MM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001761 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001762 break;
Wei Yongjunc37eda12010-06-15 09:03:33 +08001763 case OP_NONE:
1764 /* no writeback */
1765 break;
1766 default:
1767 break;
1768 }
1769 return X86EMUL_CONTINUE;
1770}
1771
Avi Kivity51ddff52012-06-12 20:19:40 +03001772static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001773{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001774 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001775
Avi Kivity5ad105e2012-08-19 14:34:31 +03001776 rsp_increment(ctxt, -bytes);
Avi Kivitydd856ef2012-08-27 23:46:17 +03001777 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001778 addr.seg = VCPU_SREG_SS;
1779
Avi Kivity51ddff52012-06-12 20:19:40 +03001780 return segmented_write(ctxt, addr, data, bytes);
1781}
1782
1783static int em_push(struct x86_emulate_ctxt *ctxt)
1784{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001785 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001786 ctxt->dst.type = OP_NONE;
Avi Kivity51ddff52012-06-12 20:19:40 +03001787 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001788}
1789
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001790static int emulate_pop(struct x86_emulate_ctxt *ctxt,
Avi Kivity350f69d2009-01-05 11:12:40 +02001791 void *dest, int len)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001792{
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001793 int rc;
Avi Kivity90de84f2010-11-17 15:28:21 +02001794 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001795
Avi Kivitydd856ef2012-08-27 23:46:17 +03001796 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Avi Kivity90de84f2010-11-17 15:28:21 +02001797 addr.seg = VCPU_SREG_SS;
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001798 rc = segmented_read(ctxt, addr, dest, len);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09001799 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001800 return rc;
1801
Avi Kivity5ad105e2012-08-19 14:34:31 +03001802 rsp_increment(ctxt, len);
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001803 return rc;
1804}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001805
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001806static int em_pop(struct x86_emulate_ctxt *ctxt)
1807{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001808 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001809}
1810
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001811static int emulate_popf(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001812 void *dest, int len)
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001813{
1814 int rc;
1815 unsigned long val, change_mask;
1816 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001817 int cpl = ctxt->ops->cpl(ctxt);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001818
Takuya Yoshikawa3b9be3b2011-05-02 02:27:55 +09001819 rc = emulate_pop(ctxt, &val, len);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001820 if (rc != X86EMUL_CONTINUE)
1821 return rc;
1822
1823 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
Nadav Amit163b1352014-07-21 14:37:28 +03001824 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001825
1826	switch (ctxt->mode) {
1827 case X86EMUL_MODE_PROT64:
1828 case X86EMUL_MODE_PROT32:
1829 case X86EMUL_MODE_PROT16:
1830 if (cpl == 0)
1831 change_mask |= EFLG_IOPL;
1832 if (cpl <= iopl)
1833 change_mask |= EFLG_IF;
1834 break;
1835 case X86EMUL_MODE_VM86:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001836 if (iopl < 3)
1837 return emulate_gp(ctxt, 0);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001838 change_mask |= EFLG_IF;
1839 break;
1840 default: /* real mode */
1841 change_mask |= (EFLG_IOPL | EFLG_IF);
1842 break;
1843 }
1844
1845 *(unsigned long *)dest =
1846 (ctxt->eflags & ~change_mask) | (val & change_mask);
1847
1848 return rc;
1849}
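
/*
 * Masking example for the POPF emulation above: a CPL-3 task with
 * IOPL 0 in protected mode gets change_mask without EFLG_IOPL or
 * EFLG_IF, so an attempt to set IF is silently dropped; in VM86 with
 * IOPL < 3 the POPF itself raises #GP(0) instead.
 */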
1850
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001851static int em_popf(struct x86_emulate_ctxt *ctxt)
1852{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001853 ctxt->dst.type = OP_REG;
1854 ctxt->dst.addr.reg = &ctxt->eflags;
1855 ctxt->dst.bytes = ctxt->op_bytes;
1856 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001857}
1858
Avi Kivity612e89f2012-06-12 20:03:23 +03001859static int em_enter(struct x86_emulate_ctxt *ctxt)
1860{
1861 int rc;
1862 unsigned frame_size = ctxt->src.val;
1863 unsigned nesting_level = ctxt->src2.val & 31;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001864 ulong rbp;
Avi Kivity612e89f2012-06-12 20:03:23 +03001865
1866 if (nesting_level)
1867 return X86EMUL_UNHANDLEABLE;
1868
Avi Kivitydd856ef2012-08-27 23:46:17 +03001869 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1870 rc = push(ctxt, &rbp, stack_size(ctxt));
Avi Kivity612e89f2012-06-12 20:03:23 +03001871 if (rc != X86EMUL_CONTINUE)
1872 return rc;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001873 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
Avi Kivity612e89f2012-06-12 20:03:23 +03001874 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001875 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1876 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
Avi Kivity612e89f2012-06-12 20:03:23 +03001877 stack_mask(ctxt));
1878 return X86EMUL_CONTINUE;
1879}
1880
Avi Kivityf47cfa32012-06-07 17:49:24 +03001881static int em_leave(struct x86_emulate_ctxt *ctxt)
1882{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001883 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
Avi Kivityf47cfa32012-06-07 17:49:24 +03001884 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001885 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
Avi Kivityf47cfa32012-06-07 17:49:24 +03001886}
1887
Avi Kivity1cd196e2011-09-13 10:45:51 +03001888static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001889{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001890 int seg = ctxt->src2.val;
1891
Avi Kivity9dac77f2011-06-01 15:34:25 +03001892 ctxt->src.val = get_segment_selector(ctxt, seg);
Nadav Amit0fcc2072014-11-02 11:54:51 +02001893 if (ctxt->op_bytes == 4) {
1894 rsp_increment(ctxt, -2);
1895 ctxt->op_bytes = 2;
1896 }
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001897
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001898 return em_push(ctxt);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001899}
1900
Avi Kivity1cd196e2011-09-13 10:45:51 +03001901static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001902{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001903 int seg = ctxt->src2.val;
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001904 unsigned long selector;
1905 int rc;
1906
Nadav Amit3313bc42014-12-25 02:52:17 +02001907 rc = emulate_pop(ctxt, &selector, 2);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001908 if (rc != X86EMUL_CONTINUE)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001909 return rc;
1910
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001911 if (ctxt->modrm_reg == VCPU_SREG_SS)
1912 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
Nadav Amit3313bc42014-12-25 02:52:17 +02001913 if (ctxt->op_bytes > 2)
1914 rsp_increment(ctxt, ctxt->op_bytes - 2);
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001915
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001916 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001917 return rc;
1918}
1919
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001920static int em_pusha(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001921{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001922 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001923 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001924 int reg = VCPU_REGS_RAX;
1925
1926 while (reg <= VCPU_REGS_RDI) {
1927		if (reg == VCPU_REGS_RSP)
1928			ctxt->src.val = old_esp;
		else
			ctxt->src.val = reg_read(ctxt, reg);
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001929
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001930 rc = em_push(ctxt);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001931 if (rc != X86EMUL_CONTINUE)
1932 return rc;
1933
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001934 ++reg;
1935 }
Wei Yongjunc37eda12010-06-15 09:03:33 +08001936
Wei Yongjunc37eda12010-06-15 09:03:33 +08001937 return rc;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001938}
1939
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001940static int em_pushf(struct x86_emulate_ctxt *ctxt)
1941{
Nadav Amitbc397a62014-12-10 11:19:03 +02001942 ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001943 return em_push(ctxt);
1944}
1945
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001946static int em_popa(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001947{
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001948 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001949 int reg = VCPU_REGS_RDI;
Nadav Amit6fd8e122015-03-30 15:39:20 +03001950 u32 val;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001951
1952 while (reg >= VCPU_REGS_RAX) {
1953 if (reg == VCPU_REGS_RSP) {
Avi Kivity5ad105e2012-08-19 14:34:31 +03001954 rsp_increment(ctxt, ctxt->op_bytes);
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001955 --reg;
1956 }
1957
Nadav Amit6fd8e122015-03-30 15:39:20 +03001958 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001959 if (rc != X86EMUL_CONTINUE)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001960 break;
Nadav Amit6fd8e122015-03-30 15:39:20 +03001961 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001962 --reg;
1963 }
1964 return rc;
1965}
1966
Avi Kivitydd856ef2012-08-27 23:46:17 +03001967static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001968{
Mathias Krause0225fb52012-08-30 01:30:16 +02001969 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001970 int rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001971 struct desc_ptr dt;
1972 gva_t cs_addr;
1973 gva_t eip_addr;
1974 u16 cs, eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001975
1976 /* TODO: Add limit checks */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001977 ctxt->src.val = ctxt->eflags;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001978 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001979 if (rc != X86EMUL_CONTINUE)
1980 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001981
1982 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1983
Avi Kivity9dac77f2011-06-01 15:34:25 +03001984 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001985 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001986 if (rc != X86EMUL_CONTINUE)
1987 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001988
Avi Kivity9dac77f2011-06-01 15:34:25 +03001989 ctxt->src.val = ctxt->_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001990 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001991 if (rc != X86EMUL_CONTINUE)
1992 return rc;
1993
Avi Kivity4bff1e862011-04-20 13:37:53 +03001994 ops->get_idt(ctxt, &dt);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001995
1996 eip_addr = dt.address + (irq << 2);
1997 cs_addr = dt.address + (irq << 2) + 2;
1998
Avi Kivity0f65dd72011-04-20 13:37:53 +03001999 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03002000 if (rc != X86EMUL_CONTINUE)
2001 return rc;
2002
Avi Kivity0f65dd72011-04-20 13:37:53 +03002003 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03002004 if (rc != X86EMUL_CONTINUE)
2005 return rc;
2006
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002007 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03002008 if (rc != X86EMUL_CONTINUE)
2009 return rc;
2010
Avi Kivity9dac77f2011-06-01 15:34:25 +03002011 ctxt->_eip = eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03002012
2013 return rc;
2014}
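
/*
 * Real-mode dispatch sketch: the IVT is an array of 4-byte IP:CS
 * pairs starting at dt.address (normally 0). For "int 0x10" the
 * handler is read from offset 0x40 (IP at 0x40, CS at 0x42), after
 * FLAGS, CS and IP have been pushed and IF/TF/AC cleared as above.
 */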
2015
Avi Kivitydd856ef2012-08-27 23:46:17 +03002016int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2017{
2018 int rc;
2019
2020 invalidate_registers(ctxt);
2021 rc = __emulate_int_real(ctxt, irq);
2022 if (rc == X86EMUL_CONTINUE)
2023 writeback_registers(ctxt);
2024 return rc;
2025}
2026
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002027static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03002028{
2029	switch (ctxt->mode) {
2030 case X86EMUL_MODE_REAL:
Avi Kivitydd856ef2012-08-27 23:46:17 +03002031 return __emulate_int_real(ctxt, irq);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03002032 case X86EMUL_MODE_VM86:
2033 case X86EMUL_MODE_PROT16:
2034 case X86EMUL_MODE_PROT32:
2035 case X86EMUL_MODE_PROT64:
2036 default:
2037 /* Protected mode interrupts unimplemented yet */
2038 return X86EMUL_UNHANDLEABLE;
2039 }
2040}
2041
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002042static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002043{
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002044 int rc = X86EMUL_CONTINUE;
2045 unsigned long temp_eip = 0;
2046 unsigned long temp_eflags = 0;
2047 unsigned long cs = 0;
2048 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
2049 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
2050 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
2051 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
2052
2053 /* TODO: Add stack limit check */
2054
Avi Kivity9dac77f2011-06-01 15:34:25 +03002055 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002056
2057 if (rc != X86EMUL_CONTINUE)
2058 return rc;
2059
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002060 if (temp_eip & ~0xffff)
2061 return emulate_gp(ctxt, 0);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002062
Avi Kivity9dac77f2011-06-01 15:34:25 +03002063 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002064
2065 if (rc != X86EMUL_CONTINUE)
2066 return rc;
2067
Avi Kivity9dac77f2011-06-01 15:34:25 +03002068 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002069
2070 if (rc != X86EMUL_CONTINUE)
2071 return rc;
2072
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002073 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002074
2075 if (rc != X86EMUL_CONTINUE)
2076 return rc;
2077
Avi Kivity9dac77f2011-06-01 15:34:25 +03002078 ctxt->_eip = temp_eip;
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002079
Avi Kivity9dac77f2011-06-01 15:34:25 +03002081 if (ctxt->op_bytes == 4)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002082 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
Avi Kivity9dac77f2011-06-01 15:34:25 +03002083 else if (ctxt->op_bytes == 2) {
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002084 ctxt->eflags &= ~0xffff;
2085 ctxt->eflags |= temp_eflags;
2086 }
2087
2088 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2089 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
Nadav Amit801806d2015-01-26 09:32:23 +02002090 ctxt->ops->set_nmi_mask(ctxt, false);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002091
2092 return rc;
2093}
2094
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002095static int em_iret(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002096{
2097	switch (ctxt->mode) {
2098 case X86EMUL_MODE_REAL:
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002099 return emulate_iret_real(ctxt);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002100 case X86EMUL_MODE_VM86:
2101 case X86EMUL_MODE_PROT16:
2102 case X86EMUL_MODE_PROT32:
2103 case X86EMUL_MODE_PROT64:
2104 default:
2105 /* iret from protected mode unimplemented yet */
2106 return X86EMUL_UNHANDLEABLE;
2107 }
2108}
2109
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002110static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2111{
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002112 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002113 unsigned short sel, old_sel;
2114 struct desc_struct old_desc, new_desc;
2115 const struct x86_emulate_ops *ops = ctxt->ops;
2116 u8 cpl = ctxt->ops->cpl(ctxt);
2117
2118 /* Assignment of RIP may only fail in 64-bit mode */
2119 if (ctxt->mode == X86EMUL_MODE_PROT64)
2120 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2121 VCPU_SREG_CS);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002122
Avi Kivity9dac77f2011-06-01 15:34:25 +03002123 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002124
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002125 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2126 X86_TRANSFER_CALL_JMP,
Nadav Amitd1442d82014-09-18 22:39:39 +03002127 &new_desc);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002128 if (rc != X86EMUL_CONTINUE)
2129 return rc;
2130
Nadav Amitd50eaa12014-11-19 17:43:11 +02002131 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03002132 if (rc != X86EMUL_CONTINUE) {
Nadav Amit7e46ddd2014-10-28 00:03:43 +02002133 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002134 /* assigning eip failed; restore the old cs */
2135 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2136 return rc;
2137 }
2138 return rc;
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002139}
2140
Nadav Amitf7784042014-09-18 22:39:41 +03002141static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002142{
Nadav Amitf7784042014-09-18 22:39:41 +03002143 return assign_eip_near(ctxt, ctxt->src.val);
2144}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002145
Nadav Amitf7784042014-09-18 22:39:41 +03002146static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2147{
2148 int rc;
2149	long old_eip;
2150
2151 old_eip = ctxt->_eip;
2152 rc = assign_eip_near(ctxt, ctxt->src.val);
2153 if (rc != X86EMUL_CONTINUE)
2154 return rc;
2155 ctxt->src.val = old_eip;
2156 rc = em_push(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09002157 return rc;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002158}
2159
Takuya Yoshikawae0dac402011-12-06 18:07:27 +09002160static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002161{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002162 u64 old = ctxt->dst.orig_val64;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002163
Nadav Amitaaa05f22014-06-02 18:34:10 +03002164 if (ctxt->dst.bytes == 16)
2165 return X86EMUL_UNHANDLEABLE;
2166
Avi Kivitydd856ef2012-08-27 23:46:17 +03002167 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2168 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2169 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2170 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
Laurent Vivier05f086f2007-09-24 11:10:55 +02002171 ctxt->eflags &= ~EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002172 } else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03002173 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2174 (u32) reg_read(ctxt, VCPU_REGS_RBX);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002175
Laurent Vivier05f086f2007-09-24 11:10:55 +02002176 ctxt->eflags |= EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002177 }
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002178 return X86EMUL_CONTINUE;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002179}
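
/*
 * CMPXCHG8B semantics as implemented above: if EDX:EAX matches the
 * 64-bit memory operand, ZF is set and ECX:EBX is stored to it;
 * otherwise ZF is cleared and the old value is loaded into EDX:EAX.
 * The 16-byte CMPXCHG16B form is left to X86EMUL_UNHANDLEABLE.
 */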
2180
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002181static int em_ret(struct x86_emulate_ctxt *ctxt)
2182{
Nadav Amit234f3ce2014-09-18 22:39:38 +03002183 int rc;
2184 unsigned long eip;
2185
2186 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2187 if (rc != X86EMUL_CONTINUE)
2188 return rc;
2189
2190 return assign_eip_near(ctxt, eip);
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002191}
2192
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002193static int em_ret_far(struct x86_emulate_ctxt *ctxt)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002194{
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002195 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002196 unsigned long eip, cs;
2197 u16 old_cs;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002198 int cpl = ctxt->ops->cpl(ctxt);
Nadav Amitd1442d82014-09-18 22:39:39 +03002199 struct desc_struct old_desc, new_desc;
2200 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002201
Nadav Amitd1442d82014-09-18 22:39:39 +03002202 if (ctxt->mode == X86EMUL_MODE_PROT64)
2203 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2204 VCPU_SREG_CS);
2205
2206 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002207 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002208 return rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002209 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002210 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002211 return rc;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002212 /* Outer-privilege level return is not implemented */
2213 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2214 return X86EMUL_UNHANDLEABLE;
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002215 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2216 X86_TRANSFER_RET,
Nadav Amitd1442d82014-09-18 22:39:39 +03002217 &new_desc);
2218 if (rc != X86EMUL_CONTINUE)
2219 return rc;
Nadav Amitd50eaa12014-11-19 17:43:11 +02002220 rc = assign_eip_far(ctxt, eip, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03002221 if (rc != X86EMUL_CONTINUE) {
Nadav Amit7e46ddd2014-10-28 00:03:43 +02002222 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002223 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2224 }
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002225 return rc;
2226}
2227
Bruce Rogers32611072013-09-09 09:40:20 -06002228static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2229{
2230 int rc;
2231
2232 rc = em_ret_far(ctxt);
2233 if (rc != X86EMUL_CONTINUE)
2234 return rc;
2235 rsp_increment(ctxt, ctxt->src.val);
2236 return X86EMUL_CONTINUE;
2237}
2238
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002239static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2240{
2241 /* Save real source value, then compare EAX against destination. */
Nadav Amit37c564f2014-06-02 18:34:07 +03002242 ctxt->dst.orig_val = ctxt->dst.val;
2243 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002244 ctxt->src.orig_val = ctxt->src.val;
Nadav Amit37c564f2014-06-02 18:34:07 +03002245 ctxt->src.val = ctxt->dst.orig_val;
Avi Kivity158de572013-01-19 19:51:57 +02002246 fastop(ctxt, em_cmp);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002247
2248 if (ctxt->eflags & EFLG_ZF) {
Nadav Amit2fcf5c82015-01-26 09:32:21 +02002249 /* Success: write back to memory; no update of EAX */
2250 ctxt->src.type = OP_NONE;
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002251 ctxt->dst.val = ctxt->src.orig_val;
2252 } else {
2253 /* Failure: write the value we saw to EAX. */
Nadav Amit2fcf5c82015-01-26 09:32:21 +02002254 ctxt->src.type = OP_REG;
2255 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2256 ctxt->src.val = ctxt->dst.orig_val;
2257 /* Create write-cycle to dest by writing the same value */
Nadav Amit37c564f2014-06-02 18:34:07 +03002258 ctxt->dst.val = ctxt->dst.orig_val;
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002259 }
2260 return X86EMUL_CONTINUE;
2261}
2262
Avi Kivityd4b43252011-09-13 10:45:50 +03002263static int em_lseg(struct x86_emulate_ctxt *ctxt)
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002264{
Avi Kivityd4b43252011-09-13 10:45:50 +03002265 int seg = ctxt->src2.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002266 unsigned short sel;
2267 int rc;
2268
Avi Kivity9dac77f2011-06-01 15:34:25 +03002269 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002270
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002271 rc = load_segment_descriptor(ctxt, sel, seg);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002272 if (rc != X86EMUL_CONTINUE)
2273 return rc;
2274
Avi Kivity9dac77f2011-06-01 15:34:25 +03002275 ctxt->dst.val = ctxt->src.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002276 return rc;
2277}
2278
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002279static void
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002280setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002281 struct desc_struct *cs, struct desc_struct *ss)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002282{
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002283 cs->l = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002284 set_desc_base(cs, 0); /* flat segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002285 cs->g = 1; /* 4kb granularity */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002286 set_desc_limit(cs, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002287 cs->type = 0x0b; /* Read, Execute, Accessed */
2288 cs->s = 1;
2289 cs->dpl = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002290 cs->p = 1;
2291 cs->d = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002292 cs->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002293
Gleb Natapov79168fd2010-04-28 19:15:30 +03002294 set_desc_base(ss, 0); /* flat segment */
2295 set_desc_limit(ss, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002296 ss->g = 1; /* 4kb granularity */
2297 ss->s = 1;
2298 ss->type = 0x03; /* Read/Write, Accessed */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002299 ss->d = 1; /* 32bit stack segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002300 ss->dpl = 0;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002301 ss->p = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002302 ss->l = 0;
2303 ss->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002304}
2305
Avi Kivity1a18a692012-02-01 12:23:21 +02002306static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2307{
2308 u32 eax, ebx, ecx, edx;
2309
2310 eax = ecx = 0;
Avi Kivity0017f932012-06-07 14:10:16 +03002311 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2312 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
Avi Kivity1a18a692012-02-01 12:23:21 +02002313 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2314 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2315}
2316
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002317static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2318{
Mathias Krause0225fb52012-08-30 01:30:16 +02002319 const struct x86_emulate_ops *ops = ctxt->ops;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002320 u32 eax, ebx, ecx, edx;
2321
2322 /*
2323	 * syscall is always enabled in long mode, so the vendor-specific
2324	 * (cpuid) check is only needed when other modes are active...
2325 */
2326 if (ctxt->mode == X86EMUL_MODE_PROT64)
2327 return true;
2328
2329 eax = 0x00000000;
2330 ecx = 0x00000000;
Avi Kivity0017f932012-06-07 14:10:16 +03002331 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2332 /*
2333 * Intel ("GenuineIntel")
2334	 * remark: Intel CPUs only support "syscall" in 64-bit long
2335	 * mode, so a 32-bit compat application running on a 64-bit
2336	 * guest will #UD. This could be papered over by emulating
2337	 * the AMD response, but AMD CPUs cannot be made to behave
2338	 * like Intel ones, so the Intel #UD is preserved.
2339 */
2340 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2341 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2342 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2343 return false;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002344
Avi Kivity0017f932012-06-07 14:10:16 +03002345 /* AMD ("AuthenticAMD") */
2346 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2347 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2348 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2349 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002350
Avi Kivity0017f932012-06-07 14:10:16 +03002351 /* AMD ("AMDisbetter!") */
2352 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2353 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2354 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2355 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002356
2357 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2358 return false;
2359}
2360
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002361static int em_syscall(struct x86_emulate_ctxt *ctxt)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002362{
Mathias Krause0225fb52012-08-30 01:30:16 +02002363 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002364 struct desc_struct cs, ss;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002365 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002366 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002367 u64 efer = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002368
2369 /* syscall is not available in real mode */
Gleb Natapov2e901c42010-03-18 15:20:12 +02002370 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002371 ctxt->mode == X86EMUL_MODE_VM86)
2372 return emulate_ud(ctxt);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002373
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002374 if (!(em_syscall_is_enabled(ctxt)))
2375 return emulate_ud(ctxt);
2376
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002377 ops->get_msr(ctxt, MSR_EFER, &efer);
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002378 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002379
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002380 if (!(efer & EFER_SCE))
2381 return emulate_ud(ctxt);
2382
Avi Kivity717746e2011-04-20 13:37:53 +03002383 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002384 msr_data >>= 32;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002385 cs_sel = (u16)(msr_data & 0xfffc);
2386 ss_sel = (u16)(msr_data + 8);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002387
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002388 if (efer & EFER_LMA) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002389 cs.d = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002390 cs.l = 1;
2391 }
Avi Kivity1aa36612011-04-27 13:20:30 +03002392 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2393 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002394
Avi Kivitydd856ef2012-08-27 23:46:17 +03002395 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002396 if (efer & EFER_LMA) {
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002397#ifdef CONFIG_X86_64
Nadav Amit6c6cb692014-07-21 14:37:30 +03002398 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002399
Avi Kivity717746e2011-04-20 13:37:53 +03002400 ops->get_msr(ctxt,
Gleb Natapov3fb1b5d2010-04-28 19:15:28 +03002401 ctxt->mode == X86EMUL_MODE_PROT64 ?
2402 MSR_LSTAR : MSR_CSTAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002403 ctxt->_eip = msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002404
Avi Kivity717746e2011-04-20 13:37:53 +03002405 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
Nadav Amit6c6cb692014-07-21 14:37:30 +03002406 ctxt->eflags &= ~msr_data;
Nadav Amit807c1422014-11-02 11:54:49 +02002407 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002408#endif
2409 } else {
2410 /* legacy mode */
Avi Kivity717746e2011-04-20 13:37:53 +03002411 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002412 ctxt->_eip = (u32)msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002413
Nadav Amit6c6cb692014-07-21 14:37:30 +03002414 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002415 }
2416
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002417 return X86EMUL_CONTINUE;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002418}
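
/*
 * MSR layout assumed by the SYSCALL emulation above: STAR[47:32]
 * supplies the kernel CS selector (SS is that value + 8), LSTAR and
 * CSTAR hold the 64-bit and compat entry RIP, and SYSCALL_MASK lists
 * the RFLAGS bits to clear on entry. With STAR[47:32] = 0x10, for
 * example, the handler runs with CS = 0x10 and SS = 0x18.
 */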
2419
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002420static int em_sysenter(struct x86_emulate_ctxt *ctxt)
Andre Przywara8c604352009-06-18 12:56:01 +02002421{
Mathias Krause0225fb52012-08-30 01:30:16 +02002422 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002423 struct desc_struct cs, ss;
Andre Przywara8c604352009-06-18 12:56:01 +02002424 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002425 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002426 u64 efer = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002427
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002428 ops->get_msr(ctxt, MSR_EFER, &efer);
Gleb Natapova0044752010-02-10 14:21:31 +02002429 /* inject #GP if in real mode */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002430 if (ctxt->mode == X86EMUL_MODE_REAL)
2431 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002432
Avi Kivity1a18a692012-02-01 12:23:21 +02002433 /*
2434 * Not recognized on AMD in compat mode (but is recognized in legacy
2435 * mode).
2436 */
Nadav Amitf3747372015-01-01 23:11:11 +02002437 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
Avi Kivity1a18a692012-02-01 12:23:21 +02002438 && !vendor_intel(ctxt))
2439 return emulate_ud(ctxt);
2440
Nadav Amitb2c9d432014-11-02 11:55:01 +02002441	/* sysenter/sysexit have not been tested in 64-bit mode. */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002442 if (ctxt->mode == X86EMUL_MODE_PROT64)
Nadav Amitb2c9d432014-11-02 11:55:01 +02002443 return X86EMUL_UNHANDLEABLE;
Andre Przywara8c604352009-06-18 12:56:01 +02002444
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002445 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara8c604352009-06-18 12:56:01 +02002446
Avi Kivity717746e2011-04-20 13:37:53 +03002447 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Nadav Amitf3747372015-01-01 23:11:11 +02002448 if ((msr_data & 0xfffc) == 0x0)
2449 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002450
Nadav Amit6c6cb692014-07-21 14:37:30 +03002451 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Nadav Amitf3747372015-01-01 23:11:11 +02002452 cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002453 ss_sel = cs_sel + 8;
Nadav Amitf3747372015-01-01 23:11:11 +02002454 if (efer & EFER_LMA) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002455 cs.d = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002456 cs.l = 1;
2457 }
2458
Avi Kivity1aa36612011-04-27 13:20:30 +03002459 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2460 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara8c604352009-06-18 12:56:01 +02002461
Avi Kivity717746e2011-04-20 13:37:53 +03002462 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
Nadav Amitf3747372015-01-01 23:11:11 +02002463 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002464
Avi Kivity717746e2011-04-20 13:37:53 +03002465 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
Nadav Amitf3747372015-01-01 23:11:11 +02002466 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2467 (u32)msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002468
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002469 return X86EMUL_CONTINUE;
Andre Przywara8c604352009-06-18 12:56:01 +02002470}
2471
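/*
 * For illustration (hypothetical MSR value): if the guest sets
 * MSR_IA32_SYSENTER_CS = 0x10, em_sysenter() above yields CS = 0x10
 * (RPL bits masked off) and SS = 0x18 (CS + 8), per the flat-segment
 * layout that SYSENTER assumes.
 */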
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002472static int em_sysexit(struct x86_emulate_ctxt *ctxt)
Andre Przywara4668f052009-06-18 12:56:02 +02002473{
Mathias Krause0225fb52012-08-30 01:30:16 +02002474 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002475 struct desc_struct cs, ss;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002476 u64 msr_data, rcx, rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002477 int usermode;
Xiao Guangrong1249b962011-05-15 23:25:10 +08002478 u16 cs_sel = 0, ss_sel = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002479
Gleb Natapova0044752010-02-10 14:21:31 +02002480 /* inject #GP if in real mode or Virtual 8086 mode */
2481 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002482 ctxt->mode == X86EMUL_MODE_VM86)
2483 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002484
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002485 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara4668f052009-06-18 12:56:02 +02002486
Avi Kivity9dac77f2011-06-01 15:34:25 +03002487 if ((ctxt->rex_prefix & 0x8) != 0x0)
Andre Przywara4668f052009-06-18 12:56:02 +02002488 usermode = X86EMUL_MODE_PROT64;
2489 else
2490 usermode = X86EMUL_MODE_PROT32;
2491
Nadav Amit234f3ce2014-09-18 22:39:38 +03002492 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2493 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2494
Andre Przywara4668f052009-06-18 12:56:02 +02002495 cs.dpl = 3;
2496 ss.dpl = 3;
Avi Kivity717746e2011-04-20 13:37:53 +03002497 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara4668f052009-06-18 12:56:02 +02002498 switch (usermode) {
2499 case X86EMUL_MODE_PROT32:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002500 cs_sel = (u16)(msr_data + 16);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002501 if ((msr_data & 0xfffc) == 0x0)
2502 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002503 ss_sel = (u16)(msr_data + 24);
Nadav Amitbf0b6822014-09-18 22:39:45 +03002504 rcx = (u32)rcx;
2505 rdx = (u32)rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002506 break;
2507 case X86EMUL_MODE_PROT64:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002508 cs_sel = (u16)(msr_data + 32);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002509 if (msr_data == 0x0)
2510 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002511 ss_sel = cs_sel + 8;
2512 cs.d = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002513 cs.l = 1;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002514 if (is_noncanonical_address(rcx) ||
2515 is_noncanonical_address(rdx))
2516 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002517 break;
2518 }
Gleb Natapov79168fd2010-04-28 19:15:30 +03002519 cs_sel |= SELECTOR_RPL_MASK;
2520 ss_sel |= SELECTOR_RPL_MASK;
Andre Przywara4668f052009-06-18 12:56:02 +02002521
Avi Kivity1aa36612011-04-27 13:20:30 +03002522 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2523 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara4668f052009-06-18 12:56:02 +02002524
Nadav Amit234f3ce2014-09-18 22:39:38 +03002525 ctxt->_eip = rdx;
2526 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
Andre Przywara4668f052009-06-18 12:56:02 +02002527
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002528 return X86EMUL_CONTINUE;
Andre Przywara4668f052009-06-18 12:56:02 +02002529}
2530
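/*
 * For illustration (hypothetical MSR value): with
 * MSR_IA32_SYSENTER_CS = 0x10, em_sysexit() above returns to 32-bit
 * user mode with CS = (0x10 + 16) | 3 = 0x23 and
 * SS = (0x10 + 24) | 3 = 0x2b; a 64-bit exit instead uses
 * CS = (0x10 + 32) | 3 = 0x33 and SS = 0x38 | 3 = 0x3b.
 */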
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002531static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002532{
2533 int iopl;
2534 if (ctxt->mode == X86EMUL_MODE_REAL)
2535 return false;
2536 if (ctxt->mode == X86EMUL_MODE_VM86)
2537 return true;
2538 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002539 return ctxt->ops->cpl(ctxt) > iopl;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002540}
2541
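/*
 * For illustration: emulator_bad_iopl() above returns true for a
 * CPL 3 guest thread running with IOPL 0, so its IN/OUT must pass
 * the TSS I/O permission bitmap check below; in real mode there is
 * no check at all, and in VM86 mode the bitmap is always consulted.
 */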
2542static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002543 u16 port, u16 len)
2544{
Mathias Krause0225fb52012-08-30 01:30:16 +02002545 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002546 struct desc_struct tr_seg;
Gleb Natapov5601d052011-03-07 14:55:06 +02002547 u32 base3;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002548 int r;
Avi Kivity1aa36612011-04-27 13:20:30 +03002549 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002550 unsigned mask = (1 << len) - 1;
Gleb Natapov5601d052011-03-07 14:55:06 +02002551 unsigned long base;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002552
Avi Kivity1aa36612011-04-27 13:20:30 +03002553 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002554 if (!tr_seg.p)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002555 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002556 if (desc_limit_scaled(&tr_seg) < 103)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002557 return false;
Gleb Natapov5601d052011-03-07 14:55:06 +02002558 base = get_desc_base(&tr_seg);
2559#ifdef CONFIG_X86_64
2560 base |= ((u64)base3) << 32;
2561#endif
Avi Kivity0f65dd72011-04-20 13:37:53 +03002562 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002563 if (r != X86EMUL_CONTINUE)
2564 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002565 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002566 return false;
Avi Kivity0f65dd72011-04-20 13:37:53 +03002567 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002568 if (r != X86EMUL_CONTINUE)
2569 return false;
2570 if ((perm >> bit_idx) & mask)
2571 return false;
2572 return true;
2573}
2574
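/*
 * Worked example for the bitmap lookup above (hypothetical values):
 * for port 0x3f8 and len 2, bit_idx = 0x3f8 & 7 = 0 and
 * mask = (1 << 2) - 1 = 3; a u16 is read at
 * base + io_bitmap_ptr + 0x3f8/8 = base + io_bitmap_ptr + 127, and
 * the access is allowed only if both low bits are clear.
 */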
2575static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002576 u16 port, u16 len)
2577{
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002578 if (ctxt->perm_ok)
2579 return true;
2580
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002581 if (emulator_bad_iopl(ctxt))
2582 if (!emulator_io_port_access_allowed(ctxt, port, len))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002583 return false;
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002584
2585 ctxt->perm_ok = true;
2586
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002587 return true;
2588}
2589
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002590static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002591 struct tss_segment_16 *tss)
2592{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002593 tss->ip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002594 tss->flag = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002595 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2596 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2597 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2598 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2599 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2600 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2601 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2602 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002603
Avi Kivity1aa36612011-04-27 13:20:30 +03002604 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2605 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2606 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2607 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2608 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002609}
2610
2611static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002612 struct tss_segment_16 *tss)
2613{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002614 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002615 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002616
Avi Kivity9dac77f2011-06-01 15:34:25 +03002617 ctxt->_eip = tss->ip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002618 ctxt->eflags = tss->flag | 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002619 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2620 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2621 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2622 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2623 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2624 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2625 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2626 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002627
2628 /*
2629 * SDM says that segment selectors are loaded before segment
2630 * descriptors
2631 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002632 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2633 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2634 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2635 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2636 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002637
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002638 cpl = tss->cs & 3;
2639
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002640 /*
Guo Chaofc058682012-06-28 15:19:51 +08002641	 * Now load segment descriptors. If a fault happens at this stage,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002642	 * it is handled in the context of the new task.
2643 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002644 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002645 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002646 if (ret != X86EMUL_CONTINUE)
2647 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002648 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002649 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002650 if (ret != X86EMUL_CONTINUE)
2651 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002652 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002653 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002654 if (ret != X86EMUL_CONTINUE)
2655 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002656 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002657 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002658 if (ret != X86EMUL_CONTINUE)
2659 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002660 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002661 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002662 if (ret != X86EMUL_CONTINUE)
2663 return ret;
2664
2665 return X86EMUL_CONTINUE;
2666}
2667
2668static int task_switch_16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002669 u16 tss_selector, u16 old_tss_sel,
2670 ulong old_tss_base, struct desc_struct *new_desc)
2671{
Mathias Krause0225fb52012-08-30 01:30:16 +02002672 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002673 struct tss_segment_16 tss_seg;
2674 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002675 u32 new_tss_base = get_desc_base(new_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002676
Avi Kivity0f65dd72011-04-20 13:37:53 +03002677 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002678 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002679 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002680 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002681
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002682 save_state_to_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002683
Avi Kivity0f65dd72011-04-20 13:37:53 +03002684 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002685 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002686 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002687 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002688
Avi Kivity0f65dd72011-04-20 13:37:53 +03002689 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002690 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002691 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002692 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002693
2694 if (old_tss_sel != 0xffff) {
2695 tss_seg.prev_task_link = old_tss_sel;
2696
Avi Kivity0f65dd72011-04-20 13:37:53 +03002697 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002698 &tss_seg.prev_task_link,
2699 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002700 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002701 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002702 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002703 }
2704
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002705 return load_state_from_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002706}
2707
2708static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002709 struct tss_segment_32 *tss)
2710{
Nadav Amit5c7411e2014-04-07 18:37:47 +03002711	/* CR3 and the LDT selector are intentionally not saved */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002712 tss->eip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002713 tss->eflags = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002714 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2715 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2716 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2717 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2718 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2719 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2720 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2721 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002722
Avi Kivity1aa36612011-04-27 13:20:30 +03002723 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2724 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2725 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2726 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2727 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2728 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002729}
2730
2731static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002732 struct tss_segment_32 *tss)
2733{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002734 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002735 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002736
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002737 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002738 return emulate_gp(ctxt, 0);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002739 ctxt->_eip = tss->eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002740 ctxt->eflags = tss->eflags | 2;
Kevin Wolf4cee4792012-02-08 14:34:41 +01002741
2742 /* General purpose registers */
Avi Kivitydd856ef2012-08-27 23:46:17 +03002743 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2744 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2745 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2746 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2747 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2748 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2749 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2750 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002751
2752 /*
2753 * SDM says that segment selectors are loaded before segment
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002754 * descriptors. This is important because CPL checks will
2755 * use CS.RPL.
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002756 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002757 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2758 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2759 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2760 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2761 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2762 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2763 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002764
2765 /*
Kevin Wolf4cee4792012-02-08 14:34:41 +01002766 * If we're switching between Protected Mode and VM86, we need to make
2767 * sure to update the mode before loading the segment descriptors so
2768 * that the selectors are interpreted correctly.
Kevin Wolf4cee4792012-02-08 14:34:41 +01002769 */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002770 if (ctxt->eflags & X86_EFLAGS_VM) {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002771 ctxt->mode = X86EMUL_MODE_VM86;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002772 cpl = 3;
2773 } else {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002774 ctxt->mode = X86EMUL_MODE_PROT32;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002775 cpl = tss->cs & 3;
2776 }
Kevin Wolf4cee4792012-02-08 14:34:41 +01002777
2778 /*
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002779	 * Now load segment descriptors. If a fault happens at this stage,
2780	 * it is handled in the context of the new task.
2781 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002782 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002783 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002784 if (ret != X86EMUL_CONTINUE)
2785 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002786 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002787 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002788 if (ret != X86EMUL_CONTINUE)
2789 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002790 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002791 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002792 if (ret != X86EMUL_CONTINUE)
2793 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002794 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002795 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002796 if (ret != X86EMUL_CONTINUE)
2797 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002798 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002799 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002800 if (ret != X86EMUL_CONTINUE)
2801 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002802 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002803 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002804 if (ret != X86EMUL_CONTINUE)
2805 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002806 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002807 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002808 if (ret != X86EMUL_CONTINUE)
2809 return ret;
2810
2811 return X86EMUL_CONTINUE;
2812}
2813
2814static int task_switch_32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002815 u16 tss_selector, u16 old_tss_sel,
2816 ulong old_tss_base, struct desc_struct *new_desc)
2817{
Mathias Krause0225fb52012-08-30 01:30:16 +02002818 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002819 struct tss_segment_32 tss_seg;
2820 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002821 u32 new_tss_base = get_desc_base(new_desc);
Nadav Amit5c7411e2014-04-07 18:37:47 +03002822 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2823 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002824
Avi Kivity0f65dd72011-04-20 13:37:53 +03002825 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002826 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002827 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002828 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002829
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002830 save_state_to_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002831
Nadav Amit5c7411e2014-04-07 18:37:47 +03002832 /* Only GP registers and segment selectors are saved */
2833 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2834 ldt_sel_offset - eip_offset, &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002835 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002836 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002837
Avi Kivity0f65dd72011-04-20 13:37:53 +03002838 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002839 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002840 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002841 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002842
2843 if (old_tss_sel != 0xffff) {
2844 tss_seg.prev_task_link = old_tss_sel;
2845
Avi Kivity0f65dd72011-04-20 13:37:53 +03002846 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002847 &tss_seg.prev_task_link,
2848 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002849 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002850 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002851 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002852 }
2853
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002854 return load_state_from_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002855}
2856
2857static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002858 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002859 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002860{
Mathias Krause0225fb52012-08-30 01:30:16 +02002861 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002862 struct desc_struct curr_tss_desc, next_tss_desc;
2863 int ret;
Avi Kivity1aa36612011-04-27 13:20:30 +03002864 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002865 ulong old_tss_base =
Avi Kivity4bff1e862011-04-20 13:37:53 +03002866 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
Gleb Natapovceffb452010-03-18 15:20:19 +02002867 u32 desc_limit;
Avi Kivitye9194642012-06-13 16:29:39 +03002868 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002869
2870 /* FIXME: old_tss_base == ~0 ? */
2871
Avi Kivitye9194642012-06-13 16:29:39 +03002872 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002873 if (ret != X86EMUL_CONTINUE)
2874 return ret;
Avi Kivitye9194642012-06-13 16:29:39 +03002875 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002876 if (ret != X86EMUL_CONTINUE)
2877 return ret;
2878
2879 /* FIXME: check that next_tss_desc is tss */
2880
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002881 /*
2882 * Check privileges. The three cases are task switch caused by...
2883 *
2884 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2885 * 2. Exception/IRQ/iret: No check is performed
Nadav Amit2c2ca2d2014-11-02 11:54:57 +02002886 * 3. jmp/call to TSS/task-gate: No check is performed since the
2887 * hardware checks it before exiting.
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002888 */
2889 if (reason == TASK_SWITCH_GATE) {
2890 if (idt_index != -1) {
2891 /* Software interrupts */
2892 struct desc_struct task_gate_desc;
2893 int dpl;
2894
2895 ret = read_interrupt_descriptor(ctxt, idt_index,
2896 &task_gate_desc);
2897 if (ret != X86EMUL_CONTINUE)
2898 return ret;
2899
2900 dpl = task_gate_desc.dpl;
2901 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2902 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2903 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002904 }
2905
Gleb Natapovceffb452010-03-18 15:20:19 +02002906 desc_limit = desc_limit_scaled(&next_tss_desc);
2907 if (!next_tss_desc.p ||
2908 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2909 desc_limit < 0x2b)) {
Paolo Bonzini592f0852014-08-20 10:05:08 +02002910 return emulate_ts(ctxt, tss_selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002911 }
2912
2913 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2914 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002915 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002916 }
2917
2918 if (reason == TASK_SWITCH_IRET)
2919 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2920
2921	/* Set the back link to the previous task only if the NT bit is set
Guo Chaofc058682012-06-28 15:19:51 +08002922	   in eflags; note that old_tss_sel is not used after this point. */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002923 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2924 old_tss_sel = 0xffff;
2925
2926 if (next_tss_desc.type & 8)
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002927 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002928 old_tss_base, &next_tss_desc);
2929 else
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002930 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002931 old_tss_base, &next_tss_desc);
Jan Kiszka0760d442010-04-14 15:50:57 +02002932 if (ret != X86EMUL_CONTINUE)
2933 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002934
2935 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2936 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2937
2938 if (reason != TASK_SWITCH_IRET) {
2939 next_tss_desc.type |= (1 << 1); /* set busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002940 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002941 }
2942
Avi Kivity717746e2011-04-20 13:37:53 +03002943 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
Avi Kivity1aa36612011-04-27 13:20:30 +03002944 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002945
Jan Kiszkae269fb22010-04-14 15:51:09 +02002946 if (has_error_code) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002947 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2948 ctxt->lock_prefix = 0;
2949 ctxt->src.val = (unsigned long) error_code;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002950 ret = em_push(ctxt);
Jan Kiszkae269fb22010-04-14 15:51:09 +02002951 }
2952
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002953 return ret;
2954}
2955
2956int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002957 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002958 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002959{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002960 int rc;
2961
Avi Kivitydd856ef2012-08-27 23:46:17 +03002962 invalidate_registers(ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002963 ctxt->_eip = ctxt->eip;
2964 ctxt->dst.type = OP_NONE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002965
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002966 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002967 has_error_code, error_code);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002968
Avi Kivitydd856ef2012-08-27 23:46:17 +03002969 if (rc == X86EMUL_CONTINUE) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002970 ctxt->eip = ctxt->_eip;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002971 writeback_registers(ctxt);
2972 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002973
Gleb Natapova0c0ab22011-03-28 16:57:49 +02002974 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002975}
2976
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03002977static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2978 struct operand *op)
Gleb Natapova682e352010-03-18 15:20:21 +02002979{
Gleb Natapovb3356bf2012-09-03 15:24:29 +03002980 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
Gleb Natapova682e352010-03-18 15:20:21 +02002981
Paolo Bonzini01485a22014-11-19 18:25:08 +01002982 register_address_increment(ctxt, reg, df * op->bytes);
2983 op->addr.mem.ea = register_address(ctxt, reg);
Gleb Natapova682e352010-03-18 15:20:21 +02002984}
2985
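/*
 * For illustration: for a word-sized string op (op->bytes == 2,
 * op->count == 1), string_addr_inc() above advances the index
 * register by +2 with EFLG_DF clear and by -2 with EFLG_DF set;
 * a REP run with op->count == 3 steps by +/-6.
 */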
Avi Kivity7af04fc2010-08-18 14:16:35 +03002986static int em_das(struct x86_emulate_ctxt *ctxt)
2987{
Avi Kivity7af04fc2010-08-18 14:16:35 +03002988 u8 al, old_al;
2989 bool af, cf, old_cf;
2990
2991 cf = ctxt->eflags & X86_EFLAGS_CF;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002992 al = ctxt->dst.val;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002993
2994 old_al = al;
2995 old_cf = cf;
2996 cf = false;
2997 af = ctxt->eflags & X86_EFLAGS_AF;
2998 if ((al & 0x0f) > 9 || af) {
2999 al -= 6;
3000 cf = old_cf | (al >= 250);
3001 af = true;
3002 } else {
3003 af = false;
3004 }
3005 if (old_al > 0x99 || old_cf) {
3006 al -= 0x60;
3007 cf = true;
3008 }
3009
Avi Kivity9dac77f2011-06-01 15:34:25 +03003010 ctxt->dst.val = al;
Avi Kivity7af04fc2010-08-18 14:16:35 +03003011 /* Set PF, ZF, SF */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003012 ctxt->src.type = OP_IMM;
3013 ctxt->src.val = 0;
3014 ctxt->src.bytes = 1;
Avi Kivity158de572013-01-19 19:51:57 +02003015 fastop(ctxt, em_or);
Avi Kivity7af04fc2010-08-18 14:16:35 +03003016 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3017 if (cf)
3018 ctxt->eflags |= X86_EFLAGS_CF;
3019 if (af)
3020 ctxt->eflags |= X86_EFLAGS_AF;
3021 return X86EMUL_CONTINUE;
3022}
3023
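/*
 * Worked example for em_das() above (hypothetical input, CF/AF clear):
 * AL = 0x9c: the low nibble 0xc > 9 forces AL -= 6 (AL = 0x96, AF set),
 * and old AL 0x9c > 0x99 forces AL -= 0x60, so the result is AL = 0x36
 * with CF and AF set, the packed-BCD adjustment after subtraction.
 */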
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02003024static int em_aam(struct x86_emulate_ctxt *ctxt)
3025{
3026 u8 al, ah;
3027
3028 if (ctxt->src.val == 0)
3029 return emulate_de(ctxt);
3030
3031 al = ctxt->dst.val & 0xff;
3032 ah = al / ctxt->src.val;
3033 al %= ctxt->src.val;
3034
3035 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3036
3037 /* Set PF, ZF, SF */
3038 ctxt->src.type = OP_IMM;
3039 ctxt->src.val = 0;
3040 ctxt->src.bytes = 1;
3041 fastop(ctxt, em_or);
3042
3043 return X86EMUL_CONTINUE;
3044}
3045
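/*
 * Worked example for em_aam() above (default divisor 10): AL = 0x4b
 * (75 decimal) yields AH = 75 / 10 = 7 and AL = 75 % 10 = 5, i.e.
 * AX = 0x0705; a divisor of zero raises #DE instead.
 */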
Gleb Natapov7f662272012-12-10 11:42:30 +02003046static int em_aad(struct x86_emulate_ctxt *ctxt)
3047{
3048 u8 al = ctxt->dst.val & 0xff;
3049 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3050
3051 al = (al + (ah * ctxt->src.val)) & 0xff;
3052
3053 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3054
Gleb Natapovf583c292013-02-13 17:50:39 +02003055 /* Set PF, ZF, SF */
3056 ctxt->src.type = OP_IMM;
3057 ctxt->src.val = 0;
3058 ctxt->src.bytes = 1;
3059 fastop(ctxt, em_or);
Gleb Natapov7f662272012-12-10 11:42:30 +02003060
3061 return X86EMUL_CONTINUE;
3062}
3063
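/*
 * Worked example for em_aad() above (default multiplier 10):
 * AX = 0x0705 yields AL = (5 + 7 * 10) & 0xff = 0x4b with AH
 * cleared, undoing the em_aam() split shown in the example above.
 */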
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003064static int em_call(struct x86_emulate_ctxt *ctxt)
3065{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003066 int rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003067 long rel = ctxt->src.val;
3068
3069 ctxt->src.val = (unsigned long)ctxt->_eip;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003070 rc = jmp_rel(ctxt, rel);
3071 if (rc != X86EMUL_CONTINUE)
3072 return rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003073 return em_push(ctxt);
3074}
3075
Avi Kivity0ef753b2010-08-18 14:51:45 +03003076static int em_call_far(struct x86_emulate_ctxt *ctxt)
3077{
Avi Kivity0ef753b2010-08-18 14:51:45 +03003078 u16 sel, old_cs;
3079 ulong old_eip;
3080 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03003081 struct desc_struct old_desc, new_desc;
3082 const struct x86_emulate_ops *ops = ctxt->ops;
3083 int cpl = ctxt->ops->cpl(ctxt);
Nadav Amit82268082015-01-26 09:32:27 +02003084 enum x86emul_mode prev_mode = ctxt->mode;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003085
Avi Kivity9dac77f2011-06-01 15:34:25 +03003086 old_eip = ctxt->_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003087 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003088
Avi Kivity9dac77f2011-06-01 15:34:25 +03003089 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Nadav Amit3dc4bc42014-12-25 02:52:19 +02003090 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3091 X86_TRANSFER_CALL_JMP, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03003092 if (rc != X86EMUL_CONTINUE)
Nadav Amit80976db2014-12-25 02:52:20 +02003093 return rc;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003094
Nadav Amitd50eaa12014-11-19 17:43:11 +02003095 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03003096 if (rc != X86EMUL_CONTINUE)
3097 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003098
Avi Kivity9dac77f2011-06-01 15:34:25 +03003099 ctxt->src.val = old_cs;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09003100 rc = em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003101 if (rc != X86EMUL_CONTINUE)
Nadav Amitd1442d82014-09-18 22:39:39 +03003102 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003103
Avi Kivity9dac77f2011-06-01 15:34:25 +03003104 ctxt->src.val = old_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003105 rc = em_push(ctxt);
3106 /* If we failed, we tainted the memory, but the very least we should
3107 restore cs */
Nadav Amit82268082015-01-26 09:32:27 +02003108 if (rc != X86EMUL_CONTINUE) {
3109 pr_warn_once("faulting far call emulation tainted memory\n");
Nadav Amitd1442d82014-09-18 22:39:39 +03003110 goto fail;
Nadav Amit82268082015-01-26 09:32:27 +02003111 }
Nadav Amitd1442d82014-09-18 22:39:39 +03003112 return rc;
3113fail:
3114 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
Nadav Amit82268082015-01-26 09:32:27 +02003115 ctxt->mode = prev_mode;
Nadav Amitd1442d82014-09-18 22:39:39 +03003116 return rc;
3117
Avi Kivity0ef753b2010-08-18 14:51:45 +03003118}
3119
Avi Kivity40ece7c2010-08-18 15:12:09 +03003120static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3121{
Avi Kivity40ece7c2010-08-18 15:12:09 +03003122 int rc;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003123 unsigned long eip;
Avi Kivity40ece7c2010-08-18 15:12:09 +03003124
Nadav Amit234f3ce2014-09-18 22:39:38 +03003125 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3126 if (rc != X86EMUL_CONTINUE)
3127 return rc;
3128 rc = assign_eip_near(ctxt, eip);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003129 if (rc != X86EMUL_CONTINUE)
3130 return rc;
Avi Kivity5ad105e2012-08-19 14:34:31 +03003131 rsp_increment(ctxt, ctxt->src.val);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003132 return X86EMUL_CONTINUE;
3133}
3134
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003135static int em_xchg(struct x86_emulate_ctxt *ctxt)
3136{
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003137 /* Write back the register source. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003138 ctxt->src.val = ctxt->dst.val;
3139 write_register_operand(&ctxt->src);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003140
3141 /* Write back the memory destination with implicit LOCK prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003142 ctxt->dst.val = ctxt->src.orig_val;
3143 ctxt->lock_prefix = 1;
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003144 return X86EMUL_CONTINUE;
3145}
3146
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003147static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3148{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003149 ctxt->dst.val = ctxt->src2.val;
Avi Kivity4d758342013-01-19 19:51:55 +02003150 return fastop(ctxt, em_imul);
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003151}
3152
Avi Kivity61429142010-08-19 15:13:00 +03003153static int em_cwd(struct x86_emulate_ctxt *ctxt)
3154{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003155 ctxt->dst.type = OP_REG;
3156 ctxt->dst.bytes = ctxt->src.bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03003157 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivity9dac77f2011-06-01 15:34:25 +03003158 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
Avi Kivity61429142010-08-19 15:13:00 +03003159
3160 return X86EMUL_CONTINUE;
3161}
3162
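/*
 * For illustration: with 16-bit operands, em_cwd() above replicates
 * the sign bit of AX into DX: AX = 0x8000 gives DX = 0xffff, while
 * AX = 0x1234 gives DX = 0; the same expression also covers CDQ and
 * CQO for the wider operand sizes.
 */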
Avi Kivity48bb5d32010-08-18 18:54:34 +03003163static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3164{
Avi Kivity48bb5d32010-08-18 18:54:34 +03003165 u64 tsc = 0;
3166
Avi Kivity717746e2011-04-20 13:37:53 +03003167 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003168 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3169 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
Avi Kivity48bb5d32010-08-18 18:54:34 +03003170 return X86EMUL_CONTINUE;
3171}
3172
Avi Kivity222d21a2011-11-10 14:57:30 +02003173static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3174{
3175 u64 pmc;
3176
Avi Kivitydd856ef2012-08-27 23:46:17 +03003177 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
Avi Kivity222d21a2011-11-10 14:57:30 +02003178 return emulate_gp(ctxt, 0);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003179 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3180 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
Avi Kivity222d21a2011-11-10 14:57:30 +02003181 return X86EMUL_CONTINUE;
3182}
3183
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003184static int em_mov(struct x86_emulate_ctxt *ctxt)
3185{
Paolo Bonzini54cfdb32014-03-27 11:36:25 +01003186 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003187 return X86EMUL_CONTINUE;
3188}
3189
Borislav Petkov84cffe42013-10-29 12:54:56 +01003190#define FFL(x) bit(X86_FEATURE_##x)
3191
3192static int em_movbe(struct x86_emulate_ctxt *ctxt)
3193{
3194 u32 ebx, ecx, edx, eax = 1;
3195 u16 tmp;
3196
3197 /*
3198	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3199 */
3200 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3201 if (!(ecx & FFL(MOVBE)))
3202 return emulate_ud(ctxt);
3203
3204 switch (ctxt->op_bytes) {
3205 case 2:
3206 /*
3207 * From MOVBE definition: "...When the operand size is 16 bits,
3208 * the upper word of the destination register remains unchanged
3209 * ..."
3210 *
3211 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3212	 * rules, so we have to do the operation almost by hand.
3213 */
3214 tmp = (u16)ctxt->src.val;
3215 ctxt->dst.val &= ~0xffffUL;
3216 ctxt->dst.val |= (unsigned long)swab16(tmp);
3217 break;
3218 case 4:
3219 ctxt->dst.val = swab32((u32)ctxt->src.val);
3220 break;
3221 case 8:
3222 ctxt->dst.val = swab64(ctxt->src.val);
3223 break;
3224 default:
Paolo Bonzini592f0852014-08-20 10:05:08 +02003225 BUG();
Borislav Petkov84cffe42013-10-29 12:54:56 +01003226 }
3227 return X86EMUL_CONTINUE;
3228}
3229
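/*
 * For illustration: a 16-bit MOVBE with source 0x1234 stores 0x3412
 * in the low word of the destination and leaves the upper bits
 * unchanged, matching the swab16() path in em_movbe() above; the
 * 32- and 64-bit cases byte-swap the whole operand.
 */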
Takuya Yoshikawabc00f8d2011-11-22 15:19:19 +09003230static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3231{
3232 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3233 return emulate_gp(ctxt, 0);
3234
3235 /* Disable writeback. */
3236 ctxt->dst.type = OP_NONE;
3237 return X86EMUL_CONTINUE;
3238}
3239
3240static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3241{
3242 unsigned long val;
3243
3244 if (ctxt->mode == X86EMUL_MODE_PROT64)
3245 val = ctxt->src.val & ~0ULL;
3246 else
3247 val = ctxt->src.val & ~0U;
3248
3249 /* #UD condition is already handled. */
3250 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3251 return emulate_gp(ctxt, 0);
3252
3253 /* Disable writeback. */
3254 ctxt->dst.type = OP_NONE;
3255 return X86EMUL_CONTINUE;
3256}
3257
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003258static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3259{
3260 u64 msr_data;
3261
Avi Kivitydd856ef2012-08-27 23:46:17 +03003262 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3263 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3264 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003265 return emulate_gp(ctxt, 0);
3266
3267 return X86EMUL_CONTINUE;
3268}
3269
3270static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3271{
3272 u64 msr_data;
3273
Avi Kivitydd856ef2012-08-27 23:46:17 +03003274 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003275 return emulate_gp(ctxt, 0);
3276
Avi Kivitydd856ef2012-08-27 23:46:17 +03003277 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3278 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003279 return X86EMUL_CONTINUE;
3280}
3281
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003282static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3283{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003284 if (ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003285 return emulate_ud(ctxt);
3286
Avi Kivity9dac77f2011-06-01 15:34:25 +03003287 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
Nadav Amitb5bbf102014-11-02 11:54:46 +02003288 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3289 ctxt->dst.bytes = 2;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003290 return X86EMUL_CONTINUE;
3291}
3292
3293static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3294{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003295 u16 sel = ctxt->src.val;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003296
Avi Kivity9dac77f2011-06-01 15:34:25 +03003297 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003298 return emulate_ud(ctxt);
3299
Avi Kivity9dac77f2011-06-01 15:34:25 +03003300 if (ctxt->modrm_reg == VCPU_SREG_SS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003301 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3302
3303 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003304 ctxt->dst.type = OP_NONE;
3305 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003306}
3307
Avi Kivitya14e5792012-06-13 12:28:33 +03003308static int em_lldt(struct x86_emulate_ctxt *ctxt)
3309{
3310 u16 sel = ctxt->src.val;
3311
3312 /* Disable writeback. */
3313 ctxt->dst.type = OP_NONE;
3314 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3315}
3316
Avi Kivity80890002012-06-13 16:33:29 +03003317static int em_ltr(struct x86_emulate_ctxt *ctxt)
3318{
3319 u16 sel = ctxt->src.val;
3320
3321 /* Disable writeback. */
3322 ctxt->dst.type = OP_NONE;
3323 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3324}
3325
Avi Kivity38503912011-03-31 18:48:09 +02003326static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3327{
Avi Kivity9fa088f2011-03-31 18:54:30 +02003328 int rc;
3329 ulong linear;
3330
Avi Kivity9dac77f2011-06-01 15:34:25 +03003331 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02003332 if (rc == X86EMUL_CONTINUE)
Avi Kivity3cb16fe2011-04-20 15:38:44 +03003333 ctxt->ops->invlpg(ctxt, linear);
Avi Kivity38503912011-03-31 18:48:09 +02003334 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003335 ctxt->dst.type = OP_NONE;
Avi Kivity38503912011-03-31 18:48:09 +02003336 return X86EMUL_CONTINUE;
3337}
3338
Avi Kivity2d04a052011-04-20 15:32:49 +03003339static int em_clts(struct x86_emulate_ctxt *ctxt)
3340{
3341 ulong cr0;
3342
3343 cr0 = ctxt->ops->get_cr(ctxt, 0);
3344 cr0 &= ~X86_CR0_TS;
3345 ctxt->ops->set_cr(ctxt, 0, cr0);
3346 return X86EMUL_CONTINUE;
3347}
3348
Jan Kiszkab34a8052015-03-09 20:27:43 +01003349static int em_hypercall(struct x86_emulate_ctxt *ctxt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003350{
Nadav Amit0f54a322014-08-29 11:26:55 +03003351 int rc = ctxt->ops->fix_hypercall(ctxt);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003352
Avi Kivity26d05cc2011-04-21 12:07:59 +03003353 if (rc != X86EMUL_CONTINUE)
3354 return rc;
3355
3356 /* Let the processor re-execute the fixed hypercall */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003357 ctxt->_eip = ctxt->eip;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003358 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003359 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003360 return X86EMUL_CONTINUE;
3361}
3362
Avi Kivity96051572012-06-10 17:21:18 +03003363static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3364 void (*get)(struct x86_emulate_ctxt *ctxt,
3365 struct desc_ptr *ptr))
3366{
3367 struct desc_ptr desc_ptr;
3368
3369 if (ctxt->mode == X86EMUL_MODE_PROT64)
3370 ctxt->op_bytes = 8;
3371 get(ctxt, &desc_ptr);
3372 if (ctxt->op_bytes == 2) {
3373 ctxt->op_bytes = 4;
3374 desc_ptr.address &= 0x00ffffff;
3375 }
3376 /* Disable writeback. */
3377 ctxt->dst.type = OP_NONE;
3378 return segmented_write(ctxt, ctxt->dst.addr.mem,
3379 &desc_ptr, 2 + ctxt->op_bytes);
3380}
3381
3382static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3383{
3384 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3385}
3386
3387static int em_sidt(struct x86_emulate_ctxt *ctxt)
3388{
3389 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3390}
3391
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003392static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003393{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003394 struct desc_ptr desc_ptr;
3395 int rc;
3396
Avi Kivity510425f2012-06-07 17:04:36 +03003397 if (ctxt->mode == X86EMUL_MODE_PROT64)
3398 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003399 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Avi Kivity26d05cc2011-04-21 12:07:59 +03003400 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003401 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003402 if (rc != X86EMUL_CONTINUE)
3403 return rc;
Nadav Amit9a9abf62014-11-02 11:54:56 +02003404 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3405 is_noncanonical_address(desc_ptr.address))
3406 return emulate_gp(ctxt, 0);
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003407 if (lgdt)
3408 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3409 else
3410 ctxt->ops->set_idt(ctxt, &desc_ptr);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003411 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003412 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003413 return X86EMUL_CONTINUE;
3414}
3415
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003416static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3417{
3418 return em_lgdt_lidt(ctxt, true);
3419}
3420
Avi Kivity26d05cc2011-04-21 12:07:59 +03003421static int em_lidt(struct x86_emulate_ctxt *ctxt)
3422{
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003423 return em_lgdt_lidt(ctxt, false);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003424}
3425
3426static int em_smsw(struct x86_emulate_ctxt *ctxt)
3427{
Nadav Amit32e94d02014-06-02 18:34:11 +03003428 if (ctxt->dst.type == OP_MEM)
3429 ctxt->dst.bytes = 2;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003430 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003431 return X86EMUL_CONTINUE;
3432}
3433
3434static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3435{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003436 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003437 | (ctxt->src.val & 0x0f));
3438 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003439 return X86EMUL_CONTINUE;
3440}
3441
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003442static int em_loop(struct x86_emulate_ctxt *ctxt)
3443{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003444 int rc = X86EMUL_CONTINUE;
3445
Paolo Bonzini01485a22014-11-19 18:25:08 +01003446 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003447 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
Avi Kivity9dac77f2011-06-01 15:34:25 +03003448 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
Nadav Amit234f3ce2014-09-18 22:39:38 +03003449 rc = jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003450
Nadav Amit234f3ce2014-09-18 22:39:38 +03003451 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003452}
3453
3454static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3455{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003456 int rc = X86EMUL_CONTINUE;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003457
Nadav Amit234f3ce2014-09-18 22:39:38 +03003458 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3459 rc = jmp_rel(ctxt, ctxt->src.val);
3460
3461 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003462}
3463
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003464static int em_in(struct x86_emulate_ctxt *ctxt)
3465{
3466 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3467 &ctxt->dst.val))
3468 return X86EMUL_IO_NEEDED;
3469
3470 return X86EMUL_CONTINUE;
3471}
3472
3473static int em_out(struct x86_emulate_ctxt *ctxt)
3474{
3475 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3476 &ctxt->src.val, 1);
3477 /* Disable writeback. */
3478 ctxt->dst.type = OP_NONE;
3479 return X86EMUL_CONTINUE;
3480}
3481
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003482static int em_cli(struct x86_emulate_ctxt *ctxt)
3483{
3484 if (emulator_bad_iopl(ctxt))
3485 return emulate_gp(ctxt, 0);
3486
3487 ctxt->eflags &= ~X86_EFLAGS_IF;
3488 return X86EMUL_CONTINUE;
3489}
3490
3491static int em_sti(struct x86_emulate_ctxt *ctxt)
3492{
3493 if (emulator_bad_iopl(ctxt))
3494 return emulate_gp(ctxt, 0);
3495
3496 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3497 ctxt->eflags |= X86_EFLAGS_IF;
3498 return X86EMUL_CONTINUE;
3499}
3500
Avi Kivity6d6eede2012-06-07 14:11:36 +03003501static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3502{
3503 u32 eax, ebx, ecx, edx;
3504
Avi Kivitydd856ef2012-08-27 23:46:17 +03003505 eax = reg_read(ctxt, VCPU_REGS_RAX);
3506 ecx = reg_read(ctxt, VCPU_REGS_RCX);
Avi Kivity6d6eede2012-06-07 14:11:36 +03003507 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003508 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3509 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3510 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3511 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
Avi Kivity6d6eede2012-06-07 14:11:36 +03003512 return X86EMUL_CONTINUE;
3513}
3514
Paolo Bonzini98f73632013-10-31 11:19:42 +01003515static int em_sahf(struct x86_emulate_ctxt *ctxt)
3516{
3517 u32 flags;
3518
3519 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3520 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3521
3522 ctxt->eflags &= ~0xffUL;
3523 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3524 return X86EMUL_CONTINUE;
3525}
3526
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003527static int em_lahf(struct x86_emulate_ctxt *ctxt)
3528{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003529 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3530 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003531 return X86EMUL_CONTINUE;
3532}
3533
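/*
 * For illustration: em_sahf() above copies only CF/PF/AF/ZF/SF out of
 * AH, so AH = 0xff leaves the low EFLAGS byte as 0xd7 (0xd5 plus the
 * always-one bit 1); em_lahf() is the inverse, e.g. eflags = 0x246
 * loads AH = 0x46.
 */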
Avi Kivity92998362012-06-13 12:25:06 +03003534static int em_bswap(struct x86_emulate_ctxt *ctxt)
3535{
3536 switch (ctxt->op_bytes) {
3537#ifdef CONFIG_X86_64
3538 case 8:
3539 asm("bswap %0" : "+r"(ctxt->dst.val));
3540 break;
3541#endif
3542 default:
3543 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3544 break;
3545 }
3546 return X86EMUL_CONTINUE;
3547}
3548
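/*
 * For illustration: em_bswap() above turns 0x12345678 into 0x78563412
 * for a 32-bit operand; the 64-bit case is only reachable with a
 * REX.W prefix under CONFIG_X86_64.
 */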
Nadav Amit13e457e2014-10-13 13:04:13 +03003549static int em_clflush(struct x86_emulate_ctxt *ctxt)
3550{
3551	/* Emulate CLFLUSH regardless of the CPUID feature bit. */
3552 return X86EMUL_CONTINUE;
3553}
3554
Nadav Amit2276b512015-01-26 09:32:24 +02003555static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3556{
3557 ctxt->dst.val = (s32) ctxt->src.val;
3558 return X86EMUL_CONTINUE;
3559}
3560
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003561static bool valid_cr(int nr)
3562{
3563 switch (nr) {
3564 case 0:
3565 case 2 ... 4:
3566 case 8:
3567 return true;
3568 default:
3569 return false;
3570 }
3571}
3572
3573static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3574{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003575 if (!valid_cr(ctxt->modrm_reg))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003576 return emulate_ud(ctxt);
3577
3578 return X86EMUL_CONTINUE;
3579}
3580
3581static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3582{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003583 u64 new_val = ctxt->src.val64;
3584 int cr = ctxt->modrm_reg;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003585 u64 efer = 0;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003586
3587 static u64 cr_reserved_bits[] = {
3588 0xffffffff00000000ULL,
3589 0, 0, 0, /* CR3 checked later */
3590 CR4_RESERVED_BITS,
3591 0, 0, 0,
3592 CR8_RESERVED_BITS,
3593 };
3594
3595 if (!valid_cr(cr))
3596 return emulate_ud(ctxt);
3597
3598 if (new_val & cr_reserved_bits[cr])
3599 return emulate_gp(ctxt, 0);
3600
3601 switch (cr) {
3602 case 0: {
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003603 u64 cr4;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003604 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3605 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3606 return emulate_gp(ctxt, 0);
3607
Avi Kivity717746e2011-04-20 13:37:53 +03003608 cr4 = ctxt->ops->get_cr(ctxt, 4);
3609 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003610
3611 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3612 !(cr4 & X86_CR4_PAE))
3613 return emulate_gp(ctxt, 0);
3614
3615 break;
3616 }
3617 case 3: {
3618 u64 rsvd = 0;
3619
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003620 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3621 if (efer & EFER_LMA)
Nadav Amit9d88fca2014-11-02 11:54:52 +02003622 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003623
3624 if (new_val & rsvd)
3625 return emulate_gp(ctxt, 0);
3626
3627 break;
3628 }
3629 case 4: {
Avi Kivity717746e2011-04-20 13:37:53 +03003630 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003631
3632 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3633 return emulate_gp(ctxt, 0);
3634
3635 break;
3636 }
3637 }
3638
3639 return X86EMUL_CONTINUE;
3640}
3641
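/*
 * For illustration (hypothetical guest write): moving 0x80000000 into
 * CR0 sets PG without PE, so check_cr_write() above injects #GP(0);
 * likewise enabling paging with EFER.LME set but CR4.PAE clear, or
 * setting any bit from cr_reserved_bits[], fails the same way.
 */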
Joerg Roedel3b88e412011-04-04 12:39:29 +02003642static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3643{
3644 unsigned long dr7;
3645
Avi Kivity717746e2011-04-20 13:37:53 +03003646 ctxt->ops->get_dr(ctxt, 7, &dr7);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003647
3648	/* Check if DR7.GD (general detect enable, bit 13) is set */
3649 return dr7 & (1 << 13);
3650}
3651
3652static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3653{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003654 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003655 u64 cr4;
3656
3657 if (dr > 7)
3658 return emulate_ud(ctxt);
3659
Avi Kivity717746e2011-04-20 13:37:53 +03003660 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003661 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3662 return emulate_ud(ctxt);
3663
Nadav Amit6d2a0522014-11-02 11:54:43 +02003664 if (check_dr7_gd(ctxt)) {
3665 ulong dr6;
3666
3667 ctxt->ops->get_dr(ctxt, 6, &dr6);
3668 dr6 &= ~15;
3669 dr6 |= DR6_BD | DR6_RTM;
3670 ctxt->ops->set_dr(ctxt, 6, dr6);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003671 return emulate_db(ctxt);
Nadav Amit6d2a0522014-11-02 11:54:43 +02003672 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02003673
3674 return X86EMUL_CONTINUE;
3675}
3676
3677static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3678{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003679 u64 new_val = ctxt->src.val64;
3680 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003681
3682 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3683 return emulate_gp(ctxt, 0);
3684
3685 return check_dr_read(ctxt);
3686}
3687
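/*
 * The SVM instructions #UD unless EFER.SVME is set.  VMRUN, VMLOAD and
 * VMSAVE take a physical address in RAX, so check_svme_pa() additionally
 * rejects addresses with any of the high 16 bits set.
 */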
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
        u64 efer;

        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

        if (!(efer & EFER_SVME))
                return emulate_ud(ctxt);

        return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
        u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

        /* Valid physical address? */
        if (rax & 0xffff000000000000ULL)
                return emulate_gp(ctxt, 0);

        return check_svme(ctxt);
}

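/*
 * RDTSC is restricted to CPL 0 when CR4.TSD is set, and RDPMC to CPL 0
 * unless CR4.PCE is set; RDPMC also faults on an invalid counter index.
 */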
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
        u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

        if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
                return emulate_ud(ctxt);

        return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
        u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
        u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

        if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
            ctxt->ops->check_pmc(ctxt, rcx))
                return emulate_gp(ctxt, 0);

        return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
        ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
        if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
                return emulate_gp(ctxt, 0);

        return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
        ctxt->src.bytes = min(ctxt->src.bytes, 4u);
        if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
                return emulate_gp(ctxt, 0);

        return X86EMUL_CONTINUE;
}

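/*
 * Shorthand constructors for the opcode tables below.  D() records decode
 * flags only, I() adds an ->execute handler and F() a fastop handler; the
 * DI()/DIP() and II()/IIP() variants attach an intercept reason and, for
 * the P forms, a ->check_perm hook.  EXT/G/GD/ID/MD/E redirect through a
 * sub-table selected by ModRM bits, CPU mode or an FPU escape opcode, and
 * the *2bv helpers emit the byte/word pair of an entry.
 */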
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
                      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
        { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
        { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
          .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
        IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

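/*
 * F6ALU() emits the six classic ALU encodings of one operation: r/m,reg
 * and reg,r/m in byte and word/long forms, plus AL,imm8 and rAX,imm.
 * For example, F6ALU(Lock, em_add) fills the six ADD slots 0x00-0x05 of
 * opcode_table[] below.
 */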
#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
                F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
                F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static const struct opcode group7_rm0[] = {
        N,
        I(SrcNone | Priv | EmulateOnUD, em_hypercall),
        N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
        DI(SrcNone | Priv, monitor),
        DI(SrcNone | Priv, mwait),
        N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
        DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
        II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
        DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
        DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
        DIP(SrcNone | Prot | Priv, stgi, check_svme),
        DIP(SrcNone | Prot | Priv, clgi, check_svme),
        DIP(SrcNone | Prot | Priv, skinit, check_svme),
        DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};

static const struct opcode group7_rm7[] = {
        N,
        DIP(SrcNone, rdtscp, check_rdtsc),
        N, N, N, N, N, N,
};

static const struct opcode group1[] = {
        F(Lock, em_add),
        F(Lock | PageTable, em_or),
        F(Lock, em_adc),
        F(Lock, em_sbb),
        F(Lock | PageTable, em_and),
        F(Lock, em_sub),
        F(Lock, em_xor),
        F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
        I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
        F(DstMem | ModRM, em_rol),
        F(DstMem | ModRM, em_ror),
        F(DstMem | ModRM, em_rcl),
        F(DstMem | ModRM, em_rcr),
        F(DstMem | ModRM, em_shl),
        F(DstMem | ModRM, em_shr),
        F(DstMem | ModRM, em_shl),
        F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
        F(DstMem | SrcImm | NoWrite, em_test),
        F(DstMem | SrcImm | NoWrite, em_test),
        F(DstMem | SrcNone | Lock, em_not),
        F(DstMem | SrcNone | Lock, em_neg),
        F(DstXacc | Src2Mem, em_mul_ex),
        F(DstXacc | Src2Mem, em_imul_ex),
        F(DstXacc | Src2Mem, em_div_ex),
        F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
        F(ByteOp | DstMem | SrcNone | Lock, em_inc),
        F(ByteOp | DstMem | SrcNone | Lock, em_dec),
        N, N, N, N, N, N,
};

static const struct opcode group5[] = {
        F(DstMem | SrcNone | Lock, em_inc),
        F(DstMem | SrcNone | Lock, em_dec),
        I(SrcMem | NearBranch, em_call_near_abs),
        I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
        I(SrcMem | NearBranch, em_jmp_abs),
        I(SrcMemFAddr | ImplicitOps, em_jmp_far),
        I(SrcMem | Stack, em_push), D(Undefined),
};

static const struct opcode group6[] = {
        DI(Prot | DstMem, sldt),
        DI(Prot | DstMem, str),
        II(Prot | Priv | SrcMem16, em_lldt, lldt),
        II(Prot | Priv | SrcMem16, em_ltr, ltr),
        N, N, N, N,
};

static const struct group_dual group7 = { {
        II(Mov | DstMem, em_sgdt, sgdt),
        II(Mov | DstMem, em_sidt, sidt),
        II(SrcMem | Priv, em_lgdt, lgdt),
        II(SrcMem | Priv, em_lidt, lidt),
        II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
        II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
        II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
        EXT(0, group7_rm0),
        EXT(0, group7_rm1),
        N, EXT(0, group7_rm3),
        II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
        II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
        EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
        N, N, N, N,
        F(DstMem | SrcImmByte | NoWrite, em_bt),
        F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
        F(DstMem | SrcImmByte | Lock, em_btr),
        F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};

static const struct group_dual group9 = { {
        N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
        N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
        I(DstMem | SrcImm | Mov | PageTable, em_mov),
        X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
        I(SrcMem | ByteOp, em_clflush), N, N, N,
};

static const struct group_dual group15 = { {
        N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
        N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
        I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
        I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
        ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
        I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
        N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
        N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
        /* 0xC0 - 0xC7 */
        N, N, N, N, N, N, N, N,
        /* 0xC8 - 0xCF */
        N, N, N, N, N, N, N, N,
        /* 0xD0 - 0xD7 */
        N, N, N, N, N, N, N, N,
        /* 0xD8 - 0xDF */
        N, N, N, N, N, N, N, N,
        /* 0xE0 - 0xE7 */
        N, N, N, N, N, N, N, N,
        /* 0xE8 - 0xEF */
        N, N, N, N, N, N, N, N,
        /* 0xF0 - 0xF7 */
        N, N, N, N, N, N, N, N,
        /* 0xF8 - 0xFF */
        N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
        N, N, N, N, N, N, N, N,
}, {
        /* 0xC0 - 0xC7 */
        N, N, N, N, N, N, N, N,
        /* 0xC8 - 0xCF */
        N, N, N, N, N, N, N, N,
        /* 0xD0 - 0xD7 */
        N, N, N, N, N, N, N, N,
        /* 0xD8 - 0xDF */
        N, N, N, N, N, N, N, N,
        /* 0xE0 - 0xE7 */
        N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
        /* 0xE8 - 0xEF */
        N, N, N, N, N, N, N, N,
        /* 0xF0 - 0xF7 */
        N, N, N, N, N, N, N, N,
        /* 0xF8 - 0xFF */
        N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
        N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
        /* 0xC0 - 0xC7 */
        N, N, N, N, N, N, N, N,
        /* 0xC8 - 0xCF */
        N, N, N, N, N, N, N, N,
        /* 0xD0 - 0xD7 */
        N, N, N, N, N, N, N, N,
        /* 0xD8 - 0xDF */
        N, N, N, N, N, N, N, N,
        /* 0xE0 - 0xE7 */
        N, N, N, N, N, N, N, N,
        /* 0xE8 - 0xEF */
        N, N, N, N, N, N, N, N,
        /* 0xF0 - 0xF7 */
        N, N, N, N, N, N, N, N,
        /* 0xF8 - 0xFF */
        N, N, N, N, N, N, N, N,
} };

static const struct instr_dual instr_dual_0f_c3 = {
        I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct mode_dual mode_dual_63 = {
        N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};

static const struct opcode opcode_table[256] = {
        /* 0x00 - 0x07 */
        F6ALU(Lock, em_add),
        I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
        I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
        /* 0x08 - 0x0F */
        F6ALU(Lock | PageTable, em_or),
        I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
        N,
        /* 0x10 - 0x17 */
        F6ALU(Lock, em_adc),
        I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
        I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
        /* 0x18 - 0x1F */
        F6ALU(Lock, em_sbb),
        I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
        I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
        /* 0x20 - 0x27 */
        F6ALU(Lock | PageTable, em_and), N, N,
        /* 0x28 - 0x2F */
        F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
        /* 0x30 - 0x37 */
        F6ALU(Lock, em_xor), N, N,
        /* 0x38 - 0x3F */
        F6ALU(NoWrite, em_cmp), N, N,
        /* 0x40 - 0x4F */
        X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
        /* 0x50 - 0x57 */
        X8(I(SrcReg | Stack, em_push)),
        /* 0x58 - 0x5F */
        X8(I(DstReg | Stack, em_pop)),
        /* 0x60 - 0x67 */
        I(ImplicitOps | Stack | No64, em_pusha),
        I(ImplicitOps | Stack | No64, em_popa),
        N, MD(ModRM, &mode_dual_63),
        N, N, N, N,
        /* 0x68 - 0x6F */
        I(SrcImm | Mov | Stack, em_push),
        I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
        I(SrcImmByte | Mov | Stack, em_push),
        I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
        I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
        I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
        /* 0x70 - 0x7F */
        X16(D(SrcImmByte | NearBranch)),
        /* 0x80 - 0x87 */
        G(ByteOp | DstMem | SrcImm, group1),
        G(DstMem | SrcImm, group1),
        G(ByteOp | DstMem | SrcImm | No64, group1),
        G(DstMem | SrcImmByte, group1),
        F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
        I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
        /* 0x88 - 0x8F */
        I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
        I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
        I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
        D(ModRM | SrcMem | NoAccess | DstReg),
        I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
        G(0, group1A),
        /* 0x90 - 0x97 */
        DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
        /* 0x98 - 0x9F */
        D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
        I(SrcImmFAddr | No64, em_call_far), N,
        II(ImplicitOps | Stack, em_pushf, pushf),
        II(ImplicitOps | Stack, em_popf, popf),
        I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
        /* 0xA0 - 0xA7 */
        I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
        I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
        I2bv(SrcSI | DstDI | Mov | String, em_mov),
        F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
        /* 0xA8 - 0xAF */
        F2bv(DstAcc | SrcImm | NoWrite, em_test),
        I2bv(SrcAcc | DstDI | Mov | String, em_mov),
        I2bv(SrcSI | DstAcc | Mov | String, em_mov),
        F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
        /* 0xB0 - 0xB7 */
        X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
        /* 0xB8 - 0xBF */
        X8(I(DstReg | SrcImm64 | Mov, em_mov)),
        /* 0xC0 - 0xC7 */
        G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
        I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
        I(ImplicitOps | NearBranch, em_ret),
        I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
        I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
        G(ByteOp, group11), G(0, group11),
        /* 0xC8 - 0xCF */
        I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
        I(ImplicitOps | SrcImmU16, em_ret_far_imm),
        I(ImplicitOps, em_ret_far),
        D(ImplicitOps), DI(SrcImmByte, intn),
        D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
        /* 0xD0 - 0xD7 */
        G(Src2One | ByteOp, group2), G(Src2One, group2),
        G(Src2CL | ByteOp, group2), G(Src2CL, group2),
        I(DstAcc | SrcImmUByte | No64, em_aam),
        I(DstAcc | SrcImmUByte | No64, em_aad),
        F(DstAcc | ByteOp | No64, em_salc),
        I(DstAcc | SrcXLat | ByteOp, em_mov),
        /* 0xD8 - 0xDF */
        N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
        /* 0xE0 - 0xE7 */
        X3(I(SrcImmByte | NearBranch, em_loop)),
        I(SrcImmByte | NearBranch, em_jcxz),
        I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
        I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
        /* 0xE8 - 0xEF */
        I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
        I(SrcImmFAddr | No64, em_jmp_far),
        D(SrcImmByte | ImplicitOps | NearBranch),
        I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
        I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
        /* 0xF0 - 0xF7 */
        N, DI(ImplicitOps, icebp), N, N,
        DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
        G(ByteOp, group3), G(0, group3),
        /* 0xF8 - 0xFF */
        D(ImplicitOps), D(ImplicitOps),
        I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
        D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

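/* Two-byte (0F-prefixed) opcode map, indexed by the second opcode byte. */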
static const struct opcode twobyte_table[256] = {
        /* 0x00 - 0x0F */
        G(0, group6), GD(0, &group7), N, N,
        N, I(ImplicitOps | EmulateOnUD, em_syscall),
        II(ImplicitOps | Priv, em_clts, clts), N,
        DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
        N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
        /* 0x10 - 0x1F */
        N, N, N, N, N, N, N, N,
        D(ImplicitOps | ModRM | SrcMem | NoAccess),
        N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
        /* 0x20 - 0x2F */
        DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
        DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
        IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
                                                check_cr_write),
        IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
                                                check_dr_write),
        N, N, N, N,
        GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
        GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
        N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
        N, N, N, N,
        /* 0x30 - 0x3F */
        II(ImplicitOps | Priv, em_wrmsr, wrmsr),
        IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
        II(ImplicitOps | Priv, em_rdmsr, rdmsr),
        IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
        I(ImplicitOps | EmulateOnUD, em_sysenter),
        I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
        N, N,
        N, N, N, N, N, N, N, N,
        /* 0x40 - 0x4F */
        X16(D(DstReg | SrcMem | ModRM)),
        /* 0x50 - 0x5F */
        N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
        /* 0x60 - 0x6F */
        N, N, N, N,
        N, N, N, N,
        N, N, N, N,
        N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
        /* 0x70 - 0x7F */
        N, N, N, N,
        N, N, N, N,
        N, N, N, N,
        N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
        /* 0x80 - 0x8F */
        X16(D(SrcImm | NearBranch)),
        /* 0x90 - 0x9F */
        X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
        /* 0xA0 - 0xA7 */
        I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
        II(ImplicitOps, em_cpuid, cpuid),
        F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
        F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
        F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
        /* 0xA8 - 0xAF */
        I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
        DI(ImplicitOps, rsm),
        F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
        F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
        F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
        GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
        /* 0xB0 - 0xB7 */
        I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
        I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
        F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
        I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
        I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
        D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
        /* 0xB8 - 0xBF */
        N, N,
        G(BitOp, group8),
        F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
        I(DstReg | SrcMem | ModRM, em_bsf_c),
        I(DstReg | SrcMem | ModRM, em_bsr_c),
        D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
        /* 0xC0 - 0xC7 */
        F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
        N, ID(0, &instr_dual_0f_c3),
        N, N, N, GD(0, &group9),
        /* 0xC8 - 0xCF */
        X8(I(DstReg, em_bswap)),
        /* 0xD0 - 0xDF */
        N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
        /* 0xE0 - 0xEF */
        N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
        N, N, N, N, N, N, N, N,
        /* 0xF0 - 0xFF */
        N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

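/*
 * MOVBE, 0F 38 F0 (load) and 0F 38 F1 (store).  Only the unprefixed
 * encoding is implemented, and the mod == 3 (register-register) form
 * resolves to N, matching the architectural rule that MOVBE has no
 * register-only encoding.
 */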
static const struct instr_dual instr_dual_0f_38_f0 = {
        I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
        I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
        ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
        ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * The instructions below live in the 0F 38 opcode map and are selected
 * by the mandatory prefix, indexed by the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
        /* 0x00 - 0x7f */
        X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
        /* 0x80 - 0xef */
        X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
        /* 0xf0 - 0xf1 */
        GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
        GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
        /* 0xf2 - 0xff */
        N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU

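/*
 * Immediate operands occupy at most four bytes in the encoding: with a
 * 64-bit operand size they are fetched as 32 bits and sign-extended.
 * OpImm64 (used by MOV reg, imm64) is the one exception and fetches the
 * full operand size.
 */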
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
        unsigned size;

        size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        if (size == 8)
                size = 4;
        return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
                      unsigned size, bool sign_extension)
{
        int rc = X86EMUL_CONTINUE;

        op->type = OP_IMM;
        op->bytes = size;
        op->addr.mem.ea = ctxt->_eip;
        /* NB. Immediates are sign-extended as necessary. */
        switch (op->bytes) {
        case 1:
                op->val = insn_fetch(s8, ctxt);
                break;
        case 2:
                op->val = insn_fetch(s16, ctxt);
                break;
        case 4:
                op->val = insn_fetch(s32, ctxt);
                break;
        case 8:
                op->val = insn_fetch(s64, ctxt);
                break;
        }
        if (!sign_extension) {
                switch (op->bytes) {
                case 1:
                        op->val &= 0xff;
                        break;
                case 2:
                        op->val &= 0xffff;
                        break;
                case 4:
                        op->val &= 0xffffffff;
                        break;
                }
        }
done:
        return rc;
}

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                          unsigned d)
{
        int rc = X86EMUL_CONTINUE;

        switch (d) {
        case OpReg:
                decode_register_operand(ctxt, op);
                break;
        case OpImmUByte:
                rc = decode_imm(ctxt, op, 1, false);
                break;
        case OpMem:
                ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        mem_common:
                *op = ctxt->memop;
                ctxt->memopp = op;
                if (ctxt->d & BitOp)
                        fetch_bit_operand(ctxt);
                op->orig_val = op->val;
                break;
        case OpMem64:
                ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
                goto mem_common;
        case OpAcc:
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
                fetch_register_operand(op);
                op->orig_val = op->val;
                break;
        case OpAccLo:
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
                fetch_register_operand(op);
                op->orig_val = op->val;
                break;
        case OpAccHi:
                if (ctxt->d & ByteOp) {
                        op->type = OP_NONE;
                        break;
                }
                op->type = OP_REG;
                op->bytes = ctxt->op_bytes;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
                fetch_register_operand(op);
                op->orig_val = op->val;
                break;
        case OpDI:
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
                        register_address(ctxt, VCPU_REGS_RDI);
                op->addr.mem.seg = VCPU_SREG_ES;
                op->val = 0;
                op->count = 1;
                break;
        case OpDX:
                op->type = OP_REG;
                op->bytes = 2;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
                fetch_register_operand(op);
                break;
        case OpCL:
                op->type = OP_IMM;
                op->bytes = 1;
                op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
                break;
        case OpImmByte:
                rc = decode_imm(ctxt, op, 1, true);
                break;
        case OpOne:
                op->type = OP_IMM;
                op->bytes = 1;
                op->val = 1;
                break;
        case OpImm:
                rc = decode_imm(ctxt, op, imm_size(ctxt), true);
                break;
        case OpImm64:
                rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
                break;
        case OpMem8:
                ctxt->memop.bytes = 1;
                if (ctxt->memop.type == OP_REG) {
                        ctxt->memop.addr.reg = decode_register(ctxt,
                                        ctxt->modrm_rm, true);
                        fetch_register_operand(&ctxt->memop);
                }
                goto mem_common;
        case OpMem16:
                ctxt->memop.bytes = 2;
                goto mem_common;
        case OpMem32:
                ctxt->memop.bytes = 4;
                goto mem_common;
        case OpImmU16:
                rc = decode_imm(ctxt, op, 2, false);
                break;
        case OpImmU:
                rc = decode_imm(ctxt, op, imm_size(ctxt), false);
                break;
        case OpSI:
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
                        register_address(ctxt, VCPU_REGS_RSI);
                op->addr.mem.seg = ctxt->seg_override;
                op->val = 0;
                op->count = 1;
                break;
        case OpXLat:
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
                        address_mask(ctxt,
                                reg_read(ctxt, VCPU_REGS_RBX) +
                                (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
                op->addr.mem.seg = ctxt->seg_override;
                op->val = 0;
                break;
        case OpImmFAddr:
                op->type = OP_IMM;
                op->addr.mem.ea = ctxt->_eip;
                op->bytes = ctxt->op_bytes + 2;
                insn_fetch_arr(op->valptr, op->bytes, ctxt);
                break;
        case OpMemFAddr:
                ctxt->memop.bytes = ctxt->op_bytes + 2;
                goto mem_common;
        case OpES:
                op->type = OP_IMM;
                op->val = VCPU_SREG_ES;
                break;
        case OpCS:
                op->type = OP_IMM;
                op->val = VCPU_SREG_CS;
                break;
        case OpSS:
                op->type = OP_IMM;
                op->val = VCPU_SREG_SS;
                break;
        case OpDS:
                op->type = OP_IMM;
                op->val = VCPU_SREG_DS;
                break;
        case OpFS:
                op->type = OP_IMM;
                op->val = VCPU_SREG_FS;
                break;
        case OpGS:
                op->type = OP_IMM;
                op->val = VCPU_SREG_GS;
                break;
        case OpImplicit:
                /* Special instructions do their own operand decoding. */
        default:
                op->type = OP_NONE; /* Disable writeback. */
                break;
        }

done:
        return rc;
}

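/*
 * Decode one instruction into @ctxt: legacy prefixes, an optional REX
 * prefix, one to three opcode bytes, group/prefix/mode dispatch down to
 * the final opcode entry, then ModRM/SIB and the up-to-three operands
 * described by the entry's Src/Src2/Dst flag fields.
 */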
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
        int rc = X86EMUL_CONTINUE;
        int mode = ctxt->mode;
        int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
        bool op_prefix = false;
        bool has_seg_override = false;
        struct opcode opcode;

        ctxt->memop.type = OP_NONE;
        ctxt->memopp = NULL;
        ctxt->_eip = ctxt->eip;
        ctxt->fetch.ptr = ctxt->fetch.data;
        ctxt->fetch.end = ctxt->fetch.data + insn_len;
        ctxt->opcode_len = 1;
        if (insn_len > 0)
                memcpy(ctxt->fetch.data, insn, insn_len);
        else {
                rc = __do_insn_fetch_bytes(ctxt, 1);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
        }

        switch (mode) {
        case X86EMUL_MODE_REAL:
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
                def_op_bytes = def_ad_bytes = 2;
                break;
        case X86EMUL_MODE_PROT32:
                def_op_bytes = def_ad_bytes = 4;
                break;
#ifdef CONFIG_X86_64
        case X86EMUL_MODE_PROT64:
                def_op_bytes = 4;
                def_ad_bytes = 8;
                break;
#endif
        default:
                return EMULATION_FAILED;
        }

        ctxt->op_bytes = def_op_bytes;
        ctxt->ad_bytes = def_ad_bytes;

        /* Legacy prefixes. */
        for (;;) {
                switch (ctxt->b = insn_fetch(u8, ctxt)) {
                case 0x66:      /* operand-size override */
                        op_prefix = true;
                        /* switch between 2/4 bytes */
                        ctxt->op_bytes = def_op_bytes ^ 6;
                        break;
                case 0x67:      /* address-size override */
                        if (mode == X86EMUL_MODE_PROT64)
                                /* switch between 4/8 bytes */
                                ctxt->ad_bytes = def_ad_bytes ^ 12;
                        else
                                /* switch between 2/4 bytes */
                                ctxt->ad_bytes = def_ad_bytes ^ 6;
                        break;
                case 0x26:      /* ES override */
                case 0x2e:      /* CS override */
                case 0x36:      /* SS override */
                case 0x3e:      /* DS override */
                        has_seg_override = true;
                        ctxt->seg_override = (ctxt->b >> 3) & 3;
                        break;
                case 0x64:      /* FS override */
                case 0x65:      /* GS override */
                        has_seg_override = true;
                        ctxt->seg_override = ctxt->b & 7;
                        break;
                case 0x40 ... 0x4f: /* REX */
                        if (mode != X86EMUL_MODE_PROT64)
                                goto done_prefixes;
                        ctxt->rex_prefix = ctxt->b;
                        continue;
                case 0xf0:      /* LOCK */
                        ctxt->lock_prefix = 1;
                        break;
                case 0xf2:      /* REPNE/REPNZ */
                case 0xf3:      /* REP/REPE/REPZ */
                        ctxt->rep_prefix = ctxt->b;
                        break;
                default:
                        goto done_prefixes;
                }

                /* Any legacy prefix after a REX prefix nullifies its effect. */

                ctxt->rex_prefix = 0;
        }

done_prefixes:

        /* REX prefix. */
        if (ctxt->rex_prefix & 8)
                ctxt->op_bytes = 8;     /* REX.W */

        /* Opcode byte(s). */
        opcode = opcode_table[ctxt->b];
        /* Two-byte opcode? */
        if (ctxt->b == 0x0f) {
                ctxt->opcode_len = 2;
                ctxt->b = insn_fetch(u8, ctxt);
                opcode = twobyte_table[ctxt->b];

                /* 0F_38 opcode map */
                if (ctxt->b == 0x38) {
                        ctxt->opcode_len = 3;
                        ctxt->b = insn_fetch(u8, ctxt);
                        opcode = opcode_map_0f_38[ctxt->b];
                }
        }
        ctxt->d = opcode.flags;

        if (ctxt->d & ModRM)
                ctxt->modrm = insn_fetch(u8, ctxt);

        /* VEX-prefixed instructions are not implemented */
        if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
            (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
                ctxt->d = NotImpl;
        }

        while (ctxt->d & GroupMask) {
                switch (ctxt->d & GroupMask) {
                case Group:
                        goffset = (ctxt->modrm >> 3) & 7;
                        opcode = opcode.u.group[goffset];
                        break;
                case GroupDual:
                        goffset = (ctxt->modrm >> 3) & 7;
                        if ((ctxt->modrm >> 6) == 3)
                                opcode = opcode.u.gdual->mod3[goffset];
                        else
                                opcode = opcode.u.gdual->mod012[goffset];
                        break;
                case RMExt:
                        goffset = ctxt->modrm & 7;
                        opcode = opcode.u.group[goffset];
                        break;
                case Prefix:
                        if (ctxt->rep_prefix && op_prefix)
                                return EMULATION_FAILED;
                        simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
                        switch (simd_prefix) {
                        case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
                        case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
                        case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
                        case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
                        }
                        break;
                case Escape:
                        if (ctxt->modrm > 0xbf)
                                opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
                        else
                                opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
                        break;
                case InstrDual:
                        if ((ctxt->modrm >> 6) == 3)
                                opcode = opcode.u.idual->mod3;
                        else
                                opcode = opcode.u.idual->mod012;
                        break;
                case ModeDual:
                        if (ctxt->mode == X86EMUL_MODE_PROT64)
                                opcode = opcode.u.mdual->mode64;
                        else
                                opcode = opcode.u.mdual->mode32;
                        break;
                default:
                        return EMULATION_FAILED;
                }

                ctxt->d &= ~(u64)GroupMask;
                ctxt->d |= opcode.flags;
        }

        /* Unrecognised? */
        if (ctxt->d == 0)
                return EMULATION_FAILED;

        ctxt->execute = opcode.u.execute;

        if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
                return EMULATION_FAILED;

        if (unlikely(ctxt->d &
            (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
             No16))) {
                /*
                 * These are copied unconditionally here, and checked
                 * unconditionally in x86_emulate_insn.
                 */
                ctxt->check_perm = opcode.check_perm;
                ctxt->intercept = opcode.intercept;

                if (ctxt->d & NotImpl)
                        return EMULATION_FAILED;

                if (mode == X86EMUL_MODE_PROT64) {
                        if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
                                ctxt->op_bytes = 8;
                        else if (ctxt->d & NearBranch)
                                ctxt->op_bytes = 8;
                }

                if (ctxt->d & Op3264) {
                        if (mode == X86EMUL_MODE_PROT64)
                                ctxt->op_bytes = 8;
                        else
                                ctxt->op_bytes = 4;
                }

                if ((ctxt->d & No16) && ctxt->op_bytes == 2)
                        ctxt->op_bytes = 4;

                if (ctxt->d & Sse)
                        ctxt->op_bytes = 16;
                else if (ctxt->d & Mmx)
                        ctxt->op_bytes = 8;
        }

        /* ModRM and SIB bytes. */
        if (ctxt->d & ModRM) {
                rc = decode_modrm(ctxt, &ctxt->memop);
                if (!has_seg_override) {
                        has_seg_override = true;
                        ctxt->seg_override = ctxt->modrm_seg;
                }
        } else if (ctxt->d & MemAbs)
                rc = decode_abs(ctxt, &ctxt->memop);
        if (rc != X86EMUL_CONTINUE)
                goto done;

        if (!has_seg_override)
                ctxt->seg_override = VCPU_SREG_DS;

        ctxt->memop.addr.mem.seg = ctxt->seg_override;

        /*
         * Decode and fetch the source operand: register, memory
         * or immediate.
         */
        rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
        if (rc != X86EMUL_CONTINUE)
                goto done;

        /*
         * Decode and fetch the second source operand: register, memory
         * or immediate.
         */
        rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
        if (rc != X86EMUL_CONTINUE)
                goto done;

        /* Decode and fetch the destination operand: register or memory. */
        rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

        if (ctxt->rip_relative)
                ctxt->memopp->addr.mem.ea = address_mask(ctxt,
                                        ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
        return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
        return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
        /*
         * The second termination condition applies only to REPE and
         * REPNE.  If the repeat prefix is REPE/REPZ or REPNE/REPNZ,
         * test the corresponding termination condition:
         *  - if REPE/REPZ and ZF = 0 then done
         *  - if REPNE/REPNZ and ZF = 1 then done
         */
        if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
             (ctxt->b == 0xae) || (ctxt->b == 0xaf))
            && (((ctxt->rep_prefix == REPE_PREFIX) &&
                 ((ctxt->eflags & EFLG_ZF) == 0))
                || ((ctxt->rep_prefix == REPNE_PREFIX) &&
                    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
                return true;

        return false;
}

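/*
 * fwait surfaces any exception pending in the x87 unit.  Executing it
 * here, with a fixup catching the resulting fault in the host, lets the
 * emulator inject #MF into the guest at an instruction boundary.
 */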
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004783static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4784{
4785 bool fault = false;
4786
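	/*
	 * fwait forces delivery of any pending x87 exception.  If #MF
	 * fires, the fixup entry installed by _ASM_EXTABLE redirects
	 * execution to label 3, which records the fault and resumes at
	 * label 2, so the fault can be reflected into the guest below
	 * instead of being taken on the host.
	 */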
4787 ctxt->ops->get_fpu(ctxt);
4788 asm volatile("1: fwait \n\t"
4789 "2: \n\t"
4790 ".pushsection .fixup,\"ax\" \n\t"
4791 "3: \n\t"
4792 "movb $1, %[fault] \n\t"
4793 "jmp 2b \n\t"
4794 ".popsection \n\t"
4795 _ASM_EXTABLE(1b, 3b)
Avi Kivity38e8a2d2012-04-22 15:12:50 +03004796 : [fault]"+qm"(fault));
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004797 ctxt->ops->put_fpu(ctxt);
4798
4799 if (unlikely(fault))
4800 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4801
4802 return X86EMUL_CONTINUE;
4803}
4804
4805static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4806 struct operand *op)
4807{
4808 if (op->type == OP_MM)
4809 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4810}
4811
Avi Kivitye28bbd42013-01-04 16:18:48 +02004812static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4813{
4814 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
Avi Kivityb9fa4092013-02-09 11:31:48 +02004815 if (!(ctxt->d & ByteOp))
4816 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
Avi Kivitye28bbd42013-01-04 16:18:48 +02004817 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004818 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4819 [fastop]"+S"(fop)
4820 : "c"(ctxt->src2.val));
Avi Kivitye28bbd42013-01-04 16:18:48 +02004821 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004822 if (!fop) /* exception is returned in fop variable */
4823 return emulate_de(ctxt);
Avi Kivitye28bbd42013-01-04 16:18:48 +02004824 return X86EMUL_CONTINUE;
4825}
Avi Kivitydd856ef2012-08-27 23:46:17 +03004826
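/*
 * Illustrative sketch only (hypothetical helper, not in this file): how
 * fastop() above selects a size-specific entry point.  Each fastop
 * group is emitted as consecutive FASTOP_SIZE-aligned stubs ordered by
 * operand size, byte form first, so log2 of the operand size indexes
 * the matching stub.
 */
static inline void *fastop_entry_sketch(void *base, unsigned int bytes,
					bool byte_op)
{
	if (byte_op)
		return base;	/* the byte-sized stub comes first */
	/* bytes == 2/4/8 selects stub 1/2/3 */
	return base + __ffs(bytes) * FASTOP_SIZE;
}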
Bandan Das14985072014-04-16 12:46:09 -04004827void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4828{
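	/*
	 * Zero every per-instruction decode field in one go: the fields
	 * from rip_relative up to (but not including) modrm are laid
	 * out contiguously in struct x86_emulate_ctxt precisely so that
	 * they can be cleared with a single memset.
	 */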
Bandan Das573e80f2014-04-16 12:46:13 -04004829 memset(&ctxt->rip_relative, 0,
4830 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
Bandan Das14985072014-04-16 12:46:09 -04004831
Bandan Das14985072014-04-16 12:46:09 -04004832 ctxt->io_read.pos = 0;
4833 ctxt->io_read.end = 0;
Bandan Das14985072014-04-16 12:46:09 -04004834 ctxt->mem_read.end = 0;
4835}
4836
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09004837int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004838{
Mathias Krause0225fb52012-08-30 01:30:16 +02004839 const struct x86_emulate_ops *ops = ctxt->ops;
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09004840 int rc = X86EMUL_CONTINUE;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004841 int saved_dst_type = ctxt->dst.type;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004842
Avi Kivity9dac77f2011-06-01 15:34:25 +03004843 ctxt->mem_read.pos = 0;
Glauber Costa310b5d32009-05-12 16:21:06 -04004844
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004845 /* LOCK prefix is allowed only with some instructions */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004846 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004847 rc = emulate_ud(ctxt);
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004848 goto done;
4849 }
4850
Avi Kivity9dac77f2011-06-01 15:34:25 +03004851 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004852 rc = emulate_ud(ctxt);
Avi Kivity081bca02010-08-26 11:06:15 +03004853 goto done;
4854 }
4855
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004856 if (unlikely(ctxt->d &
4857 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4858 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4859 (ctxt->d & Undefined)) {
4860 rc = emulate_ud(ctxt);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004861 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004862 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004863
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004864 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4865 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4866 rc = emulate_ud(ctxt);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004867 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004868 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004869
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004870 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4871 rc = emulate_nm(ctxt);
Joerg Roedeld09beab2011-04-04 12:39:25 +02004872 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004873 }
Joerg Roedeld09beab2011-04-04 12:39:25 +02004874
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004875 if (ctxt->d & Mmx) {
4876 rc = flush_pending_x87_faults(ctxt);
4877 if (rc != X86EMUL_CONTINUE)
4878 goto done;
4879 /*
4880			 * Now that we know the FPU is exception-safe, we can
4881			 * fetch operands from it.
4882 */
4883 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4884 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4885 if (!(ctxt->d & Mov))
4886 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4887 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004888
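		/*
		 * When emulating on behalf of a nested guest, offer the
		 * L1 hypervisor three interception points that mirror
		 * the SVM intercept model: before the exception checks
		 * (here), after them (below), and after the memory
		 * operands have been fetched (at special_insn).
		 */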
Bandan Das685bbf42014-04-16 12:46:10 -04004889 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004890 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4891 X86_ICPT_PRE_EXCEPT);
4892 if (rc != X86EMUL_CONTINUE)
4893 goto done;
4894 }
4895
Nadav Amit64a38292014-12-10 11:19:04 +02004896		/* Instruction can be executed only in protected mode */
4897 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4898 rc = emulate_ud(ctxt);
4899 goto done;
4900 }
4901
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004902 /* Privileged instruction can be executed only in CPL=0 */
4903 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
Nadav Amit68efa762014-06-18 17:19:35 +03004904 if (ctxt->d & PrivUD)
4905 rc = emulate_ud(ctxt);
4906 else
4907 rc = emulate_gp(ctxt, 0);
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004908 goto done;
4909 }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004910
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004911 /* Do instruction specific permission checks */
Bandan Das685bbf42014-04-16 12:46:10 -04004912 if (ctxt->d & CheckPerm) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004913 rc = ctxt->check_perm(ctxt);
4914 if (rc != X86EMUL_CONTINUE)
4915 goto done;
4916 }
4917
Bandan Das685bbf42014-04-16 12:46:10 -04004918 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004919 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4920 X86_ICPT_POST_EXCEPT);
4921 if (rc != X86EMUL_CONTINUE)
4922 goto done;
4923 }
4924
4925 if (ctxt->rep_prefix && (ctxt->d & String)) {
4926 /* All REP prefixes have the same first termination condition */
4927 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4928 ctxt->eip = ctxt->_eip;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004929 ctxt->eflags &= ~EFLG_RF;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004930 goto done;
4931 }
4932 }
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004933 }
4934
Avi Kivity9dac77f2011-06-01 15:34:25 +03004935 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4936 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4937 ctxt->src.valptr, ctxt->src.bytes);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09004938 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004939 goto done;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004940 ctxt->src.orig_val64 = ctxt->src.val64;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004941 }
4942
Avi Kivity9dac77f2011-06-01 15:34:25 +03004943 if (ctxt->src2.type == OP_MEM) {
4944 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4945 &ctxt->src2.val, ctxt->src2.bytes);
Gleb Natapove35b7b92010-02-25 16:36:42 +02004946 if (rc != X86EMUL_CONTINUE)
4947 goto done;
4948 }
4949
Avi Kivity9dac77f2011-06-01 15:34:25 +03004950 if ((ctxt->d & DstMask) == ImplicitOps)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004951 goto special_insn;
4952
Avi Kivity9dac77f2011-06-01 15:34:25 +03004954 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004955 /* optimisation - avoid slow emulated read if Mov */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004956 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4957 &ctxt->dst.val, ctxt->dst.bytes);
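		/*
		 * The destination was only read so that it can be
		 * rewritten below, so a #PF taken here must report a
		 * write access in its error code, unless the
		 * instruction never writes its destination back
		 * (NoWrite).
		 */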
Nadav Amitc205fb72014-12-25 02:52:16 +02004958 if (rc != X86EMUL_CONTINUE) {
Paolo Bonzinid44e1212015-02-09 10:02:05 +01004959 if (!(ctxt->d & NoWrite) &&
4960 rc == X86EMUL_PROPAGATE_FAULT &&
Nadav Amitc205fb72014-12-25 02:52:16 +02004961 ctxt->exception.vector == PF_VECTOR)
4962 ctxt->exception.error_code |= PFERR_WRITE_MASK;
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004963 goto done;
Nadav Amitc205fb72014-12-25 02:52:16 +02004964 }
Avi Kivity038e51d2007-01-22 20:40:40 -08004965 }
Paolo Bonzini4ff6f8e2015-02-12 17:04:47 +01004966 /* Copy full 64-bit value for CMPXCHG8B. */
4967 ctxt->dst.orig_val64 = ctxt->dst.val64;
Avi Kivity038e51d2007-01-22 20:40:40 -08004968
Avi Kivity018a98d2007-11-27 19:30:56 +02004969special_insn:
4970
Bandan Das685bbf42014-04-16 12:46:10 -04004971 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03004972 rc = emulator_check_intercept(ctxt, ctxt->intercept,
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004973 X86_ICPT_POST_MEMACCESS);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004974 if (rc != X86EMUL_CONTINUE)
4975 goto done;
4976 }
4977
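	/*
	 * The architecture keeps RF set while a string instruction is
	 * iterating so that an instruction breakpoint does not re-fire
	 * on every iteration; RF is cleared again once the instruction
	 * finally completes.
	 */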
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03004978 if (ctxt->rep_prefix && (ctxt->d & String))
4979 ctxt->eflags |= EFLG_RF;
4980 else
4981 ctxt->eflags &= ~EFLG_RF;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004982
Avi Kivity9dac77f2011-06-01 15:34:25 +03004983 if (ctxt->execute) {
Avi Kivitye28bbd42013-01-04 16:18:48 +02004984 if (ctxt->d & Fastop) {
4985 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4986 rc = fastop(ctxt, fop);
4987 if (rc != X86EMUL_CONTINUE)
4988 goto done;
4989 goto writeback;
4990 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004991 rc = ctxt->execute(ctxt);
Avi Kivityef65c882010-07-29 15:11:51 +03004992 if (rc != X86EMUL_CONTINUE)
4993 goto done;
4994 goto writeback;
4995 }
4996
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004997 if (ctxt->opcode_len == 2)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004998 goto twobyte_insn;
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004999 else if (ctxt->opcode_len == 3)
5000 goto threebyte_insn;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005001
Avi Kivity9dac77f2011-06-01 15:34:25 +03005002 switch (ctxt->b) {
Gleb Natapovb2833e32009-04-12 13:36:30 +03005003 case 0x70 ... 0x7f: /* jcc (short) */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005004 if (test_cc(ctxt->b, ctxt->eflags))
Nadav Amit234f3ce2014-09-18 22:39:38 +03005005 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02005006 break;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03005007 case 0x8d: /* lea r16/r32, m */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005008 ctxt->dst.val = ctxt->src.addr.mem.ea;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03005009 break;
Avi Kivity3d9e77d2010-08-01 12:41:59 +03005010 case 0x90 ... 0x97: /* nop / xchg reg, rax */
Avi Kivitydd856ef2012-08-27 23:46:17 +03005011 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
Nadav Amita825f5c2014-06-15 16:13:01 +03005012 ctxt->dst.type = OP_NONE;
5013 else
5014 rc = em_xchg(ctxt);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09005015 break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08005016 case 0x98: /* cbw/cwde/cdqe */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005017 switch (ctxt->op_bytes) {
5018 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5019 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5020 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08005021 }
5022 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03005023 case 0xcc: /* int3 */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09005024 rc = emulate_int(ctxt, 3);
5025 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03005026 case 0xcd: /* int n */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005027 rc = emulate_int(ctxt, ctxt->src.val);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03005028 break;
5029 case 0xce: /* into */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09005030 if (ctxt->eflags & EFLG_OF)
5031 rc = emulate_int(ctxt, 4);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03005032 break;
Nitin A Kamble1a52e052007-09-18 16:34:25 -07005033 case 0xe9: /* jmp rel */
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09005034 case 0xeb: /* jmp rel short */
Nadav Amit234f3ce2014-09-18 22:39:38 +03005035 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity9dac77f2011-06-01 15:34:25 +03005036 ctxt->dst.type = OP_NONE; /* Disable writeback. */
Nitin A Kamble1a52e052007-09-18 16:34:25 -07005037 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02005038 case 0xf4: /* hlt */
Avi Kivity6c3287f2011-04-20 15:43:05 +03005039 ctxt->ops->halt(ctxt);
Mohammed Gamal19fdfa02008-07-06 16:51:26 +03005040 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02005041 case 0xf5: /* cmc */
5042		/* complement the carry flag in eflags */
5043 ctxt->eflags ^= EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02005044 break;
5045 case 0xf8: /* clc */
5046 ctxt->eflags &= ~EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02005047 break;
Mohammed Gamal8744aa92010-08-05 15:42:49 +03005048 case 0xf9: /* stc */
5049 ctxt->eflags |= EFLG_CF;
5050 break;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03005051 case 0xfc: /* cld */
5052 ctxt->eflags &= ~EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03005053 break;
5054 case 0xfd: /* std */
5055 ctxt->eflags |= EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03005056 break;
Avi Kivity91269b82010-07-25 14:51:16 +03005057 default:
5058 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005059 }
Avi Kivity018a98d2007-11-27 19:30:56 +02005060
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005061 if (rc != X86EMUL_CONTINUE)
5062 goto done;
5063
Avi Kivity018a98d2007-11-27 19:30:56 +02005064writeback:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02005065 if (ctxt->d & SrcWrite) {
5066 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5067 rc = writeback(ctxt, &ctxt->src);
5068 if (rc != X86EMUL_CONTINUE)
5069 goto done;
5070 }
Nadav Amitee212292014-06-15 16:12:58 +03005071 if (!(ctxt->d & NoWrite)) {
5072 rc = writeback(ctxt, &ctxt->dst);
5073 if (rc != X86EMUL_CONTINUE)
5074 goto done;
5075 }
Avi Kivity018a98d2007-11-27 19:30:56 +02005076
Gleb Natapov5cd21912010-03-18 15:20:26 +02005077 /*
5078	 * Restore dst type in case the decoding is reused
5079	 * (happens for string instructions).
5080 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005081 ctxt->dst.type = saved_dst_type;
Gleb Natapov5cd21912010-03-18 15:20:26 +02005082
Avi Kivity9dac77f2011-06-01 15:34:25 +03005083 if ((ctxt->d & SrcMask) == SrcSI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03005084 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
Gleb Natapova682e352010-03-18 15:20:21 +02005085
Avi Kivity9dac77f2011-06-01 15:34:25 +03005086 if ((ctxt->d & DstMask) == DstDI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03005087 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
Gleb Natapovd9271122010-03-18 15:20:22 +02005088
Avi Kivity9dac77f2011-06-01 15:34:25 +03005089 if (ctxt->rep_prefix && (ctxt->d & String)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03005090 unsigned int count;
Avi Kivity9dac77f2011-06-01 15:34:25 +03005091 struct read_cache *r = &ctxt->io_read;
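		/*
		 * String I/O may complete several iterations in a
		 * single pass through the emulator, so decrement RCX
		 * by the number of elements actually processed rather
		 * than by one.
		 */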
Gleb Natapovb3356bf2012-09-03 15:24:29 +03005092 if ((ctxt->d & SrcMask) == SrcSI)
5093 count = ctxt->src.count;
5094 else
5095 count = ctxt->dst.count;
Paolo Bonzini01485a22014-11-19 18:25:08 +01005096 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03005097
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005098 if (!string_insn_completed(ctxt)) {
5099 /*
5100			 * Re-enter the guest when the PIO read-ahead buffer is
5101			 * empty or, if it is not used, after every 1024 iterations.
5102 */
Avi Kivitydd856ef2012-08-27 23:46:17 +03005103 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005104 (r->end == 0 || r->end != r->pos)) {
5105 /*
5106				 * Reset the read cache.  This usually happens
5107				 * before decode, but since the instruction has
5108				 * been restarted we have to do it here.
5109 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005110 ctxt->mem_read.end = 0;
Avi Kivitydd856ef2012-08-27 23:46:17 +03005111 writeback_registers(ctxt);
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005112 return EMULATION_RESTART;
5113 }
5114 goto done; /* skip rip writeback */
Avi Kivity0fa6ccb2010-08-17 11:22:17 +03005115 }
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03005116 ctxt->eflags &= ~EFLG_RF;
Gleb Natapov5cd21912010-03-18 15:20:26 +02005117 }
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005118
Avi Kivity9dac77f2011-06-01 15:34:25 +03005119 ctxt->eip = ctxt->_eip;
Avi Kivity018a98d2007-11-27 19:30:56 +02005120
5121done:
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02005122 if (rc == X86EMUL_PROPAGATE_FAULT) {
5123 WARN_ON(ctxt->exception.vector > 0x1f);
Avi Kivityda9cb572010-11-22 17:53:21 +02005124 ctxt->have_exception = true;
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02005125 }
Joerg Roedel775fde82011-04-04 12:39:24 +02005126 if (rc == X86EMUL_INTERCEPTED)
5127 return EMULATION_INTERCEPTED;
5128
Avi Kivitydd856ef2012-08-27 23:46:17 +03005129 if (rc == X86EMUL_CONTINUE)
5130 writeback_registers(ctxt);
5131
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005132 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005133
5134twobyte_insn:
Avi Kivity9dac77f2011-06-01 15:34:25 +03005135 switch (ctxt->b) {
Avi Kivity018a98d2007-11-27 19:30:56 +02005136 case 0x09: /* wbinvd */
Clemens Nosscfb22372011-04-21 21:16:05 +02005137 (ctxt->ops->wbinvd)(ctxt);
Sheng Yangf5f48ee2010-06-30 12:25:15 +08005138 break;
5139 case 0x08: /* invd */
Avi Kivity018a98d2007-11-27 19:30:56 +02005140 case 0x0d: /* GrpP (prefetch) */
5141 case 0x18: /* Grp16 (prefetch/nop) */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02005142 case 0x1f: /* nop */
Avi Kivity018a98d2007-11-27 19:30:56 +02005143 break;
5144 case 0x20: /* mov cr, reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005145 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
Avi Kivity018a98d2007-11-27 19:30:56 +02005146 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005147 case 0x21: /* mov from dr to reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005148 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005149 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005150 case 0x40 ... 0x4f: /* cmov */
Nadav Amit140bad82014-06-15 16:13:00 +03005151 if (test_cc(ctxt->b, ctxt->eflags))
5152 ctxt->dst.val = ctxt->src.val;
Nadav Amitb91aa142015-03-30 15:39:19 +03005153 else if (ctxt->op_bytes != 4)
Avi Kivity9dac77f2011-06-01 15:34:25 +03005154 ctxt->dst.type = OP_NONE; /* no writeback */
Avi Kivity6aa8b732006-12-10 02:21:36 -08005155 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03005156	case 0x80 ... 0x8f: /* jcc (near) */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005157 if (test_cc(ctxt->b, ctxt->eflags))
Nadav Amit234f3ce2014-09-18 22:39:38 +03005158 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02005159 break;
Wei Yongjunee45b582010-08-06 17:10:07 +08005160 case 0x90 ... 0x9f: /* setcc r/m8 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005161 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
Wei Yongjunee45b582010-08-06 17:10:07 +08005162 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005163 case 0xb6 ... 0xb7: /* movzx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005164 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03005165 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
Avi Kivity9dac77f2011-06-01 15:34:25 +03005166 : (u16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005167 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005168 case 0xbe ... 0xbf: /* movsx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005169 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03005170 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
Avi Kivity9dac77f2011-06-01 15:34:25 +03005171 (s16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005172 break;
Avi Kivity91269b82010-07-25 14:51:16 +03005173 default:
5174 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005175 }
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005176
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01005177threebyte_insn:
5178
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005179 if (rc != X86EMUL_CONTINUE)
5180 goto done;
5181
Avi Kivity6aa8b732006-12-10 02:21:36 -08005182 goto writeback;
5183
5184cannot_emulate:
Gleb Natapova0c0ab22011-03-28 16:57:49 +02005185 return EMULATION_FAILED;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005186}
Avi Kivitydd856ef2012-08-27 23:46:17 +03005187
5188void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5189{
5190 invalidate_registers(ctxt);
5191}
5192
5193void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5194{
5195 writeback_registers(ctxt);
5196}
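
/*
 * Illustrative sketch only (not part of this file; the real driver
 * lives in x86.c and returns to the guest between iterations): how the
 * decode and execute entry points above are typically chained.
 * EMULATION_RESTART means a string instruction still has iterations to
 * run with the decode cache intact, so this sketch simply loops on it.
 */
static int __maybe_unused emulate_one_sketch(struct x86_emulate_ctxt *ctxt,
					     void *insn, int insn_len)
{
	int rc;

	rc = x86_decode_insn(ctxt, insn, insn_len);
	if (rc != EMULATION_OK)
		return rc;

	do {
		rc = x86_emulate_insn(ctxt);
	} while (rc == EMULATION_RESTART);

	return rc;
}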