/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
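
/*
 * Each of the Dst/Src/Src2 operand slots below holds one of the OpXxx
 * values above, packed into opcode::flags at DstShift/SrcShift/Src2Shift;
 * the remaining bits are single-bit attribute flags.
 */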

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

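/*
 * Lazily cached guest general-purpose registers.  reg_read() pulls a
 * register from the vcpu on first use and marks it valid; reg_write()
 * also marks it dirty so writeback_registers() knows which registers
 * to flush back when emulation completes.
 */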
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

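/*
 * For reference, FASTOP2(add) roughly expands to the assembly stub
 *
 *	em_add:				(FASTOP_SIZE aligned)
 *		addb %dl, %al; ret
 *		addw %dx, %ax; ret	(each entry FASTOP_SIZE aligned)
 *		addl %edx, %eax; ret
 *		addq %rdx, %rax; ret	(64-bit only)
 *
 * so, per the calling-convention comment above, the variant for a given
 * operand size sits at a fixed FASTOP_SIZE multiple from em_add.
 */
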
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

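/*
 * Exception helpers: record the pending fault in ctxt->exception and
 * return X86EMUL_PROPAGATE_FAULT so the caller can unwind and inject it.
 */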
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

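/*
 * Translate a segmented address into a linear address, applying canonical
 * and segment-limit checks.  *max_size is set to the number of bytes that
 * may be accessed at addr without exceeding the limit, so callers such as
 * the instruction prefetcher can clamp their access size.
 */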
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		if (size > *max_size)
			goto bad;
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

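/*
 * Far transfers may change CPU mode: a long-mode code segment (L bit set)
 * with EFER.LMA enabled selects 64-bit mode, otherwise the D bit of the
 * new CS selects between 16-bit and 32-bit protected mode.
 */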
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			mode = X86EMUL_MODE_PROT64;
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * When @byteop is set and no REX prefix is present, registers 4-7 decode
 * to the high-byte registers AH, CH, DH and BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

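/* Instantiate the fastop stubs for the arithmetic, shift and bit operations. */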
FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

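/*
 * Evaluate a condition code against the guest's flags by calling into the
 * em_setcc fastop block above; each SETcc stub is 4 bytes (see FOP_SETCC),
 * so the stub for a given condition lives at em_setcc + 4 * (cc & 0xf).
 */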
static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

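/*
 * SSE and MMX register numbers must be encoded directly in the
 * instruction, so these accessors dispatch through a switch of fixed
 * register moves instead of computing the register at run time.
 */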
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

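/*
 * Decode a register operand from the ModRM reg field, or from the low
 * opcode bits (extended by REX.B) when the instruction has no ModRM byte;
 * SSE and MMX operands are routed to their own register files.
 */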
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

Avi Kivity9dac77f2011-06-01 15:34:25 +03001285static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
Wei Yongjun35c843c2010-08-09 11:34:56 +08001286{
Sheng Yang7129eec2010-09-28 16:33:32 +08001287 long sv = 0, mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001288
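	/*
	 * For bt/bts/btr/btc with a memory destination and a register bit
	 * offset, the (signed) offset may address bits outside the operand:
	 * fold the operand-aligned part of the offset into the effective
	 * address and keep only the in-word bit index in the source.
	 */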
Avi Kivity9dac77f2011-06-01 15:34:25 +03001289 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
Nadav Amit7dec5602014-06-15 16:12:57 +03001290 mask = ~((long)ctxt->dst.bytes * 8 - 1);
Wei Yongjun35c843c2010-08-09 11:34:56 +08001291
Avi Kivity9dac77f2011-06-01 15:34:25 +03001292 if (ctxt->src.bytes == 2)
1293 sv = (s16)ctxt->src.val & (s16)mask;
1294 else if (ctxt->src.bytes == 4)
1295 sv = (s32)ctxt->src.val & (s32)mask;
Nadav Amit7dec5602014-06-15 16:12:57 +03001296 else
1297 sv = (s64)ctxt->src.val & (s64)mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001298
Nadav Amit1c1c35a2014-11-19 17:43:09 +02001299 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1300 ctxt->dst.addr.mem.ea + (sv >> 3));
Wei Yongjun35c843c2010-08-09 11:34:56 +08001301 }
Wei Yongjunba7ff2b2010-08-09 11:39:14 +08001302
1303 /* only subword offset */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001304 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001305}
1306
Gleb Natapov9de41572010-04-28 19:15:22 +03001307static int read_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov9de41572010-04-28 19:15:22 +03001308 unsigned long addr, void *dest, unsigned size)
1309{
1310 int rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001311 struct read_cache *mc = &ctxt->mem_read;
Gleb Natapov9de41572010-04-28 19:15:22 +03001312
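	/*
	 * Reads go through a small cache so that, when an instruction has to
	 * be re-emulated (for example after an exit to complete MMIO), data
	 * that was already read is replayed rather than fetched again.
	 */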
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001313 if (mc->pos < mc->end)
1314 goto read_cached;
Gleb Natapov9de41572010-04-28 19:15:22 +03001315
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001316 WARN_ON((mc->end + size) >= sizeof(mc->data));
Gleb Natapov9de41572010-04-28 19:15:22 +03001317
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001318 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1319 &ctxt->exception);
1320 if (rc != X86EMUL_CONTINUE)
1321 return rc;
1322
1323 mc->end += size;
1324
1325read_cached:
1326 memcpy(dest, mc->data + mc->pos, size);
1327 mc->pos += size;
Gleb Natapov9de41572010-04-28 19:15:22 +03001328 return X86EMUL_CONTINUE;
1329}
1330
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001331static int segmented_read(struct x86_emulate_ctxt *ctxt,
1332 struct segmented_address addr,
1333 void *data,
1334 unsigned size)
1335{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001336 int rc;
1337 ulong linear;
1338
Avi Kivity83b87952011-04-03 11:31:19 +03001339 rc = linearize(ctxt, addr, size, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001340 if (rc != X86EMUL_CONTINUE)
1341 return rc;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001342 return read_emulated(ctxt, linear, data, size);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001343}
1344
1345static int segmented_write(struct x86_emulate_ctxt *ctxt,
1346 struct segmented_address addr,
1347 const void *data,
1348 unsigned size)
1349{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001350 int rc;
1351 ulong linear;
1352
Avi Kivity83b87952011-04-03 11:31:19 +03001353 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001354 if (rc != X86EMUL_CONTINUE)
1355 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001356 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1357 &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001358}
1359
1360static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1361 struct segmented_address addr,
1362 const void *orig_data, const void *data,
1363 unsigned size)
1364{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001365 int rc;
1366 ulong linear;
1367
Avi Kivity83b87952011-04-03 11:31:19 +03001368 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001369 if (rc != X86EMUL_CONTINUE)
1370 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001371 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1372 size, &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001373}
1374
Gleb Natapov7b262e92010-03-18 15:20:27 +02001375static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov7b262e92010-03-18 15:20:27 +02001376 unsigned int size, unsigned short port,
1377 void *dest)
1378{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001379 struct read_cache *rc = &ctxt->io_read;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001380
1381 if (rc->pos == rc->end) { /* refill pio read ahead */
Gleb Natapov7b262e92010-03-18 15:20:27 +02001382 unsigned int in_page, n;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001383 unsigned int count = ctxt->rep_prefix ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001384 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001385 in_page = (ctxt->eflags & EFLG_DF) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001386 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1387 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
Mark Rustadb55a8142014-07-25 06:27:05 -07001388 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
Gleb Natapov7b262e92010-03-18 15:20:27 +02001389 if (n == 0)
1390 n = 1;
1391 rc->pos = rc->end = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001392 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
Gleb Natapov7b262e92010-03-18 15:20:27 +02001393 return 0;
1394 rc->end = n * size;
1395 }
1396
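	/*
	 * For a forward rep string input, hand the whole read-ahead buffer to
	 * the string writeback path so all elements are stored in one go.
	 */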
Nadav Amite6e39f02014-04-18 03:35:10 +03001397 if (ctxt->rep_prefix && (ctxt->d & String) &&
1398 !(ctxt->eflags & EFLG_DF)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001399 ctxt->dst.data = rc->data + rc->pos;
1400 ctxt->dst.type = OP_MEM_STR;
1401 ctxt->dst.count = (rc->end - rc->pos) / size;
1402 rc->pos = rc->end;
1403 } else {
1404 memcpy(dest, rc->data + rc->pos, size);
1405 rc->pos += size;
1406 }
Gleb Natapov7b262e92010-03-18 15:20:27 +02001407 return 1;
1408}
1409
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01001410static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1411 u16 index, struct desc_struct *desc)
1412{
1413 struct desc_ptr dt;
1414 ulong addr;
1415
1416 ctxt->ops->get_idt(ctxt, &dt);
1417
1418 if (dt.size < index * 8 + 7)
1419 return emulate_gp(ctxt, index << 3 | 0x2);
1420
1421 addr = dt.address + index * 8;
1422 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1423 &ctxt->exception);
1424}
1425
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001426static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001427 u16 selector, struct desc_ptr *dt)
1428{
Mathias Krause0225fb52012-08-30 01:30:16 +02001429 const struct x86_emulate_ops *ops = ctxt->ops;
Nadav Amit2eedcac2014-06-02 18:34:05 +03001430 u32 base3 = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001431
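	/* Bit 2 of the selector is the table indicator: LDT if set, GDT if clear. */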
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001432 if (selector & 1 << 2) {
1433 struct desc_struct desc;
Avi Kivity1aa36612011-04-27 13:20:30 +03001434 u16 sel;
1435
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001436 memset (dt, 0, sizeof *dt);
Nadav Amit2eedcac2014-06-02 18:34:05 +03001437 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1438 VCPU_SREG_LDTR))
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001439 return;
1440
1441 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
Nadav Amit2eedcac2014-06-02 18:34:05 +03001442 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001443 } else
Avi Kivity4bff1e862011-04-20 13:37:53 +03001444 ops->get_gdt(ctxt, dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001445}
1446
Nadav Amitedccda72014-12-25 02:52:23 +02001447static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1448 u16 selector, ulong *desc_addr_p)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001449{
1450 struct desc_ptr dt;
1451 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001452 ulong addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001453
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001454 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001455
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001456 if (dt.size < index * 8 + 7)
1457 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001458
1459 addr = dt.address + index * 8;
Nadav Amitedccda72014-12-25 02:52:23 +02001460
1461#ifdef CONFIG_X86_64
1462 if (addr >> 32 != 0) {
1463 u64 efer = 0;
1464
1465 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1466 if (!(efer & EFER_LMA))
1467 addr &= (u32)-1;
1468 }
1469#endif
1470
1471 *desc_addr_p = addr;
1472 return X86EMUL_CONTINUE;
1473}
1474
1475/* allowed just for 8 bytes segments */
1476static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1477 u16 selector, struct desc_struct *desc,
1478 ulong *desc_addr_p)
1479{
1480 int rc;
1481
1482 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1483 if (rc != X86EMUL_CONTINUE)
1484 return rc;
1485
1486 return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
1487 &ctxt->exception);
1488}
1489
1490/* allowed just for 8 bytes segments */
1491static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1492 u16 selector, struct desc_struct *desc)
1493{
1494 int rc;
1495 ulong addr;
1496
1497 rc = get_descriptor_ptr(ctxt, selector, &addr);
1498 if (rc != X86EMUL_CONTINUE)
1499 return rc;
1500
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001501 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1502 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001503}
1504
Gleb Natapov5601d052011-03-07 14:55:06 +02001505/* Does not support long mode */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001506static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Nadav Amitd1442d82014-09-18 22:39:39 +03001507 u16 selector, int seg, u8 cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001508 enum x86_transfer_type transfer,
Nadav Amitd1442d82014-09-18 22:39:39 +03001509 struct desc_struct *desc)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001510{
Avi Kivity869be992012-06-13 16:30:53 +03001511 struct desc_struct seg_desc, old_desc;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001512 u8 dpl, rpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001513 unsigned err_vec = GP_VECTOR;
1514 u32 err_code = 0;
1515 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
Avi Kivitye9194642012-06-13 16:29:39 +03001516 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001517 int ret;
Avi Kivity03ebebe2012-08-21 17:07:04 +03001518 u16 dummy;
Nadav Amite37a75a2014-06-02 18:34:04 +03001519 u32 base3 = 0;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001520
1521 memset(&seg_desc, 0, sizeof seg_desc);
1522
Kevin Wolff8da94e2013-04-11 14:06:03 +02001523 if (ctxt->mode == X86EMUL_MODE_REAL) {
1524 /* set real mode segment descriptor (keep limit etc. for
1525 * unreal mode) */
Avi Kivity03ebebe2012-08-21 17:07:04 +03001526 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001527 set_desc_base(&seg_desc, selector << 4);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001528 goto load;
Kevin Wolff8da94e2013-04-11 14:06:03 +02001529 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1530 /* VM86 needs a clean new segment descriptor */
1531 set_desc_base(&seg_desc, selector << 4);
1532 set_desc_limit(&seg_desc, 0xffff);
1533 seg_desc.type = 3;
1534 seg_desc.p = 1;
1535 seg_desc.s = 1;
1536 seg_desc.dpl = 3;
1537 goto load;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001538 }
1539
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001540 rpl = selector & 3;
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001541
1542 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1543 if ((seg == VCPU_SREG_CS
1544 || (seg == VCPU_SREG_SS
1545 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1546 || seg == VCPU_SREG_TR)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001547 && null_selector)
1548 goto exception;
1549
1550 /* TR should be in GDT only */
1551 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1552 goto exception;
1553
1554 if (null_selector) /* for NULL selector skip all following checks */
1555 goto load;
1556
Avi Kivitye9194642012-06-13 16:29:39 +03001557 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001558 if (ret != X86EMUL_CONTINUE)
1559 return ret;
1560
1561 err_code = selector & 0xfffc;
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001562 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1563 GP_VECTOR;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001564
Guo Chaofc058682012-06-28 15:19:51 +08001565	/* can't load a system descriptor into an ordinary segment register */
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001566 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1567 if (transfer == X86_TRANSFER_CALL_JMP)
1568 return X86EMUL_UNHANDLEABLE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001569 goto exception;
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001570 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001571
1572 if (!seg_desc.p) {
1573 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1574 goto exception;
1575 }
1576
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001577 dpl = seg_desc.dpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001578
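	/*
	 * The checks below mirror the hardware rules: SS needs a writable
	 * data segment with RPL == CPL and DPL == CPL; CS needs a code
	 * segment (conforming: DPL <= CPL, nonconforming: DPL == CPL and
	 * RPL <= CPL); TR and LDTR need the matching system descriptor
	 * types; the remaining segment registers take data or readable code
	 * segments and, unless the segment is conforming code, fault when
	 * both RPL and CPL exceed DPL.
	 */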
1579 switch (seg) {
1580 case VCPU_SREG_SS:
1581 /*
1582 * segment is not a writable data segment or segment
1583	 * selector's RPL != CPL or segment's DPL != CPL
1584 */
1585 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1586 goto exception;
1587 break;
1588 case VCPU_SREG_CS:
1589 if (!(seg_desc.type & 8))
1590 goto exception;
1591
1592 if (seg_desc.type & 4) {
1593 /* conforming */
1594 if (dpl > cpl)
1595 goto exception;
1596 } else {
1597 /* nonconforming */
1598 if (rpl > cpl || dpl != cpl)
1599 goto exception;
1600 }
Nadav Amit040c8dc2014-09-18 22:39:43 +03001601 /* in long-mode d/b must be clear if l is set */
1602 if (seg_desc.d && seg_desc.l) {
1603 u64 efer = 0;
1604
1605 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1606 if (efer & EFER_LMA)
1607 goto exception;
1608 }
1609
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001610 /* CS(RPL) <- CPL */
1611 selector = (selector & 0xfffc) | cpl;
1612 break;
1613 case VCPU_SREG_TR:
1614 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1615 goto exception;
Avi Kivity869be992012-06-13 16:30:53 +03001616 old_desc = seg_desc;
1617 seg_desc.type |= 2; /* busy */
1618 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1619 sizeof(seg_desc), &ctxt->exception);
1620 if (ret != X86EMUL_CONTINUE)
1621 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001622 break;
1623 case VCPU_SREG_LDTR:
1624 if (seg_desc.s || seg_desc.type != 2)
1625 goto exception;
1626 break;
1627 default: /* DS, ES, FS, or GS */
1628 /*
1629 * segment is not a data or readable code segment or
1630 * ((segment is a data or nonconforming code segment)
1631 * and (both RPL and CPL > DPL))
1632 */
1633 if ((seg_desc.type & 0xa) == 0x8 ||
1634 (((seg_desc.type & 0xc) != 0xc) &&
1635 (rpl > dpl && cpl > dpl)))
1636 goto exception;
1637 break;
1638 }
1639
1640 if (seg_desc.s) {
1641 /* mark segment as accessed */
Nadav Amite2cefa72014-12-25 02:52:22 +02001642 if (!(seg_desc.type & 1)) {
1643 seg_desc.type |= 1;
1644 ret = write_segment_descriptor(ctxt, selector,
1645 &seg_desc);
1646 if (ret != X86EMUL_CONTINUE)
1647 return ret;
1648 }
Nadav Amite37a75a2014-06-02 18:34:04 +03001649 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1650 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1651 sizeof(base3), &ctxt->exception);
1652 if (ret != X86EMUL_CONTINUE)
1653 return ret;
Nadav Amit9a9abf62014-11-02 11:54:56 +02001654 if (is_noncanonical_address(get_desc_base(&seg_desc) |
1655 ((u64)base3 << 32)))
1656 return emulate_gp(ctxt, 0);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001657 }
1658load:
Nadav Amite37a75a2014-06-02 18:34:04 +03001659 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
Nadav Amitd1442d82014-09-18 22:39:39 +03001660 if (desc)
1661 *desc = seg_desc;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001662 return X86EMUL_CONTINUE;
1663exception:
Paolo Bonzini592f0852014-08-20 10:05:08 +02001664 return emulate_exception(ctxt, err_vec, err_code, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001665}
1666
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001667static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1668 u16 selector, int seg)
1669{
1670 u8 cpl = ctxt->ops->cpl(ctxt);
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001671 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1672 X86_TRANSFER_NONE, NULL);
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001673}
1674
Wei Yongjun31be40b2010-08-17 09:17:30 +08001675static void write_register_operand(struct operand *op)
1676{
1677 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1678 switch (op->bytes) {
1679 case 1:
1680 *(u8 *)op->addr.reg = (u8)op->val;
1681 break;
1682 case 2:
1683 *(u16 *)op->addr.reg = (u16)op->val;
1684 break;
1685 case 4:
1686 *op->addr.reg = (u32)op->val;
1687 break; /* 64b: zero-extend */
1688 case 8:
1689 *op->addr.reg = op->val;
1690 break;
1691 }
1692}
1693
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001694static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
Wei Yongjunc37eda12010-06-15 09:03:33 +08001695{
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001696 switch (op->type) {
Wei Yongjunc37eda12010-06-15 09:03:33 +08001697 case OP_REG:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001698 write_register_operand(op);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001699 break;
1700 case OP_MEM:
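		/*
		 * A locked operand is committed with a compare-and-exchange
		 * against the value that was read, keeping the RMW atomic.
		 */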
Avi Kivity9dac77f2011-06-01 15:34:25 +03001701 if (ctxt->lock_prefix)
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001702 return segmented_cmpxchg(ctxt,
1703 op->addr.mem,
1704 &op->orig_val,
1705 &op->val,
1706 op->bytes);
1707 else
1708 return segmented_write(ctxt,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001709 op->addr.mem,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001710 &op->val,
1711 op->bytes);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001712 break;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001713 case OP_MEM_STR:
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001714 return segmented_write(ctxt,
1715 op->addr.mem,
1716 op->data,
1717 op->bytes * op->count);
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001718 break;
Avi Kivity1253791d2011-03-29 11:41:27 +02001719 case OP_XMM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001720 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
Avi Kivity1253791d2011-03-29 11:41:27 +02001721 break;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001722 case OP_MM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001723 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001724 break;
Wei Yongjunc37eda12010-06-15 09:03:33 +08001725 case OP_NONE:
1726 /* no writeback */
1727 break;
1728 default:
1729 break;
1730 }
1731 return X86EMUL_CONTINUE;
1732}
1733
Avi Kivity51ddff52012-06-12 20:19:40 +03001734static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001735{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001736 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001737
Avi Kivity5ad105e2012-08-19 14:34:31 +03001738 rsp_increment(ctxt, -bytes);
Avi Kivitydd856ef2012-08-27 23:46:17 +03001739 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001740 addr.seg = VCPU_SREG_SS;
1741
Avi Kivity51ddff52012-06-12 20:19:40 +03001742 return segmented_write(ctxt, addr, data, bytes);
1743}
1744
1745static int em_push(struct x86_emulate_ctxt *ctxt)
1746{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001747 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001748 ctxt->dst.type = OP_NONE;
Avi Kivity51ddff52012-06-12 20:19:40 +03001749 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001750}
1751
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001752static int emulate_pop(struct x86_emulate_ctxt *ctxt,
Avi Kivity350f69d2009-01-05 11:12:40 +02001753 void *dest, int len)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001754{
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001755 int rc;
Avi Kivity90de84f2010-11-17 15:28:21 +02001756 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001757
Avi Kivitydd856ef2012-08-27 23:46:17 +03001758 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Avi Kivity90de84f2010-11-17 15:28:21 +02001759 addr.seg = VCPU_SREG_SS;
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001760 rc = segmented_read(ctxt, addr, dest, len);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09001761 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001762 return rc;
1763
Avi Kivity5ad105e2012-08-19 14:34:31 +03001764 rsp_increment(ctxt, len);
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001765 return rc;
1766}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001767
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001768static int em_pop(struct x86_emulate_ctxt *ctxt)
1769{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001770 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001771}
1772
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001773static int emulate_popf(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001774 void *dest, int len)
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001775{
1776 int rc;
1777 unsigned long val, change_mask;
1778 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001779 int cpl = ctxt->ops->cpl(ctxt);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001780
Takuya Yoshikawa3b9be3b2011-05-02 02:27:55 +09001781 rc = emulate_pop(ctxt, &val, len);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001782 if (rc != X86EMUL_CONTINUE)
1783 return rc;
1784
1785 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
Nadav Amit163b1352014-07-21 14:37:28 +03001786 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001787
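	/*
	 * IF may only be changed when CPL <= IOPL and IOPL itself only at
	 * CPL 0; in VM86 mode with IOPL < 3 POPF faults instead.
	 */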
1788 switch(ctxt->mode) {
1789 case X86EMUL_MODE_PROT64:
1790 case X86EMUL_MODE_PROT32:
1791 case X86EMUL_MODE_PROT16:
1792 if (cpl == 0)
1793 change_mask |= EFLG_IOPL;
1794 if (cpl <= iopl)
1795 change_mask |= EFLG_IF;
1796 break;
1797 case X86EMUL_MODE_VM86:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001798 if (iopl < 3)
1799 return emulate_gp(ctxt, 0);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001800 change_mask |= EFLG_IF;
1801 break;
1802 default: /* real mode */
1803 change_mask |= (EFLG_IOPL | EFLG_IF);
1804 break;
1805 }
1806
1807 *(unsigned long *)dest =
1808 (ctxt->eflags & ~change_mask) | (val & change_mask);
1809
1810 return rc;
1811}
1812
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001813static int em_popf(struct x86_emulate_ctxt *ctxt)
1814{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001815 ctxt->dst.type = OP_REG;
1816 ctxt->dst.addr.reg = &ctxt->eflags;
1817 ctxt->dst.bytes = ctxt->op_bytes;
1818 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001819}
1820
Avi Kivity612e89f2012-06-12 20:03:23 +03001821static int em_enter(struct x86_emulate_ctxt *ctxt)
1822{
1823 int rc;
1824 unsigned frame_size = ctxt->src.val;
1825 unsigned nesting_level = ctxt->src2.val & 31;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001826 ulong rbp;
Avi Kivity612e89f2012-06-12 20:03:23 +03001827
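	/*
	 * Only nesting level 0 is handled: push rBP, point rBP at the new
	 * frame and then reserve frame_size bytes of stack.
	 */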
1828 if (nesting_level)
1829 return X86EMUL_UNHANDLEABLE;
1830
Avi Kivitydd856ef2012-08-27 23:46:17 +03001831 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1832 rc = push(ctxt, &rbp, stack_size(ctxt));
Avi Kivity612e89f2012-06-12 20:03:23 +03001833 if (rc != X86EMUL_CONTINUE)
1834 return rc;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001835 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
Avi Kivity612e89f2012-06-12 20:03:23 +03001836 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001837 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1838 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
Avi Kivity612e89f2012-06-12 20:03:23 +03001839 stack_mask(ctxt));
1840 return X86EMUL_CONTINUE;
1841}
1842
Avi Kivityf47cfa32012-06-07 17:49:24 +03001843static int em_leave(struct x86_emulate_ctxt *ctxt)
1844{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001845 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
Avi Kivityf47cfa32012-06-07 17:49:24 +03001846 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001847 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
Avi Kivityf47cfa32012-06-07 17:49:24 +03001848}
1849
Avi Kivity1cd196e2011-09-13 10:45:51 +03001850static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001851{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001852 int seg = ctxt->src2.val;
1853
Avi Kivity9dac77f2011-06-01 15:34:25 +03001854 ctxt->src.val = get_segment_selector(ctxt, seg);
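	/*
	 * A 32-bit push of a segment register moves RSP by 4 but writes only
	 * the low word of the slot; the upper word is left untouched.
	 */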
Nadav Amit0fcc2072014-11-02 11:54:51 +02001855 if (ctxt->op_bytes == 4) {
1856 rsp_increment(ctxt, -2);
1857 ctxt->op_bytes = 2;
1858 }
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001859
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001860 return em_push(ctxt);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001861}
1862
Avi Kivity1cd196e2011-09-13 10:45:51 +03001863static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001864{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001865 int seg = ctxt->src2.val;
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001866 unsigned long selector;
1867 int rc;
1868
Nadav Amit3313bc42014-12-25 02:52:17 +02001869 rc = emulate_pop(ctxt, &selector, 2);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001870 if (rc != X86EMUL_CONTINUE)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001871 return rc;
1872
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001873 if (ctxt->modrm_reg == VCPU_SREG_SS)
1874 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
Nadav Amit3313bc42014-12-25 02:52:17 +02001875 if (ctxt->op_bytes > 2)
1876 rsp_increment(ctxt, ctxt->op_bytes - 2);
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001877
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001878 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001879 return rc;
1880}
1881
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001882static int em_pusha(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001883{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001884 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001885 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001886 int reg = VCPU_REGS_RAX;
1887
1888 while (reg <= VCPU_REGS_RDI) {
1889 (reg == VCPU_REGS_RSP) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001890 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001891
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001892 rc = em_push(ctxt);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001893 if (rc != X86EMUL_CONTINUE)
1894 return rc;
1895
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001896 ++reg;
1897 }
Wei Yongjunc37eda12010-06-15 09:03:33 +08001898
Wei Yongjunc37eda12010-06-15 09:03:33 +08001899 return rc;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001900}
1901
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001902static int em_pushf(struct x86_emulate_ctxt *ctxt)
1903{
Nadav Amitbc397a62014-12-10 11:19:03 +02001904 ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001905 return em_push(ctxt);
1906}
1907
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001908static int em_popa(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001909{
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001910 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001911 int reg = VCPU_REGS_RDI;
1912
1913 while (reg >= VCPU_REGS_RAX) {
1914 if (reg == VCPU_REGS_RSP) {
Avi Kivity5ad105e2012-08-19 14:34:31 +03001915 rsp_increment(ctxt, ctxt->op_bytes);
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001916 --reg;
1917 }
1918
Avi Kivitydd856ef2012-08-27 23:46:17 +03001919 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001920 if (rc != X86EMUL_CONTINUE)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001921 break;
1922 --reg;
1923 }
1924 return rc;
1925}
1926
Avi Kivitydd856ef2012-08-27 23:46:17 +03001927static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001928{
Mathias Krause0225fb52012-08-30 01:30:16 +02001929 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001930 int rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001931 struct desc_ptr dt;
1932 gva_t cs_addr;
1933 gva_t eip_addr;
1934 u16 cs, eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001935
1936 /* TODO: Add limit checks */
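	/*
	 * Real-mode interrupt: push FLAGS, CS and IP, clear IF/TF/AC, then
	 * load the new CS:IP from the 4-byte IVT entry at irq * 4.
	 */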
Avi Kivity9dac77f2011-06-01 15:34:25 +03001937 ctxt->src.val = ctxt->eflags;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001938 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001939 if (rc != X86EMUL_CONTINUE)
1940 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001941
1942 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1943
Avi Kivity9dac77f2011-06-01 15:34:25 +03001944 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001945 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001946 if (rc != X86EMUL_CONTINUE)
1947 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001948
Avi Kivity9dac77f2011-06-01 15:34:25 +03001949 ctxt->src.val = ctxt->_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001950 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001951 if (rc != X86EMUL_CONTINUE)
1952 return rc;
1953
Avi Kivity4bff1e862011-04-20 13:37:53 +03001954 ops->get_idt(ctxt, &dt);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001955
1956 eip_addr = dt.address + (irq << 2);
1957 cs_addr = dt.address + (irq << 2) + 2;
1958
Avi Kivity0f65dd72011-04-20 13:37:53 +03001959 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001960 if (rc != X86EMUL_CONTINUE)
1961 return rc;
1962
Avi Kivity0f65dd72011-04-20 13:37:53 +03001963 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001964 if (rc != X86EMUL_CONTINUE)
1965 return rc;
1966
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001967 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001968 if (rc != X86EMUL_CONTINUE)
1969 return rc;
1970
Avi Kivity9dac77f2011-06-01 15:34:25 +03001971 ctxt->_eip = eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001972
1973 return rc;
1974}
1975
Avi Kivitydd856ef2012-08-27 23:46:17 +03001976int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1977{
1978 int rc;
1979
1980 invalidate_registers(ctxt);
1981 rc = __emulate_int_real(ctxt, irq);
1982 if (rc == X86EMUL_CONTINUE)
1983 writeback_registers(ctxt);
1984 return rc;
1985}
1986
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001987static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001988{
1989 switch(ctxt->mode) {
1990 case X86EMUL_MODE_REAL:
Avi Kivitydd856ef2012-08-27 23:46:17 +03001991 return __emulate_int_real(ctxt, irq);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001992 case X86EMUL_MODE_VM86:
1993 case X86EMUL_MODE_PROT16:
1994 case X86EMUL_MODE_PROT32:
1995 case X86EMUL_MODE_PROT64:
1996 default:
1997 /* Protected mode interrupts unimplemented yet */
1998 return X86EMUL_UNHANDLEABLE;
1999 }
2000}
2001
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002002static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002003{
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002004 int rc = X86EMUL_CONTINUE;
2005 unsigned long temp_eip = 0;
2006 unsigned long temp_eflags = 0;
2007 unsigned long cs = 0;
2008 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
2009 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
2010 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
2011 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
2012
2013 /* TODO: Add stack limit check */
2014
Avi Kivity9dac77f2011-06-01 15:34:25 +03002015 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002016
2017 if (rc != X86EMUL_CONTINUE)
2018 return rc;
2019
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002020 if (temp_eip & ~0xffff)
2021 return emulate_gp(ctxt, 0);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002022
Avi Kivity9dac77f2011-06-01 15:34:25 +03002023 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002024
2025 if (rc != X86EMUL_CONTINUE)
2026 return rc;
2027
Avi Kivity9dac77f2011-06-01 15:34:25 +03002028 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002029
2030 if (rc != X86EMUL_CONTINUE)
2031 return rc;
2032
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002033 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002034
2035 if (rc != X86EMUL_CONTINUE)
2036 return rc;
2037
Avi Kivity9dac77f2011-06-01 15:34:25 +03002038 ctxt->_eip = temp_eip;
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002039
2040
Avi Kivity9dac77f2011-06-01 15:34:25 +03002041 if (ctxt->op_bytes == 4)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002042 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
Avi Kivity9dac77f2011-06-01 15:34:25 +03002043 else if (ctxt->op_bytes == 2) {
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002044 ctxt->eflags &= ~0xffff;
2045 ctxt->eflags |= temp_eflags;
2046 }
2047
2048 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2049 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2050
2051 return rc;
2052}
2053
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002054static int em_iret(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002055{
2056 switch(ctxt->mode) {
2057 case X86EMUL_MODE_REAL:
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002058 return emulate_iret_real(ctxt);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002059 case X86EMUL_MODE_VM86:
2060 case X86EMUL_MODE_PROT16:
2061 case X86EMUL_MODE_PROT32:
2062 case X86EMUL_MODE_PROT64:
2063 default:
2064 /* iret from protected mode unimplemented yet */
2065 return X86EMUL_UNHANDLEABLE;
2066 }
2067}
2068
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002069static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2070{
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002071 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002072 unsigned short sel, old_sel;
2073 struct desc_struct old_desc, new_desc;
2074 const struct x86_emulate_ops *ops = ctxt->ops;
2075 u8 cpl = ctxt->ops->cpl(ctxt);
2076
2077 /* Assignment of RIP may only fail in 64-bit mode */
2078 if (ctxt->mode == X86EMUL_MODE_PROT64)
2079 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2080 VCPU_SREG_CS);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002081
Avi Kivity9dac77f2011-06-01 15:34:25 +03002082 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002083
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002084 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2085 X86_TRANSFER_CALL_JMP,
Nadav Amitd1442d82014-09-18 22:39:39 +03002086 &new_desc);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002087 if (rc != X86EMUL_CONTINUE)
2088 return rc;
2089
Nadav Amitd50eaa12014-11-19 17:43:11 +02002090 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03002091 if (rc != X86EMUL_CONTINUE) {
Nadav Amit7e46ddd2014-10-28 00:03:43 +02002092 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002093 /* assigning eip failed; restore the old cs */
2094 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2095 return rc;
2096 }
2097 return rc;
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002098}
2099
Nadav Amitf7784042014-09-18 22:39:41 +03002100static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002101{
Nadav Amitf7784042014-09-18 22:39:41 +03002102 return assign_eip_near(ctxt, ctxt->src.val);
2103}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002104
Nadav Amitf7784042014-09-18 22:39:41 +03002105static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2106{
2107 int rc;
2108 long int old_eip;
2109
2110 old_eip = ctxt->_eip;
2111 rc = assign_eip_near(ctxt, ctxt->src.val);
2112 if (rc != X86EMUL_CONTINUE)
2113 return rc;
2114 ctxt->src.val = old_eip;
2115 rc = em_push(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09002116 return rc;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002117}
2118
Takuya Yoshikawae0dac402011-12-06 18:07:27 +09002119static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002120{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002121 u64 old = ctxt->dst.orig_val64;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002122
Nadav Amitaaa05f22014-06-02 18:34:10 +03002123 if (ctxt->dst.bytes == 16)
2124 return X86EMUL_UNHANDLEABLE;
2125
Avi Kivitydd856ef2012-08-27 23:46:17 +03002126 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2127 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2128 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2129 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
Laurent Vivier05f086f2007-09-24 11:10:55 +02002130 ctxt->eflags &= ~EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002131 } else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03002132 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2133 (u32) reg_read(ctxt, VCPU_REGS_RBX);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002134
Laurent Vivier05f086f2007-09-24 11:10:55 +02002135 ctxt->eflags |= EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002136 }
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002137 return X86EMUL_CONTINUE;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002138}
2139
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002140static int em_ret(struct x86_emulate_ctxt *ctxt)
2141{
Nadav Amit234f3ce2014-09-18 22:39:38 +03002142 int rc;
2143 unsigned long eip;
2144
2145 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2146 if (rc != X86EMUL_CONTINUE)
2147 return rc;
2148
2149 return assign_eip_near(ctxt, eip);
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002150}
2151
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002152static int em_ret_far(struct x86_emulate_ctxt *ctxt)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002153{
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002154 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002155 unsigned long eip, cs;
2156 u16 old_cs;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002157 int cpl = ctxt->ops->cpl(ctxt);
Nadav Amitd1442d82014-09-18 22:39:39 +03002158 struct desc_struct old_desc, new_desc;
2159 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002160
Nadav Amitd1442d82014-09-18 22:39:39 +03002161 if (ctxt->mode == X86EMUL_MODE_PROT64)
2162 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2163 VCPU_SREG_CS);
2164
2165 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002166 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002167 return rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002168 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002169 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002170 return rc;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002171 /* Outer-privilege level return is not implemented */
2172 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2173 return X86EMUL_UNHANDLEABLE;
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002174 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2175 X86_TRANSFER_RET,
Nadav Amitd1442d82014-09-18 22:39:39 +03002176 &new_desc);
2177 if (rc != X86EMUL_CONTINUE)
2178 return rc;
Nadav Amitd50eaa12014-11-19 17:43:11 +02002179 rc = assign_eip_far(ctxt, eip, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03002180 if (rc != X86EMUL_CONTINUE) {
Nadav Amit7e46ddd2014-10-28 00:03:43 +02002181 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002182 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2183 }
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002184 return rc;
2185}
2186
Bruce Rogers32611072013-09-09 09:40:20 -06002187static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2188{
2189 int rc;
2190
2191 rc = em_ret_far(ctxt);
2192 if (rc != X86EMUL_CONTINUE)
2193 return rc;
2194 rsp_increment(ctxt, ctxt->src.val);
2195 return X86EMUL_CONTINUE;
2196}
2197
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002198static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2199{
2200 /* Save real source value, then compare EAX against destination. */
Nadav Amit37c564f2014-06-02 18:34:07 +03002201 ctxt->dst.orig_val = ctxt->dst.val;
2202 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002203 ctxt->src.orig_val = ctxt->src.val;
Nadav Amit37c564f2014-06-02 18:34:07 +03002204 ctxt->src.val = ctxt->dst.orig_val;
Avi Kivity158de572013-01-19 19:51:57 +02002205 fastop(ctxt, em_cmp);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002206
2207 if (ctxt->eflags & EFLG_ZF) {
Nadav Amit2fcf5c82015-01-26 09:32:21 +02002208 /* Success: write back to memory; no update of EAX */
2209 ctxt->src.type = OP_NONE;
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002210 ctxt->dst.val = ctxt->src.orig_val;
2211 } else {
2212 /* Failure: write the value we saw to EAX. */
Nadav Amit2fcf5c82015-01-26 09:32:21 +02002213 ctxt->src.type = OP_REG;
2214 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2215 ctxt->src.val = ctxt->dst.orig_val;
2216 /* Create write-cycle to dest by writing the same value */
Nadav Amit37c564f2014-06-02 18:34:07 +03002217 ctxt->dst.val = ctxt->dst.orig_val;
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002218 }
2219 return X86EMUL_CONTINUE;
2220}
2221
Avi Kivityd4b43252011-09-13 10:45:50 +03002222static int em_lseg(struct x86_emulate_ctxt *ctxt)
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002223{
Avi Kivityd4b43252011-09-13 10:45:50 +03002224 int seg = ctxt->src2.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002225 unsigned short sel;
2226 int rc;
2227
Avi Kivity9dac77f2011-06-01 15:34:25 +03002228 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002229
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002230 rc = load_segment_descriptor(ctxt, sel, seg);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002231 if (rc != X86EMUL_CONTINUE)
2232 return rc;
2233
Avi Kivity9dac77f2011-06-01 15:34:25 +03002234 ctxt->dst.val = ctxt->src.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002235 return rc;
2236}
2237
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002238static void
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002239setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002240 struct desc_struct *cs, struct desc_struct *ss)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002241{
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002242 cs->l = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002243 set_desc_base(cs, 0); /* flat segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002244 cs->g = 1; /* 4kb granularity */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002245 set_desc_limit(cs, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002246 cs->type = 0x0b; /* Read, Execute, Accessed */
2247 cs->s = 1;
2248 cs->dpl = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002249 cs->p = 1;
2250 cs->d = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002251 cs->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002252
Gleb Natapov79168fd2010-04-28 19:15:30 +03002253 set_desc_base(ss, 0); /* flat segment */
2254 set_desc_limit(ss, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002255 ss->g = 1; /* 4kb granularity */
2256 ss->s = 1;
2257 ss->type = 0x03; /* Read/Write, Accessed */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002258 ss->d = 1; /* 32bit stack segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002259 ss->dpl = 0;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002260 ss->p = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002261 ss->l = 0;
2262 ss->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002263}
2264
Avi Kivity1a18a692012-02-01 12:23:21 +02002265static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2266{
2267 u32 eax, ebx, ecx, edx;
2268
2269 eax = ecx = 0;
Avi Kivity0017f932012-06-07 14:10:16 +03002270 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2271 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
Avi Kivity1a18a692012-02-01 12:23:21 +02002272 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2273 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2274}
2275
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002276static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2277{
Mathias Krause0225fb52012-08-30 01:30:16 +02002278 const struct x86_emulate_ops *ops = ctxt->ops;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002279 u32 eax, ebx, ecx, edx;
2280
2281 /*
2282	 * syscall is always enabled in long mode, so the check only needs
2283	 * to become vendor specific (via cpuid) when other modes are active...
2284 */
2285 if (ctxt->mode == X86EMUL_MODE_PROT64)
2286 return true;
2287
2288 eax = 0x00000000;
2289 ecx = 0x00000000;
Avi Kivity0017f932012-06-07 14:10:16 +03002290 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2291 /*
2292 * Intel ("GenuineIntel")
2293	 * remark: Intel CPUs only support "syscall" in 64-bit long mode,
2294	 * so a 32-bit compat application in a 64-bit guest will #UD.
2295	 * The emulator could hide this by giving the AMD response
2296	 * instead, but AMD CPUs cannot be made to behave like Intel,
2297	 * so the Intel behaviour is kept.
2298 */
2299 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2300 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2301 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2302 return false;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002303
Avi Kivity0017f932012-06-07 14:10:16 +03002304 /* AMD ("AuthenticAMD") */
2305 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2306 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2307 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2308 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002309
Avi Kivity0017f932012-06-07 14:10:16 +03002310 /* AMD ("AMDisbetter!") */
2311 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2312 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2313 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2314 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002315
2316 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2317 return false;
2318}
2319
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002320static int em_syscall(struct x86_emulate_ctxt *ctxt)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002321{
Mathias Krause0225fb52012-08-30 01:30:16 +02002322 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002323 struct desc_struct cs, ss;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002324 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002325 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002326 u64 efer = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002327
2328 /* syscall is not available in real mode */
Gleb Natapov2e901c42010-03-18 15:20:12 +02002329 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002330 ctxt->mode == X86EMUL_MODE_VM86)
2331 return emulate_ud(ctxt);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002332
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002333 if (!(em_syscall_is_enabled(ctxt)))
2334 return emulate_ud(ctxt);
2335
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002336 ops->get_msr(ctxt, MSR_EFER, &efer);
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002337 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002338
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002339 if (!(efer & EFER_SCE))
2340 return emulate_ud(ctxt);
2341
Avi Kivity717746e2011-04-20 13:37:53 +03002342 ops->get_msr(ctxt, MSR_STAR, &msr_data);
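	/* The SYSCALL target CS comes from STAR[47:32]; SS is that selector + 8. */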
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002343 msr_data >>= 32;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002344 cs_sel = (u16)(msr_data & 0xfffc);
2345 ss_sel = (u16)(msr_data + 8);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002346
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002347 if (efer & EFER_LMA) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002348 cs.d = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002349 cs.l = 1;
2350 }
Avi Kivity1aa36612011-04-27 13:20:30 +03002351 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2352 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002353
Avi Kivitydd856ef2012-08-27 23:46:17 +03002354 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002355 if (efer & EFER_LMA) {
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002356#ifdef CONFIG_X86_64
Nadav Amit6c6cb692014-07-21 14:37:30 +03002357 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002358
Avi Kivity717746e2011-04-20 13:37:53 +03002359 ops->get_msr(ctxt,
Gleb Natapov3fb1b5d2010-04-28 19:15:28 +03002360 ctxt->mode == X86EMUL_MODE_PROT64 ?
2361 MSR_LSTAR : MSR_CSTAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002362 ctxt->_eip = msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002363
Avi Kivity717746e2011-04-20 13:37:53 +03002364 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
Nadav Amit6c6cb692014-07-21 14:37:30 +03002365 ctxt->eflags &= ~msr_data;
Nadav Amit807c1422014-11-02 11:54:49 +02002366 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002367#endif
2368 } else {
2369 /* legacy mode */
Avi Kivity717746e2011-04-20 13:37:53 +03002370 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002371 ctxt->_eip = (u32)msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002372
Nadav Amit6c6cb692014-07-21 14:37:30 +03002373 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002374 }
2375
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002376 return X86EMUL_CONTINUE;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002377}
2378
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002379static int em_sysenter(struct x86_emulate_ctxt *ctxt)
Andre Przywara8c604352009-06-18 12:56:01 +02002380{
Mathias Krause0225fb52012-08-30 01:30:16 +02002381 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002382 struct desc_struct cs, ss;
Andre Przywara8c604352009-06-18 12:56:01 +02002383 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002384 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002385 u64 efer = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002386
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002387 ops->get_msr(ctxt, MSR_EFER, &efer);
Gleb Natapova0044752010-02-10 14:21:31 +02002388 /* inject #GP if in real mode */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002389 if (ctxt->mode == X86EMUL_MODE_REAL)
2390 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002391
Avi Kivity1a18a692012-02-01 12:23:21 +02002392 /*
2393 * Not recognized on AMD in compat mode (but is recognized in legacy
2394 * mode).
2395 */
2396 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2397 && !vendor_intel(ctxt))
2398 return emulate_ud(ctxt);
2399
Nadav Amitb2c9d432014-11-02 11:55:01 +02002400 /* sysenter/sysexit have not been tested in 64bit mode. */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002401 if (ctxt->mode == X86EMUL_MODE_PROT64)
Nadav Amitb2c9d432014-11-02 11:55:01 +02002402 return X86EMUL_UNHANDLEABLE;
Andre Przywara8c604352009-06-18 12:56:01 +02002403
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002404 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara8c604352009-06-18 12:56:01 +02002405
Avi Kivity717746e2011-04-20 13:37:53 +03002406 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara8c604352009-06-18 12:56:01 +02002407 switch (ctxt->mode) {
2408 case X86EMUL_MODE_PROT32:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002409 if ((msr_data & 0xfffc) == 0x0)
2410 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002411 break;
2412 case X86EMUL_MODE_PROT64:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002413 if (msr_data == 0x0)
2414 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002415 break;
Gleb Natapov9d1b39a2012-09-03 15:24:27 +03002416 default:
2417 break;
Andre Przywara8c604352009-06-18 12:56:01 +02002418 }
2419
Nadav Amit6c6cb692014-07-21 14:37:30 +03002420 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
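	/*
	 * SYSENTER: CS comes from IA32_SYSENTER_CS with RPL forced to 0 and
	 * SS is the next descriptor (CS + 8); RIP and RSP are then loaded
	 * from IA32_SYSENTER_EIP and IA32_SYSENTER_ESP below.
	 */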
Gleb Natapov79168fd2010-04-28 19:15:30 +03002421 cs_sel = (u16)msr_data;
2422 cs_sel &= ~SELECTOR_RPL_MASK;
2423 ss_sel = cs_sel + 8;
2424 ss_sel &= ~SELECTOR_RPL_MASK;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002425 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002426 cs.d = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002427 cs.l = 1;
2428 }
2429
Avi Kivity1aa36612011-04-27 13:20:30 +03002430 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2431 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara8c604352009-06-18 12:56:01 +02002432
Avi Kivity717746e2011-04-20 13:37:53 +03002433 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002434 ctxt->_eip = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002435
Avi Kivity717746e2011-04-20 13:37:53 +03002436 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002437 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002438
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002439 return X86EMUL_CONTINUE;
Andre Przywara8c604352009-06-18 12:56:01 +02002440}
2441
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002442static int em_sysexit(struct x86_emulate_ctxt *ctxt)
Andre Przywara4668f052009-06-18 12:56:02 +02002443{
Mathias Krause0225fb52012-08-30 01:30:16 +02002444 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002445 struct desc_struct cs, ss;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002446 u64 msr_data, rcx, rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002447 int usermode;
Xiao Guangrong1249b962011-05-15 23:25:10 +08002448 u16 cs_sel = 0, ss_sel = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002449
Gleb Natapova0044752010-02-10 14:21:31 +02002450 /* inject #GP if in real mode or Virtual 8086 mode */
2451 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002452 ctxt->mode == X86EMUL_MODE_VM86)
2453 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002454
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002455 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara4668f052009-06-18 12:56:02 +02002456
Avi Kivity9dac77f2011-06-01 15:34:25 +03002457 if ((ctxt->rex_prefix & 0x8) != 0x0)
Andre Przywara4668f052009-06-18 12:56:02 +02002458 usermode = X86EMUL_MODE_PROT64;
2459 else
2460 usermode = X86EMUL_MODE_PROT32;
2461
Nadav Amit234f3ce2014-09-18 22:39:38 +03002462 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2463 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2464
Andre Przywara4668f052009-06-18 12:56:02 +02002465 cs.dpl = 3;
2466 ss.dpl = 3;
Avi Kivity717746e2011-04-20 13:37:53 +03002467 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara4668f052009-06-18 12:56:02 +02002468 switch (usermode) {
2469 case X86EMUL_MODE_PROT32:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002470 cs_sel = (u16)(msr_data + 16);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002471 if ((msr_data & 0xfffc) == 0x0)
2472 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002473 ss_sel = (u16)(msr_data + 24);
Nadav Amitbf0b6822014-09-18 22:39:45 +03002474 rcx = (u32)rcx;
2475 rdx = (u32)rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002476 break;
2477 case X86EMUL_MODE_PROT64:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002478 cs_sel = (u16)(msr_data + 32);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002479 if (msr_data == 0x0)
2480 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002481 ss_sel = cs_sel + 8;
2482 cs.d = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002483 cs.l = 1;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002484 if (is_noncanonical_address(rcx) ||
2485 is_noncanonical_address(rdx))
2486 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002487 break;
2488 }
Gleb Natapov79168fd2010-04-28 19:15:30 +03002489 cs_sel |= SELECTOR_RPL_MASK;
2490 ss_sel |= SELECTOR_RPL_MASK;
Andre Przywara4668f052009-06-18 12:56:02 +02002491
Avi Kivity1aa36612011-04-27 13:20:30 +03002492 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2493 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara4668f052009-06-18 12:56:02 +02002494
Nadav Amit234f3ce2014-09-18 22:39:38 +03002495 ctxt->_eip = rdx;
2496 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
Andre Przywara4668f052009-06-18 12:56:02 +02002497
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002498 return X86EMUL_CONTINUE;
Andre Przywara4668f052009-06-18 12:56:02 +02002499}
2500
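/*
 * Returns true when the current privilege level does not satisfy IOPL
 * (always true in VM86 mode, never in real mode); callers then fall back
 * to the TSS I/O permission bitmap or raise #GP.
 */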
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002501static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002502{
2503 int iopl;
2504 if (ctxt->mode == X86EMUL_MODE_REAL)
2505 return false;
2506 if (ctxt->mode == X86EMUL_MODE_VM86)
2507 return true;
2508 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002509 return ctxt->ops->cpl(ctxt) > iopl;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002510}
2511
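/*
 * Consult the I/O permission bitmap in the TSS referenced by TR: the access
 * is allowed only if the bitmap lies within the TSS limit and every bit
 * covering ports [port, port+len) is clear.
 */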
2512static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002513 u16 port, u16 len)
2514{
Mathias Krause0225fb52012-08-30 01:30:16 +02002515 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002516 struct desc_struct tr_seg;
Gleb Natapov5601d052011-03-07 14:55:06 +02002517 u32 base3;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002518 int r;
Avi Kivity1aa36612011-04-27 13:20:30 +03002519 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002520 unsigned mask = (1 << len) - 1;
Gleb Natapov5601d052011-03-07 14:55:06 +02002521 unsigned long base;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002522
Avi Kivity1aa36612011-04-27 13:20:30 +03002523 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002524 if (!tr_seg.p)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002525 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002526 if (desc_limit_scaled(&tr_seg) < 103)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002527 return false;
Gleb Natapov5601d052011-03-07 14:55:06 +02002528 base = get_desc_base(&tr_seg);
2529#ifdef CONFIG_X86_64
2530 base |= ((u64)base3) << 32;
2531#endif
Avi Kivity0f65dd72011-04-20 13:37:53 +03002532 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002533 if (r != X86EMUL_CONTINUE)
2534 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002535 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002536 return false;
Avi Kivity0f65dd72011-04-20 13:37:53 +03002537 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002538 if (r != X86EMUL_CONTINUE)
2539 return false;
2540 if ((perm >> bit_idx) & mask)
2541 return false;
2542 return true;
2543}
2544
2545static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002546 u16 port, u16 len)
2547{
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002548 if (ctxt->perm_ok)
2549 return true;
2550
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002551 if (emulator_bad_iopl(ctxt))
2552 if (!emulator_io_port_access_allowed(ctxt, port, len))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002553 return false;
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002554
2555 ctxt->perm_ok = true;
2556
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002557 return true;
2558}
2559
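/*
 * Snapshot the outgoing task's IP, FLAGS, general-purpose registers and
 * segment selectors into a 16-bit TSS image.
 */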
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002560static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002561 struct tss_segment_16 *tss)
2562{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002563 tss->ip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002564 tss->flag = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002565 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2566 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2567 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2568 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2569 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2570 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2571 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2572 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002573
Avi Kivity1aa36612011-04-27 13:20:30 +03002574 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2575 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2576 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2577 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2578 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002579}
2580
2581static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002582 struct tss_segment_16 *tss)
2583{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002584 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002585 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002586
Avi Kivity9dac77f2011-06-01 15:34:25 +03002587 ctxt->_eip = tss->ip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002588 ctxt->eflags = tss->flag | 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002589 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2590 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2591 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2592 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2593 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2594 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2595 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2596 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002597
2598 /*
2599 * SDM says that segment selectors are loaded before segment
2600 * descriptors
2601 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002602 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2603 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2604 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2605 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2606 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002607
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002608 cpl = tss->cs & 3;
2609
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002610 /*
Guo Chaofc058682012-06-28 15:19:51 +08002611	 * Now load the segment descriptors. If a fault happens at this stage,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002612	 * it is handled in the context of the new task.
2613 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002614 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002615 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002616 if (ret != X86EMUL_CONTINUE)
2617 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002618 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002619 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002620 if (ret != X86EMUL_CONTINUE)
2621 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002622 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002623 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002624 if (ret != X86EMUL_CONTINUE)
2625 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002626 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002627 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002628 if (ret != X86EMUL_CONTINUE)
2629 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002630 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002631 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002632 if (ret != X86EMUL_CONTINUE)
2633 return ret;
2634
2635 return X86EMUL_CONTINUE;
2636}
2637
2638static int task_switch_16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002639 u16 tss_selector, u16 old_tss_sel,
2640 ulong old_tss_base, struct desc_struct *new_desc)
2641{
Mathias Krause0225fb52012-08-30 01:30:16 +02002642 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002643 struct tss_segment_16 tss_seg;
2644 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002645 u32 new_tss_base = get_desc_base(new_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002646
Avi Kivity0f65dd72011-04-20 13:37:53 +03002647 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002648 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002649 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002650 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002651
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002652 save_state_to_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002653
Avi Kivity0f65dd72011-04-20 13:37:53 +03002654 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002655 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002656 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002657 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002658
Avi Kivity0f65dd72011-04-20 13:37:53 +03002659 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002660 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002661 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002662 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002663
2664 if (old_tss_sel != 0xffff) {
2665 tss_seg.prev_task_link = old_tss_sel;
2666
Avi Kivity0f65dd72011-04-20 13:37:53 +03002667 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002668 &tss_seg.prev_task_link,
2669 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002670 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002671 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002672 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002673 }
2674
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002675 return load_state_from_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002676}
2677
2678static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002679 struct tss_segment_32 *tss)
2680{
Nadav Amit5c7411e2014-04-07 18:37:47 +03002681	/* CR3 and the LDT selector are intentionally not saved */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002682 tss->eip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002683 tss->eflags = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002684 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2685 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2686 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2687 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2688 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2689 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2690 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2691 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002692
Avi Kivity1aa36612011-04-27 13:20:30 +03002693 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2694 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2695 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2696 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2697 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2698 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002699}
2700
2701static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002702 struct tss_segment_32 *tss)
2703{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002704 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002705 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002706
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002707 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002708 return emulate_gp(ctxt, 0);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002709 ctxt->_eip = tss->eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002710 ctxt->eflags = tss->eflags | 2;
Kevin Wolf4cee4792012-02-08 14:34:41 +01002711
2712 /* General purpose registers */
Avi Kivitydd856ef2012-08-27 23:46:17 +03002713 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2714 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2715 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2716 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2717 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2718 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2719 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2720 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002721
2722 /*
2723 * SDM says that segment selectors are loaded before segment
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002724 * descriptors. This is important because CPL checks will
2725 * use CS.RPL.
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002726 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002727 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2728 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2729 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2730 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2731 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2732 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2733 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002734
2735 /*
Kevin Wolf4cee4792012-02-08 14:34:41 +01002736 * If we're switching between Protected Mode and VM86, we need to make
2737 * sure to update the mode before loading the segment descriptors so
2738 * that the selectors are interpreted correctly.
Kevin Wolf4cee4792012-02-08 14:34:41 +01002739 */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002740 if (ctxt->eflags & X86_EFLAGS_VM) {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002741 ctxt->mode = X86EMUL_MODE_VM86;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002742 cpl = 3;
2743 } else {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002744 ctxt->mode = X86EMUL_MODE_PROT32;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002745 cpl = tss->cs & 3;
2746 }
Kevin Wolf4cee4792012-02-08 14:34:41 +01002747
2748 /*
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002749	 * Now load the segment descriptors. If a fault happens at this stage,
2750	 * it is handled in the context of the new task.
2751 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002752 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002753 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002754 if (ret != X86EMUL_CONTINUE)
2755 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002756 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002757 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002758 if (ret != X86EMUL_CONTINUE)
2759 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002760 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002761 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002762 if (ret != X86EMUL_CONTINUE)
2763 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002764 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002765 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002766 if (ret != X86EMUL_CONTINUE)
2767 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002768 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002769 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002770 if (ret != X86EMUL_CONTINUE)
2771 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002772 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002773 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002774 if (ret != X86EMUL_CONTINUE)
2775 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002776 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002777 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002778 if (ret != X86EMUL_CONTINUE)
2779 return ret;
2780
2781 return X86EMUL_CONTINUE;
2782}
2783
2784static int task_switch_32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002785 u16 tss_selector, u16 old_tss_sel,
2786 ulong old_tss_base, struct desc_struct *new_desc)
2787{
Mathias Krause0225fb52012-08-30 01:30:16 +02002788 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002789 struct tss_segment_32 tss_seg;
2790 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002791 u32 new_tss_base = get_desc_base(new_desc);
Nadav Amit5c7411e2014-04-07 18:37:47 +03002792 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2793 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002794
Avi Kivity0f65dd72011-04-20 13:37:53 +03002795 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002796 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002797 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002798 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002799
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002800 save_state_to_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002801
Nadav Amit5c7411e2014-04-07 18:37:47 +03002802 /* Only GP registers and segment selectors are saved */
2803 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2804 ldt_sel_offset - eip_offset, &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002805 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002806 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002807
Avi Kivity0f65dd72011-04-20 13:37:53 +03002808 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002809 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002810 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002811 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002812
2813 if (old_tss_sel != 0xffff) {
2814 tss_seg.prev_task_link = old_tss_sel;
2815
Avi Kivity0f65dd72011-04-20 13:37:53 +03002816 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002817 &tss_seg.prev_task_link,
2818 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002819 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002820 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002821 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002822 }
2823
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002824 return load_state_from_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002825}
2826
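/*
 * Common task-switch emulation: validate the target TSS descriptor and, for
 * software-interrupt task gates, the gate DPL; save state into the old TSS
 * and load state from the new one (16- or 32-bit format); manage the busy
 * flag, NT flag and back link; finally set CR0.TS, load TR and push the
 * error code if one was supplied.
 */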
2827static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002828 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002829 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002830{
Mathias Krause0225fb52012-08-30 01:30:16 +02002831 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002832 struct desc_struct curr_tss_desc, next_tss_desc;
2833 int ret;
Avi Kivity1aa36612011-04-27 13:20:30 +03002834 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002835 ulong old_tss_base =
Avi Kivity4bff1e862011-04-20 13:37:53 +03002836 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
Gleb Natapovceffb452010-03-18 15:20:19 +02002837 u32 desc_limit;
Avi Kivitye9194642012-06-13 16:29:39 +03002838 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002839
2840 /* FIXME: old_tss_base == ~0 ? */
2841
Avi Kivitye9194642012-06-13 16:29:39 +03002842 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002843 if (ret != X86EMUL_CONTINUE)
2844 return ret;
Avi Kivitye9194642012-06-13 16:29:39 +03002845 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002846 if (ret != X86EMUL_CONTINUE)
2847 return ret;
2848
2849 /* FIXME: check that next_tss_desc is tss */
2850
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002851 /*
2852 * Check privileges. The three cases are task switch caused by...
2853 *
2854 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2855 * 2. Exception/IRQ/iret: No check is performed
Nadav Amit2c2ca2d2014-11-02 11:54:57 +02002856 * 3. jmp/call to TSS/task-gate: No check is performed since the
2857 * hardware checks it before exiting.
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002858 */
2859 if (reason == TASK_SWITCH_GATE) {
2860 if (idt_index != -1) {
2861 /* Software interrupts */
2862 struct desc_struct task_gate_desc;
2863 int dpl;
2864
2865 ret = read_interrupt_descriptor(ctxt, idt_index,
2866 &task_gate_desc);
2867 if (ret != X86EMUL_CONTINUE)
2868 return ret;
2869
2870 dpl = task_gate_desc.dpl;
2871 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2872 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2873 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002874 }
2875
Gleb Natapovceffb452010-03-18 15:20:19 +02002876 desc_limit = desc_limit_scaled(&next_tss_desc);
2877 if (!next_tss_desc.p ||
2878 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2879 desc_limit < 0x2b)) {
Paolo Bonzini592f0852014-08-20 10:05:08 +02002880 return emulate_ts(ctxt, tss_selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002881 }
2882
2883 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2884 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002885 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002886 }
2887
2888 if (reason == TASK_SWITCH_IRET)
2889 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2890
2891	/* Set the back link to the previous task only if the NT bit is set in
Guo Chaofc058682012-06-28 15:19:51 +08002892	   eflags; note that old_tss_sel is not used after this point. */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002893 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2894 old_tss_sel = 0xffff;
2895
2896 if (next_tss_desc.type & 8)
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002897 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002898 old_tss_base, &next_tss_desc);
2899 else
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002900 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002901 old_tss_base, &next_tss_desc);
Jan Kiszka0760d442010-04-14 15:50:57 +02002902 if (ret != X86EMUL_CONTINUE)
2903 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002904
2905 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2906 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2907
2908 if (reason != TASK_SWITCH_IRET) {
2909 next_tss_desc.type |= (1 << 1); /* set busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002910 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002911 }
2912
Avi Kivity717746e2011-04-20 13:37:53 +03002913 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
Avi Kivity1aa36612011-04-27 13:20:30 +03002914 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002915
Jan Kiszkae269fb22010-04-14 15:51:09 +02002916 if (has_error_code) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002917 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2918 ctxt->lock_prefix = 0;
2919 ctxt->src.val = (unsigned long) error_code;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002920 ret = em_push(ctxt);
Jan Kiszkae269fb22010-04-14 15:51:09 +02002921 }
2922
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002923 return ret;
2924}
2925
2926int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002927 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002928 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002929{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002930 int rc;
2931
Avi Kivitydd856ef2012-08-27 23:46:17 +03002932 invalidate_registers(ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002933 ctxt->_eip = ctxt->eip;
2934 ctxt->dst.type = OP_NONE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002935
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002936 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002937 has_error_code, error_code);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002938
Avi Kivitydd856ef2012-08-27 23:46:17 +03002939 if (rc == X86EMUL_CONTINUE) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002940 ctxt->eip = ctxt->_eip;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002941 writeback_registers(ctxt);
2942 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002943
Gleb Natapova0c0ab22011-03-28 16:57:49 +02002944 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002945}
2946
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03002947static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2948 struct operand *op)
Gleb Natapova682e352010-03-18 15:20:21 +02002949{
Gleb Natapovb3356bf2012-09-03 15:24:29 +03002950 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
Gleb Natapova682e352010-03-18 15:20:21 +02002951
Paolo Bonzini01485a22014-11-19 18:25:08 +01002952 register_address_increment(ctxt, reg, df * op->bytes);
2953 op->addr.mem.ea = register_address(ctxt, reg);
Gleb Natapova682e352010-03-18 15:20:21 +02002954}
2955
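/*
 * DAS: decimal adjust AL after subtraction, updating AF and CF and
 * recomputing PF/ZF/SF via a dummy OR.
 */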
Avi Kivity7af04fc2010-08-18 14:16:35 +03002956static int em_das(struct x86_emulate_ctxt *ctxt)
2957{
Avi Kivity7af04fc2010-08-18 14:16:35 +03002958 u8 al, old_al;
2959 bool af, cf, old_cf;
2960
2961 cf = ctxt->eflags & X86_EFLAGS_CF;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002962 al = ctxt->dst.val;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002963
2964 old_al = al;
2965 old_cf = cf;
2966 cf = false;
2967 af = ctxt->eflags & X86_EFLAGS_AF;
2968 if ((al & 0x0f) > 9 || af) {
2969 al -= 6;
2970 cf = old_cf | (al >= 250);
2971 af = true;
2972 } else {
2973 af = false;
2974 }
2975 if (old_al > 0x99 || old_cf) {
2976 al -= 0x60;
2977 cf = true;
2978 }
2979
Avi Kivity9dac77f2011-06-01 15:34:25 +03002980 ctxt->dst.val = al;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002981 /* Set PF, ZF, SF */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002982 ctxt->src.type = OP_IMM;
2983 ctxt->src.val = 0;
2984 ctxt->src.bytes = 1;
Avi Kivity158de572013-01-19 19:51:57 +02002985 fastop(ctxt, em_or);
Avi Kivity7af04fc2010-08-18 14:16:35 +03002986 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2987 if (cf)
2988 ctxt->eflags |= X86_EFLAGS_CF;
2989 if (af)
2990 ctxt->eflags |= X86_EFLAGS_AF;
2991 return X86EMUL_CONTINUE;
2992}
2993
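/*
 * AAM: divide AL by the immediate operand, leaving the quotient in AH and
 * the remainder in AL; a zero divisor raises #DE.
 */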
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02002994static int em_aam(struct x86_emulate_ctxt *ctxt)
2995{
2996 u8 al, ah;
2997
2998 if (ctxt->src.val == 0)
2999 return emulate_de(ctxt);
3000
3001 al = ctxt->dst.val & 0xff;
3002 ah = al / ctxt->src.val;
3003 al %= ctxt->src.val;
3004
3005 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3006
3007 /* Set PF, ZF, SF */
3008 ctxt->src.type = OP_IMM;
3009 ctxt->src.val = 0;
3010 ctxt->src.bytes = 1;
3011 fastop(ctxt, em_or);
3012
3013 return X86EMUL_CONTINUE;
3014}
3015
Gleb Natapov7f662272012-12-10 11:42:30 +02003016static int em_aad(struct x86_emulate_ctxt *ctxt)
3017{
3018 u8 al = ctxt->dst.val & 0xff;
3019 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3020
3021 al = (al + (ah * ctxt->src.val)) & 0xff;
3022
3023 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3024
Gleb Natapovf583c292013-02-13 17:50:39 +02003025 /* Set PF, ZF, SF */
3026 ctxt->src.type = OP_IMM;
3027 ctxt->src.val = 0;
3028 ctxt->src.bytes = 1;
3029 fastop(ctxt, em_or);
Gleb Natapov7f662272012-12-10 11:42:30 +02003030
3031 return X86EMUL_CONTINUE;
3032}
3033
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003034static int em_call(struct x86_emulate_ctxt *ctxt)
3035{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003036 int rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003037 long rel = ctxt->src.val;
3038
3039 ctxt->src.val = (unsigned long)ctxt->_eip;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003040 rc = jmp_rel(ctxt, rel);
3041 if (rc != X86EMUL_CONTINUE)
3042 return rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003043 return em_push(ctxt);
3044}
3045
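/*
 * Far CALL: load the new CS descriptor, assign the far target, then push
 * the old CS and old EIP; if a push fails, the old CS is restored.
 */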
Avi Kivity0ef753b2010-08-18 14:51:45 +03003046static int em_call_far(struct x86_emulate_ctxt *ctxt)
3047{
Avi Kivity0ef753b2010-08-18 14:51:45 +03003048 u16 sel, old_cs;
3049 ulong old_eip;
3050 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03003051 struct desc_struct old_desc, new_desc;
3052 const struct x86_emulate_ops *ops = ctxt->ops;
3053 int cpl = ctxt->ops->cpl(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003054
Avi Kivity9dac77f2011-06-01 15:34:25 +03003055 old_eip = ctxt->_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003056 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003057
Avi Kivity9dac77f2011-06-01 15:34:25 +03003058 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Nadav Amit3dc4bc42014-12-25 02:52:19 +02003059 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3060 X86_TRANSFER_CALL_JMP, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03003061 if (rc != X86EMUL_CONTINUE)
Nadav Amit80976db2014-12-25 02:52:20 +02003062 return rc;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003063
Nadav Amitd50eaa12014-11-19 17:43:11 +02003064 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03003065 if (rc != X86EMUL_CONTINUE)
3066 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003067
Avi Kivity9dac77f2011-06-01 15:34:25 +03003068 ctxt->src.val = old_cs;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09003069 rc = em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003070 if (rc != X86EMUL_CONTINUE)
Nadav Amitd1442d82014-09-18 22:39:39 +03003071 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003072
Avi Kivity9dac77f2011-06-01 15:34:25 +03003073 ctxt->src.val = old_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003074 rc = em_push(ctxt);
3075	/* If we failed, we tainted the memory, but at the very least we should
3076	   restore cs. */
3077 if (rc != X86EMUL_CONTINUE)
3078 goto fail;
3079 return rc;
3080fail:
3081 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3082 return rc;
3083
Avi Kivity0ef753b2010-08-18 14:51:45 +03003084}
3085
Avi Kivity40ece7c2010-08-18 15:12:09 +03003086static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3087{
Avi Kivity40ece7c2010-08-18 15:12:09 +03003088 int rc;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003089 unsigned long eip;
Avi Kivity40ece7c2010-08-18 15:12:09 +03003090
Nadav Amit234f3ce2014-09-18 22:39:38 +03003091 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3092 if (rc != X86EMUL_CONTINUE)
3093 return rc;
3094 rc = assign_eip_near(ctxt, eip);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003095 if (rc != X86EMUL_CONTINUE)
3096 return rc;
Avi Kivity5ad105e2012-08-19 14:34:31 +03003097 rsp_increment(ctxt, ctxt->src.val);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003098 return X86EMUL_CONTINUE;
3099}
3100
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003101static int em_xchg(struct x86_emulate_ctxt *ctxt)
3102{
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003103 /* Write back the register source. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003104 ctxt->src.val = ctxt->dst.val;
3105 write_register_operand(&ctxt->src);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003106
3107 /* Write back the memory destination with implicit LOCK prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003108 ctxt->dst.val = ctxt->src.orig_val;
3109 ctxt->lock_prefix = 1;
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003110 return X86EMUL_CONTINUE;
3111}
3112
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003113static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3114{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003115 ctxt->dst.val = ctxt->src2.val;
Avi Kivity4d758342013-01-19 19:51:55 +02003116 return fastop(ctxt, em_imul);
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003117}
3118
Avi Kivity61429142010-08-19 15:13:00 +03003119static int em_cwd(struct x86_emulate_ctxt *ctxt)
3120{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003121 ctxt->dst.type = OP_REG;
3122 ctxt->dst.bytes = ctxt->src.bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03003123 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivity9dac77f2011-06-01 15:34:25 +03003124 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
Avi Kivity61429142010-08-19 15:13:00 +03003125
3126 return X86EMUL_CONTINUE;
3127}
3128
Avi Kivity48bb5d32010-08-18 18:54:34 +03003129static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3130{
Avi Kivity48bb5d32010-08-18 18:54:34 +03003131 u64 tsc = 0;
3132
Avi Kivity717746e2011-04-20 13:37:53 +03003133 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003134 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3135 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
Avi Kivity48bb5d32010-08-18 18:54:34 +03003136 return X86EMUL_CONTINUE;
3137}
3138
Avi Kivity222d21a2011-11-10 14:57:30 +02003139static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3140{
3141 u64 pmc;
3142
Avi Kivitydd856ef2012-08-27 23:46:17 +03003143 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
Avi Kivity222d21a2011-11-10 14:57:30 +02003144 return emulate_gp(ctxt, 0);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003145 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3146 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
Avi Kivity222d21a2011-11-10 14:57:30 +02003147 return X86EMUL_CONTINUE;
3148}
3149
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003150static int em_mov(struct x86_emulate_ctxt *ctxt)
3151{
Paolo Bonzini54cfdb32014-03-27 11:36:25 +01003152 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003153 return X86EMUL_CONTINUE;
3154}
3155
Borislav Petkov84cffe42013-10-29 12:54:56 +01003156#define FFL(x) bit(X86_FEATURE_##x)
3157
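/*
 * MOVBE: byte-swap the source into the destination. Guarded by the
 * guest-visible CPUID MOVBE bit; a 16-bit operand leaves the upper word of
 * the destination register unchanged.
 */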
3158static int em_movbe(struct x86_emulate_ctxt *ctxt)
3159{
3160 u32 ebx, ecx, edx, eax = 1;
3161 u16 tmp;
3162
3163 /*
3164	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3165 */
3166 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3167 if (!(ecx & FFL(MOVBE)))
3168 return emulate_ud(ctxt);
3169
3170 switch (ctxt->op_bytes) {
3171 case 2:
3172 /*
3173 * From MOVBE definition: "...When the operand size is 16 bits,
3174 * the upper word of the destination register remains unchanged
3175 * ..."
3176 *
3177	 * Casting either ->valptr or ->val to u16 would break strict-aliasing
3178	 * rules, so we have to do the operation almost by hand.
3179 */
3180 tmp = (u16)ctxt->src.val;
3181 ctxt->dst.val &= ~0xffffUL;
3182 ctxt->dst.val |= (unsigned long)swab16(tmp);
3183 break;
3184 case 4:
3185 ctxt->dst.val = swab32((u32)ctxt->src.val);
3186 break;
3187 case 8:
3188 ctxt->dst.val = swab64(ctxt->src.val);
3189 break;
3190 default:
Paolo Bonzini592f0852014-08-20 10:05:08 +02003191 BUG();
Borislav Petkov84cffe42013-10-29 12:54:56 +01003192 }
3193 return X86EMUL_CONTINUE;
3194}
3195
Takuya Yoshikawabc00f8d2011-11-22 15:19:19 +09003196static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3197{
3198 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3199 return emulate_gp(ctxt, 0);
3200
3201 /* Disable writeback. */
3202 ctxt->dst.type = OP_NONE;
3203 return X86EMUL_CONTINUE;
3204}
3205
3206static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3207{
3208 unsigned long val;
3209
3210 if (ctxt->mode == X86EMUL_MODE_PROT64)
3211 val = ctxt->src.val & ~0ULL;
3212 else
3213 val = ctxt->src.val & ~0U;
3214
3215 /* #UD condition is already handled. */
3216 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3217 return emulate_gp(ctxt, 0);
3218
3219 /* Disable writeback. */
3220 ctxt->dst.type = OP_NONE;
3221 return X86EMUL_CONTINUE;
3222}
3223
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003224static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3225{
3226 u64 msr_data;
3227
Avi Kivitydd856ef2012-08-27 23:46:17 +03003228 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3229 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3230 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003231 return emulate_gp(ctxt, 0);
3232
3233 return X86EMUL_CONTINUE;
3234}
3235
3236static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3237{
3238 u64 msr_data;
3239
Avi Kivitydd856ef2012-08-27 23:46:17 +03003240 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003241 return emulate_gp(ctxt, 0);
3242
Avi Kivitydd856ef2012-08-27 23:46:17 +03003243 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3244 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003245 return X86EMUL_CONTINUE;
3246}
3247
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003248static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3249{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003250 if (ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003251 return emulate_ud(ctxt);
3252
Avi Kivity9dac77f2011-06-01 15:34:25 +03003253 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
Nadav Amitb5bbf102014-11-02 11:54:46 +02003254 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3255 ctxt->dst.bytes = 2;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003256 return X86EMUL_CONTINUE;
3257}
3258
3259static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3260{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003261 u16 sel = ctxt->src.val;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003262
Avi Kivity9dac77f2011-06-01 15:34:25 +03003263 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003264 return emulate_ud(ctxt);
3265
Avi Kivity9dac77f2011-06-01 15:34:25 +03003266 if (ctxt->modrm_reg == VCPU_SREG_SS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003267 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3268
3269 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003270 ctxt->dst.type = OP_NONE;
3271 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003272}
3273
Avi Kivitya14e5792012-06-13 12:28:33 +03003274static int em_lldt(struct x86_emulate_ctxt *ctxt)
3275{
3276 u16 sel = ctxt->src.val;
3277
3278 /* Disable writeback. */
3279 ctxt->dst.type = OP_NONE;
3280 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3281}
3282
Avi Kivity80890002012-06-13 16:33:29 +03003283static int em_ltr(struct x86_emulate_ctxt *ctxt)
3284{
3285 u16 sel = ctxt->src.val;
3286
3287 /* Disable writeback. */
3288 ctxt->dst.type = OP_NONE;
3289 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3290}
3291
Avi Kivity38503912011-03-31 18:48:09 +02003292static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3293{
Avi Kivity9fa088f2011-03-31 18:54:30 +02003294 int rc;
3295 ulong linear;
3296
Avi Kivity9dac77f2011-06-01 15:34:25 +03003297 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02003298 if (rc == X86EMUL_CONTINUE)
Avi Kivity3cb16fe2011-04-20 15:38:44 +03003299 ctxt->ops->invlpg(ctxt, linear);
Avi Kivity38503912011-03-31 18:48:09 +02003300 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003301 ctxt->dst.type = OP_NONE;
Avi Kivity38503912011-03-31 18:48:09 +02003302 return X86EMUL_CONTINUE;
3303}
3304
Avi Kivity2d04a052011-04-20 15:32:49 +03003305static int em_clts(struct x86_emulate_ctxt *ctxt)
3306{
3307 ulong cr0;
3308
3309 cr0 = ctxt->ops->get_cr(ctxt, 0);
3310 cr0 &= ~X86_CR0_TS;
3311 ctxt->ops->set_cr(ctxt, 0, cr0);
3312 return X86EMUL_CONTINUE;
3313}
3314
Avi Kivity26d05cc2011-04-21 12:07:59 +03003315static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3316{
Nadav Amit0f54a322014-08-29 11:26:55 +03003317 int rc = ctxt->ops->fix_hypercall(ctxt);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003318
Avi Kivity26d05cc2011-04-21 12:07:59 +03003319 if (rc != X86EMUL_CONTINUE)
3320 return rc;
3321
3322 /* Let the processor re-execute the fixed hypercall */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003323 ctxt->_eip = ctxt->eip;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003324 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003325 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003326 return X86EMUL_CONTINUE;
3327}
3328
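/*
 * SGDT/SIDT helper: store the 16-bit limit followed by the table base to
 * memory; with a 16-bit operand size the base is truncated to 24 bits.
 */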
Avi Kivity96051572012-06-10 17:21:18 +03003329static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3330 void (*get)(struct x86_emulate_ctxt *ctxt,
3331 struct desc_ptr *ptr))
3332{
3333 struct desc_ptr desc_ptr;
3334
3335 if (ctxt->mode == X86EMUL_MODE_PROT64)
3336 ctxt->op_bytes = 8;
3337 get(ctxt, &desc_ptr);
3338 if (ctxt->op_bytes == 2) {
3339 ctxt->op_bytes = 4;
3340 desc_ptr.address &= 0x00ffffff;
3341 }
3342 /* Disable writeback. */
3343 ctxt->dst.type = OP_NONE;
3344 return segmented_write(ctxt, ctxt->dst.addr.mem,
3345 &desc_ptr, 2 + ctxt->op_bytes);
3346}
3347
3348static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3349{
3350 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3351}
3352
3353static int em_sidt(struct x86_emulate_ctxt *ctxt)
3354{
3355 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3356}
3357
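/*
 * LGDT/LIDT helper: read the pseudo-descriptor from memory, reject a
 * non-canonical base in 64-bit mode, then load GDTR or IDTR.
 */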
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003358static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003359{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003360 struct desc_ptr desc_ptr;
3361 int rc;
3362
Avi Kivity510425f2012-06-07 17:04:36 +03003363 if (ctxt->mode == X86EMUL_MODE_PROT64)
3364 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003365 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Avi Kivity26d05cc2011-04-21 12:07:59 +03003366 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003367 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003368 if (rc != X86EMUL_CONTINUE)
3369 return rc;
Nadav Amit9a9abf62014-11-02 11:54:56 +02003370 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3371 is_noncanonical_address(desc_ptr.address))
3372 return emulate_gp(ctxt, 0);
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003373 if (lgdt)
3374 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3375 else
3376 ctxt->ops->set_idt(ctxt, &desc_ptr);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003377 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003378 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003379 return X86EMUL_CONTINUE;
3380}
3381
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003382static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3383{
3384 return em_lgdt_lidt(ctxt, true);
3385}
3386
Avi Kivity5ef39c72011-04-21 12:21:50 +03003387static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003388{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003389 int rc;
3390
Avi Kivity5ef39c72011-04-21 12:21:50 +03003391 rc = ctxt->ops->fix_hypercall(ctxt);
3392
Avi Kivity26d05cc2011-04-21 12:07:59 +03003393 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003394 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003395 return rc;
3396}
3397
3398static int em_lidt(struct x86_emulate_ctxt *ctxt)
3399{
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003400 return em_lgdt_lidt(ctxt, false);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003401}
3402
3403static int em_smsw(struct x86_emulate_ctxt *ctxt)
3404{
Nadav Amit32e94d02014-06-02 18:34:11 +03003405 if (ctxt->dst.type == OP_MEM)
3406 ctxt->dst.bytes = 2;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003407 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003408 return X86EMUL_CONTINUE;
3409}
3410
3411static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3412{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003413 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003414 | (ctxt->src.val & 0x0f));
3415 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003416 return X86EMUL_CONTINUE;
3417}
3418
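/*
 * LOOP/LOOPE/LOOPNE: decrement the count register ((E)CX/RCX per address
 * size) and branch while it is non-zero and the ZF condition encoded in
 * the opcode holds (plain LOOP branches on a non-zero count alone).
 */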
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003419static int em_loop(struct x86_emulate_ctxt *ctxt)
3420{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003421 int rc = X86EMUL_CONTINUE;
3422
Paolo Bonzini01485a22014-11-19 18:25:08 +01003423 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003424 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
Avi Kivity9dac77f2011-06-01 15:34:25 +03003425 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
Nadav Amit234f3ce2014-09-18 22:39:38 +03003426 rc = jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003427
Nadav Amit234f3ce2014-09-18 22:39:38 +03003428 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003429}
3430
3431static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3432{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003433 int rc = X86EMUL_CONTINUE;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003434
Nadav Amit234f3ce2014-09-18 22:39:38 +03003435 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3436 rc = jmp_rel(ctxt, ctxt->src.val);
3437
3438 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003439}
3440
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003441static int em_in(struct x86_emulate_ctxt *ctxt)
3442{
3443 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3444 &ctxt->dst.val))
3445 return X86EMUL_IO_NEEDED;
3446
3447 return X86EMUL_CONTINUE;
3448}
3449
3450static int em_out(struct x86_emulate_ctxt *ctxt)
3451{
3452 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3453 &ctxt->src.val, 1);
3454 /* Disable writeback. */
3455 ctxt->dst.type = OP_NONE;
3456 return X86EMUL_CONTINUE;
3457}
3458
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003459static int em_cli(struct x86_emulate_ctxt *ctxt)
3460{
3461 if (emulator_bad_iopl(ctxt))
3462 return emulate_gp(ctxt, 0);
3463
3464 ctxt->eflags &= ~X86_EFLAGS_IF;
3465 return X86EMUL_CONTINUE;
3466}
3467
3468static int em_sti(struct x86_emulate_ctxt *ctxt)
3469{
3470 if (emulator_bad_iopl(ctxt))
3471 return emulate_gp(ctxt, 0);
3472
3473 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3474 ctxt->eflags |= X86_EFLAGS_IF;
3475 return X86EMUL_CONTINUE;
3476}
3477
Avi Kivity6d6eede2012-06-07 14:11:36 +03003478static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3479{
3480 u32 eax, ebx, ecx, edx;
3481
Avi Kivitydd856ef2012-08-27 23:46:17 +03003482 eax = reg_read(ctxt, VCPU_REGS_RAX);
3483 ecx = reg_read(ctxt, VCPU_REGS_RCX);
Avi Kivity6d6eede2012-06-07 14:11:36 +03003484 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003485 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3486 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3487 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3488 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
Avi Kivity6d6eede2012-06-07 14:11:36 +03003489 return X86EMUL_CONTINUE;
3490}
3491
Paolo Bonzini98f73632013-10-31 11:19:42 +01003492static int em_sahf(struct x86_emulate_ctxt *ctxt)
3493{
3494 u32 flags;
3495
3496 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3497 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3498
3499 ctxt->eflags &= ~0xffUL;
3500 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3501 return X86EMUL_CONTINUE;
3502}
3503
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003504static int em_lahf(struct x86_emulate_ctxt *ctxt)
3505{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003506 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3507 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003508 return X86EMUL_CONTINUE;
3509}
3510
Avi Kivity92998362012-06-13 12:25:06 +03003511static int em_bswap(struct x86_emulate_ctxt *ctxt)
3512{
3513 switch (ctxt->op_bytes) {
3514#ifdef CONFIG_X86_64
3515 case 8:
3516 asm("bswap %0" : "+r"(ctxt->dst.val));
3517 break;
3518#endif
3519 default:
3520 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3521 break;
3522 }
3523 return X86EMUL_CONTINUE;
3524}
3525
Nadav Amit13e457e2014-10-13 13:04:13 +03003526static int em_clflush(struct x86_emulate_ctxt *ctxt)
3527{
3528 /* emulating clflush regardless of cpuid */
3529 return X86EMUL_CONTINUE;
3530}
3531
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003532static bool valid_cr(int nr)
3533{
3534 switch (nr) {
3535 case 0:
3536 case 2 ... 4:
3537 case 8:
3538 return true;
3539 default:
3540 return false;
3541 }
3542}
3543
3544static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3545{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003546 if (!valid_cr(ctxt->modrm_reg))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003547 return emulate_ud(ctxt);
3548
3549 return X86EMUL_CONTINUE;
3550}
3551
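/*
 * Validate a CR write: reject writes to reserved bits and enforce the
 * mode-dependent rules (PG without PE or NW without CD is rejected; PG with
 * EFER.LME requires CR4.PAE; CR3 reserved bits apply in long mode; CR4.PAE
 * cannot be cleared while EFER.LMA is set).
 */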
3552static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3553{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003554 u64 new_val = ctxt->src.val64;
3555 int cr = ctxt->modrm_reg;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003556 u64 efer = 0;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003557
3558 static u64 cr_reserved_bits[] = {
3559 0xffffffff00000000ULL,
3560 0, 0, 0, /* CR3 checked later */
3561 CR4_RESERVED_BITS,
3562 0, 0, 0,
3563 CR8_RESERVED_BITS,
3564 };
3565
3566 if (!valid_cr(cr))
3567 return emulate_ud(ctxt);
3568
3569 if (new_val & cr_reserved_bits[cr])
3570 return emulate_gp(ctxt, 0);
3571
3572 switch (cr) {
3573 case 0: {
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003574 u64 cr4;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003575 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3576 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3577 return emulate_gp(ctxt, 0);
3578
Avi Kivity717746e2011-04-20 13:37:53 +03003579 cr4 = ctxt->ops->get_cr(ctxt, 4);
3580 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003581
3582 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3583 !(cr4 & X86_CR4_PAE))
3584 return emulate_gp(ctxt, 0);
3585
3586 break;
3587 }
3588 case 3: {
3589 u64 rsvd = 0;
3590
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003591 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3592 if (efer & EFER_LMA)
Nadav Amit9d88fca2014-11-02 11:54:52 +02003593 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003594
3595 if (new_val & rsvd)
3596 return emulate_gp(ctxt, 0);
3597
3598 break;
3599 }
3600 case 4: {
Avi Kivity717746e2011-04-20 13:37:53 +03003601 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003602
3603 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3604 return emulate_gp(ctxt, 0);
3605
3606 break;
3607 }
3608 }
3609
3610 return X86EMUL_CONTINUE;
3611}
3612
Joerg Roedel3b88e412011-04-04 12:39:29 +02003613static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3614{
3615 unsigned long dr7;
3616
Avi Kivity717746e2011-04-20 13:37:53 +03003617 ctxt->ops->get_dr(ctxt, 7, &dr7);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003618
3619 /* Check if DR7.Global_Enable is set */
3620 return dr7 & (1 << 13);
3621}
3622
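/*
 * Common checks for DR accesses: #UD for an out-of-range register or for
 * DR4/DR5 when CR4.DE is set, and #DB (with DR6.BD) when DR7.GD is enabled.
 */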
3623static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3624{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003625 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003626 u64 cr4;
3627
3628 if (dr > 7)
3629 return emulate_ud(ctxt);
3630
Avi Kivity717746e2011-04-20 13:37:53 +03003631 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003632 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3633 return emulate_ud(ctxt);
3634
Nadav Amit6d2a0522014-11-02 11:54:43 +02003635 if (check_dr7_gd(ctxt)) {
3636 ulong dr6;
3637
3638 ctxt->ops->get_dr(ctxt, 6, &dr6);
3639 dr6 &= ~15;
3640 dr6 |= DR6_BD | DR6_RTM;
3641 ctxt->ops->set_dr(ctxt, 6, dr6);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003642 return emulate_db(ctxt);
Nadav Amit6d2a0522014-11-02 11:54:43 +02003643 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02003644
3645 return X86EMUL_CONTINUE;
3646}
3647
3648static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3649{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003650 u64 new_val = ctxt->src.val64;
3651 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003652
3653 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3654 return emulate_gp(ctxt, 0);
3655
3656 return check_dr_read(ctxt);
3657}
3658
Joerg Roedel01de8b02011-04-04 12:39:31 +02003659static int check_svme(struct x86_emulate_ctxt *ctxt)
3660{
3661 u64 efer;
3662
Avi Kivity717746e2011-04-20 13:37:53 +03003663 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003664
3665 if (!(efer & EFER_SVME))
3666 return emulate_ud(ctxt);
3667
3668 return X86EMUL_CONTINUE;
3669}
3670
3671static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3672{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003673 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003674
3675 /* Valid physical address? */
Randy Dunlapd4224442011-04-21 09:09:22 -07003676 if (rax & 0xffff000000000000ULL)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003677 return emulate_gp(ctxt, 0);
3678
3679 return check_svme(ctxt);
3680}
3681
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003682static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3683{
Avi Kivity717746e2011-04-20 13:37:53 +03003684 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003685
Avi Kivity717746e2011-04-20 13:37:53 +03003686 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003687 return emulate_ud(ctxt);
3688
3689 return X86EMUL_CONTINUE;
3690}
3691
Joerg Roedel80612522011-04-04 12:39:33 +02003692static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3693{
Avi Kivity717746e2011-04-20 13:37:53 +03003694 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003695 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
Joerg Roedel80612522011-04-04 12:39:33 +02003696
Avi Kivity717746e2011-04-20 13:37:53 +03003697 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
Nadav Amit67f4d422014-06-02 18:34:09 +03003698 ctxt->ops->check_pmc(ctxt, rcx))
Joerg Roedel80612522011-04-04 12:39:33 +02003699 return emulate_gp(ctxt, 0);
3700
3701 return X86EMUL_CONTINUE;
3702}
3703
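/* I/O permission checks for IN/INS and OUT/OUTS; the access width is capped at 4 bytes */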
Joerg Roedelf6511932011-04-04 12:39:35 +02003704static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3705{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003706 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3707 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003708 return emulate_gp(ctxt, 0);
3709
3710 return X86EMUL_CONTINUE;
3711}
3712
3713static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3714{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003715 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3716 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003717 return emulate_gp(ctxt, 0);
3718
3719 return X86EMUL_CONTINUE;
3720}
3721
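/*
 * Shorthand for building the decode tables below: D/I/F carry decode flags
 * (plus an execute or fastop callback), DI/II add an intercept, DIP/IIP add
 * an intercept and a permission check, and G/GD/ID/E/GP/EXT redirect to a
 * group, group-dual, instruction-dual, escape, prefix or ModRM.rm extension
 * table.  N marks an opcode as not implemented.
 */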
Avi Kivity73fba5f2010-07-29 15:11:53 +03003722#define D(_y) { .flags = (_y) }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003723#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3724#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3725 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Gleb Natapov0b789ee2013-04-11 11:59:55 +03003726#define N D(NotImpl)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003727#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003728#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3729#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
Nadav Amit39f062f2014-11-26 15:47:18 +02003730#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
Gleb Natapov045a2822012-12-20 16:57:43 +02003731#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003732#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
Avi Kivitye28bbd42013-01-04 16:18:48 +02003733#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
Avi Kivityc4f035c2011-04-04 12:39:22 +02003734#define II(_f, _e, _i) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003735 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
Joerg Roedeld09beab2011-04-04 12:39:25 +02003736#define IIP(_f, _e, _i, _p) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003737 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3738 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Avi Kivityaa97bb42010-01-20 18:09:23 +02003739#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003740
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003741#define D2bv(_f) D((_f) | ByteOp), D(_f)
Joerg Roedelf6511932011-04-04 12:39:35 +02003742#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003743#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
Avi Kivityf7857f32013-01-04 16:18:53 +02003744#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003745#define I2bvIP(_f, _e, _i, _p) \
3746 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003747
Avi Kivityfb864fb2013-01-04 16:18:54 +02003748#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3749 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3750 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
Avi Kivity6230f7f2010-08-26 18:34:55 +03003751
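/* Extension tables for 0f 01 with mod == 3, indexed by ModRM.rm */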
Nadav Amit0f54a322014-08-29 11:26:55 +03003752static const struct opcode group7_rm0[] = {
3753 N,
3754 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3755 N, N, N, N, N, N,
3756};
3757
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003758static const struct opcode group7_rm1[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003759 DI(SrcNone | Priv, monitor),
3760 DI(SrcNone | Priv, mwait),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003761 N, N, N, N, N, N,
3762};
3763
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003764static const struct opcode group7_rm3[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003765 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
Borislav Petkovb51e9742013-09-22 16:44:52 +02003766 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003767 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3768 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3769 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3770 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3771 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3772 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003773};
Avi Kivity6230f7f2010-08-26 18:34:55 +03003774
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003775static const struct opcode group7_rm7[] = {
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003776 N,
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003777 DIP(SrcNone, rdtscp, check_rdtsc),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003778 N, N, N, N, N, N,
3779};
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09003780
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003781static const struct opcode group1[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003782 F(Lock, em_add),
3783 F(Lock | PageTable, em_or),
3784 F(Lock, em_adc),
3785 F(Lock, em_sbb),
3786 F(Lock | PageTable, em_and),
3787 F(Lock, em_sub),
3788 F(Lock, em_xor),
3789 F(NoWrite, em_cmp),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003790};
3791
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003792static const struct opcode group1A[] = {
Nadav Amitab708092014-12-25 02:52:21 +02003793 I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003794};
3795
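/* Group 2 rotates/shifts; /6 is an undocumented alias of /4 (SHL), hence em_shl twice */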
Avi Kivity007a3b52013-01-19 19:51:51 +02003796static const struct opcode group2[] = {
3797 F(DstMem | ModRM, em_rol),
3798 F(DstMem | ModRM, em_ror),
3799 F(DstMem | ModRM, em_rcl),
3800 F(DstMem | ModRM, em_rcr),
3801 F(DstMem | ModRM, em_shl),
3802 F(DstMem | ModRM, em_shr),
3803 F(DstMem | ModRM, em_shl),
3804 F(DstMem | ModRM, em_sar),
3805};
3806
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003807static const struct opcode group3[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003808 F(DstMem | SrcImm | NoWrite, em_test),
3809 F(DstMem | SrcImm | NoWrite, em_test),
Avi Kivity45a14672013-01-04 16:18:52 +02003810 F(DstMem | SrcNone | Lock, em_not),
3811 F(DstMem | SrcNone | Lock, em_neg),
Avi Kivityb9fa4092013-02-09 11:31:48 +02003812 F(DstXacc | Src2Mem, em_mul_ex),
3813 F(DstXacc | Src2Mem, em_imul_ex),
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02003814 F(DstXacc | Src2Mem, em_div_ex),
3815 F(DstXacc | Src2Mem, em_idiv_ex),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003816};
3817
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003818static const struct opcode group4[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003819 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3820 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003821 N, N, N, N, N, N,
3822};
3823
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003824static const struct opcode group5[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003825 F(DstMem | SrcNone | Lock, em_inc),
3826 F(DstMem | SrcNone | Lock, em_dec),
Nadav Amit58b70752014-10-24 11:35:09 +03003827 I(SrcMem | NearBranch, em_call_near_abs),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003828 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
Nadav Amit58b70752014-10-24 11:35:09 +03003829 I(SrcMem | NearBranch, em_jmp_abs),
Nadav Amitf7784042014-09-18 22:39:41 +03003830 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3831 I(SrcMem | Stack, em_push), D(Undefined),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003832};
3833
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003834static const struct opcode group6[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003835 DI(Prot, sldt),
3836 DI(Prot, str),
Avi Kivitya14e5792012-06-13 12:28:33 +03003837 II(Prot | Priv | SrcMem16, em_lldt, lldt),
Avi Kivity80890002012-06-13 16:33:29 +03003838 II(Prot | Priv | SrcMem16, em_ltr, ltr),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02003839 N, N, N, N,
3840};
3841
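/* 0f 01 (group 7): memory (mod != 3) forms first, register (mod == 3) forms second */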
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003842static const struct group_dual group7 = { {
Nadav Amit606b1c32014-06-02 18:34:06 +03003843 II(Mov | DstMem, em_sgdt, sgdt),
3844 II(Mov | DstMem, em_sidt, sidt),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003845 II(SrcMem | Priv, em_lgdt, lgdt),
3846 II(SrcMem | Priv, em_lidt, lidt),
3847 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3848 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3849 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003850}, {
Nadav Amit0f54a322014-08-29 11:26:55 +03003851 EXT(0, group7_rm0),
Avi Kivity5ef39c72011-04-21 12:21:50 +03003852 EXT(0, group7_rm1),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003853 N, EXT(0, group7_rm3),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003854 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3855 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3856 EXT(0, group7_rm7),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003857} };
3858
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003859static const struct opcode group8[] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003860 N, N, N, N,
Avi Kivity11c363b2013-01-19 19:51:54 +02003861 F(DstMem | SrcImmByte | NoWrite, em_bt),
3862 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3863 F(DstMem | SrcImmByte | Lock, em_btr),
3864 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003865};
3866
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003867static const struct group_dual group9 = { {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003868 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003869}, {
3870 N, N, N, N, N, N, N, N,
3871} };
3872
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003873static const struct opcode group11[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003874 I(DstMem | SrcImm | Mov | PageTable, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003875 X7(D(Undefined)),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03003876};
3877
Nadav Amit13e457e2014-10-13 13:04:13 +03003878static const struct gprefix pfx_0f_ae_7 = {
Nadav Amit3f6f1482014-10-13 13:04:14 +03003879 I(SrcMem | ByteOp, em_clflush), N, N, N,
Nadav Amit13e457e2014-10-13 13:04:13 +03003880};
3881
3882static const struct group_dual group15 = { {
3883 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3884}, {
3885 N, N, N, N, N, N, N, N,
3886} };
3887
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003888static const struct gprefix pfx_0f_6f_0f_7f = {
Avi Kivitye5971752012-04-09 18:40:03 +03003889 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
Avi Kivityaa97bb42010-01-20 18:09:23 +02003890};
3891
Nadav Amit39f062f2014-11-26 15:47:18 +02003892static const struct instr_dual instr_dual_0f_2b = {
3893 I(0, em_mov), N
3894};
3895
Paolo Bonzinid5b77062014-07-14 12:54:48 +02003896static const struct gprefix pfx_0f_2b = {
Nadav Amit39f062f2014-11-26 15:47:18 +02003897 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
Avi Kivity3e114eb2012-04-09 18:40:01 +03003898};
3899
Igor Mammedov27ce8252014-03-15 21:01:59 +01003900static const struct gprefix pfx_0f_28_0f_29 = {
Igor Mammedov6fec27d2014-03-15 21:02:00 +01003901 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01003902};
3903
Alex Williamson0a370272014-07-11 11:56:31 -06003904static const struct gprefix pfx_0f_e7 = {
3905 N, I(Sse, em_mov), N, N,
3906};
3907
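/* x87 escape opcodes D9/DB/DD: only FNSTCW, FNINIT and FNSTSW are emulated */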
Gleb Natapov045a2822012-12-20 16:57:43 +02003908static const struct escape escape_d9 = { {
Nadav Amit16bebef2014-12-25 02:52:18 +02003909 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
Gleb Natapov045a2822012-12-20 16:57:43 +02003910}, {
3911 /* 0xC0 - 0xC7 */
3912 N, N, N, N, N, N, N, N,
3913 /* 0xC8 - 0xCF */
3914 N, N, N, N, N, N, N, N,
3915	/* 0xD0 - 0xD7 */
3916 N, N, N, N, N, N, N, N,
3917 /* 0xD8 - 0xDF */
3918 N, N, N, N, N, N, N, N,
3919 /* 0xE0 - 0xE7 */
3920 N, N, N, N, N, N, N, N,
3921 /* 0xE8 - 0xEF */
3922 N, N, N, N, N, N, N, N,
3923 /* 0xF0 - 0xF7 */
3924 N, N, N, N, N, N, N, N,
3925 /* 0xF8 - 0xFF */
3926 N, N, N, N, N, N, N, N,
3927} };
3928
3929static const struct escape escape_db = { {
3930 N, N, N, N, N, N, N, N,
3931}, {
3932 /* 0xC0 - 0xC7 */
3933 N, N, N, N, N, N, N, N,
3934 /* 0xC8 - 0xCF */
3935 N, N, N, N, N, N, N, N,
3936	/* 0xD0 - 0xD7 */
3937 N, N, N, N, N, N, N, N,
3938 /* 0xD8 - 0xDF */
3939 N, N, N, N, N, N, N, N,
3940 /* 0xE0 - 0xE7 */
3941 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3942 /* 0xE8 - 0xEF */
3943 N, N, N, N, N, N, N, N,
3944 /* 0xF0 - 0xF7 */
3945 N, N, N, N, N, N, N, N,
3946 /* 0xF8 - 0xFF */
3947 N, N, N, N, N, N, N, N,
3948} };
3949
3950static const struct escape escape_dd = { {
Nadav Amit16bebef2014-12-25 02:52:18 +02003951 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
Gleb Natapov045a2822012-12-20 16:57:43 +02003952}, {
3953 /* 0xC0 - 0xC7 */
3954 N, N, N, N, N, N, N, N,
3955 /* 0xC8 - 0xCF */
3956 N, N, N, N, N, N, N, N,
3957	/* 0xD0 - 0xD7 */
3958 N, N, N, N, N, N, N, N,
3959 /* 0xD8 - 0xDF */
3960 N, N, N, N, N, N, N, N,
3961 /* 0xE0 - 0xE7 */
3962 N, N, N, N, N, N, N, N,
3963 /* 0xE8 - 0xEF */
3964 N, N, N, N, N, N, N, N,
3965 /* 0xF0 - 0xF7 */
3966 N, N, N, N, N, N, N, N,
3967 /* 0xF8 - 0xFF */
3968 N, N, N, N, N, N, N, N,
3969} };
3970
Nadav Amit39f062f2014-11-26 15:47:18 +02003971static const struct instr_dual instr_dual_0f_c3 = {
3972 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
3973};
3974
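/* Primary (one-byte) opcode map, indexed by the opcode byte */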
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003975static const struct opcode opcode_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003976 /* 0x00 - 0x07 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003977 F6ALU(Lock, em_add),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003978 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3979 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003980 /* 0x08 - 0x0F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003981 F6ALU(Lock | PageTable, em_or),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003982 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3983 N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003984 /* 0x10 - 0x17 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003985 F6ALU(Lock, em_adc),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003986 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3987 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003988 /* 0x18 - 0x1F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003989 F6ALU(Lock, em_sbb),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003990 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3991 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003992 /* 0x20 - 0x27 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003993 F6ALU(Lock | PageTable, em_and), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003994 /* 0x28 - 0x2F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003995 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003996 /* 0x30 - 0x37 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003997 F6ALU(Lock, em_xor), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003998 /* 0x38 - 0x3F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003999 F6ALU(NoWrite, em_cmp), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004000 /* 0x40 - 0x4F */
Avi Kivity95413dc2013-01-19 19:51:53 +02004001 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004002 /* 0x50 - 0x57 */
Avi Kivity63540382010-07-29 15:11:55 +03004003 X8(I(SrcReg | Stack, em_push)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004004 /* 0x58 - 0x5F */
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09004005 X8(I(DstReg | Stack, em_pop)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004006 /* 0x60 - 0x67 */
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09004007 I(ImplicitOps | Stack | No64, em_pusha),
4008 I(ImplicitOps | Stack | No64, em_popa),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004009 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
4010 N, N, N, N,
4011 /* 0x68 - 0x6F */
Avi Kivityd46164d2010-08-18 19:29:33 +03004012 I(SrcImm | Mov | Stack, em_push),
4013 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03004014 I(SrcImmByte | Mov | Stack, em_push),
4015 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004016 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
Takuya Yoshikawa2b5e97e2011-11-23 12:27:39 +09004017 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
Avi Kivity73fba5f2010-07-29 15:11:53 +03004018 /* 0x70 - 0x7F */
Nadav Amit58b70752014-10-24 11:35:09 +03004019 X16(D(SrcImmByte | NearBranch)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004020 /* 0x80 - 0x87 */
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09004021 G(ByteOp | DstMem | SrcImm, group1),
4022 G(DstMem | SrcImm, group1),
4023 G(ByteOp | DstMem | SrcImm | No64, group1),
4024 G(DstMem | SrcImmByte, group1),
Avi Kivityfb864fb2013-01-04 16:18:54 +02004025 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004026 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004027 /* 0x88 - 0x8F */
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004028 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004029 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004030 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09004031 D(ModRM | SrcMem | NoAccess | DstReg),
4032 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4033 G(0, group1A),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004034 /* 0x90 - 0x97 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02004035 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004036 /* 0x98 - 0x9F */
Avi Kivity61429142010-08-19 15:13:00 +03004037 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
Wei Yongjuncc4feed2010-08-25 14:10:53 +08004038 I(SrcImmFAddr | No64, em_call_far), N,
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09004039 II(ImplicitOps | Stack, em_pushf, pushf),
Paolo Bonzini98f73632013-10-31 11:19:42 +01004040 II(ImplicitOps | Stack, em_popf, popf),
4041 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004042 /* 0xA0 - 0xA7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004043 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004044 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004045 I2bv(SrcSI | DstDI | Mov | String, em_mov),
Nadav Amit5aca3722014-11-02 11:54:50 +02004046 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004047 /* 0xA8 - 0xAF */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004048 F2bv(DstAcc | SrcImm | NoWrite, em_test),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004049 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4050 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
Nadav Amit5aca3722014-11-02 11:54:50 +02004051 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004052 /* 0xB0 - 0xB7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004053 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004054 /* 0xB8 - 0xBF */
Nadav Amit5e2c6882012-12-06 21:55:10 -02004055 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004056 /* 0xC0 - 0xC7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02004057 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
Nadav Amit58b70752014-10-24 11:35:09 +03004058 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4059 I(ImplicitOps | NearBranch, em_ret),
Avi Kivityd4b43252011-09-13 10:45:50 +03004060 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4061 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03004062 G(ByteOp, group11), G(0, group11),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004063 /* 0xC8 - 0xCF */
Avi Kivity612e89f2012-06-12 20:03:23 +03004064 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
Bruce Rogers32611072013-09-09 09:40:20 -06004065 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4066 I(ImplicitOps | Stack, em_ret_far),
Avi Kivity3c6e2762011-04-04 12:39:23 +02004067 D(ImplicitOps), DI(SrcImmByte, intn),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004068 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004069 /* 0xD0 - 0xD7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02004070 G(Src2One | ByteOp, group2), G(Src2One, group2),
4071 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02004072 I(DstAcc | SrcImmUByte | No64, em_aam),
Paolo Bonzini326f5782013-05-09 11:32:51 +02004073 I(DstAcc | SrcImmUByte | No64, em_aad),
4074 F(DstAcc | ByteOp | No64, em_salc),
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004075 I(DstAcc | SrcXLat | ByteOp, em_mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004076 /* 0xD8 - 0xDF */
Gleb Natapov045a2822012-12-20 16:57:43 +02004077 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004078 /* 0xE0 - 0xE7 */
Nadav Amit58b70752014-10-24 11:35:09 +03004079 X3(I(SrcImmByte | NearBranch, em_loop)),
4080 I(SrcImmByte | NearBranch, em_jcxz),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004081 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4082 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004083 /* 0xE8 - 0xEF */
Nadav Amit58b70752014-10-24 11:35:09 +03004084 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4085 I(SrcImmFAddr | No64, em_jmp_far),
4086 D(SrcImmByte | ImplicitOps | NearBranch),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004087 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4088 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004089 /* 0xF0 - 0xF7 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02004090 N, DI(ImplicitOps, icebp), N, N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02004091 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4092 G(ByteOp, group3), G(0, group3),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004093 /* 0xF8 - 0xFF */
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09004094 D(ImplicitOps), D(ImplicitOps),
4095 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004096 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4097};
4098
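/* Two-byte (0f xx) opcode map, indexed by the second opcode byte */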
Mathias Krausefd0a0d82012-08-30 01:30:15 +02004099static const struct opcode twobyte_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03004100 /* 0x00 - 0x0F */
Joerg Roedeldee6bb72011-04-04 12:39:30 +02004101 G(0, group6), GD(0, &group7), N, N,
Borislav Petkovb51e9742013-09-22 16:44:52 +02004102 N, I(ImplicitOps | EmulateOnUD, em_syscall),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004103 II(ImplicitOps | Priv, em_clts, clts), N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02004104 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
Nadav Amit3f6f1482014-10-13 13:04:14 +03004105 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004106 /* 0x10 - 0x1F */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02004107 N, N, N, N, N, N, N, N,
Nadav Amit3f6f1482014-10-13 13:04:14 +03004108 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4109 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004110 /* 0x20 - 0x2F */
Nadav Amit9b88ae92014-05-25 23:05:21 +03004111 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4112 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4113 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4114 check_cr_write),
4115 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4116 check_dr_write),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004117 N, N, N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01004118 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4119 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
Paolo Bonzinid5b77062014-07-14 12:54:48 +02004120 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
Avi Kivity3e114eb2012-04-09 18:40:01 +03004121 N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004122 /* 0x30 - 0x3F */
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09004123 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
Joerg Roedel80612522011-04-04 12:39:33 +02004124 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09004125 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
Avi Kivity222d21a2011-11-10 14:57:30 +02004126 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
Borislav Petkovb51e9742013-09-22 16:44:52 +02004127 I(ImplicitOps | EmulateOnUD, em_sysenter),
4128 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
Avi Kivityd8671622011-02-01 16:32:03 +02004129 N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004130 N, N, N, N, N, N, N, N,
4131 /* 0x40 - 0x4F */
Nadav Amit140bad82014-06-15 16:13:00 +03004132 X16(D(DstReg | SrcMem | ModRM)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004133 /* 0x50 - 0x5F */
4134 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4135 /* 0x60 - 0x6F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02004136 N, N, N, N,
4137 N, N, N, N,
4138 N, N, N, N,
4139 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004140 /* 0x70 - 0x7F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02004141 N, N, N, N,
4142 N, N, N, N,
4143 N, N, N, N,
4144 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004145 /* 0x80 - 0x8F */
Nadav Amit58b70752014-10-24 11:35:09 +03004146 X16(D(SrcImm | NearBranch)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004147 /* 0x90 - 0x9F */
Wei Yongjunee45b582010-08-06 17:10:07 +08004148	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004149 /* 0xA0 - 0xA7 */
Avi Kivity1cd196e2011-09-13 10:45:51 +03004150 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
Avi Kivity11c363b2013-01-19 19:51:54 +02004151 II(ImplicitOps, em_cpuid, cpuid),
4152 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
Avi Kivity0bdea062013-01-19 19:51:50 +02004153 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4154 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004155 /* 0xA8 - 0xAF */
Avi Kivity1cd196e2011-09-13 10:45:51 +03004156 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004157 DI(ImplicitOps, rsm),
Avi Kivity11c363b2013-01-19 19:51:54 +02004158 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
Avi Kivity0bdea062013-01-19 19:51:50 +02004159 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4160 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
Nadav Amit13e457e2014-10-13 13:04:13 +03004161 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004162 /* 0xB0 - 0xB7 */
Nadav Amit2fcf5c82015-01-26 09:32:21 +02004163 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
Avi Kivityd4b43252011-09-13 10:45:50 +03004164 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
Avi Kivity11c363b2013-01-19 19:51:54 +02004165 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
Avi Kivityd4b43252011-09-13 10:45:50 +03004166 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4167 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004168 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004169 /* 0xB8 - 0xBF */
4170 N, N,
Takuya Yoshikawace7faab2011-11-22 15:17:48 +09004171 G(BitOp, group8),
Avi Kivity11c363b2013-01-19 19:51:54 +02004172 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4173 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004174 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity92998362012-06-13 12:25:06 +03004175 /* 0xC0 - 0xC7 */
Avi Kivitye47a5f52013-02-09 11:31:51 +02004176 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
Nadav Amit39f062f2014-11-26 15:47:18 +02004177 N, ID(0, &instr_dual_0f_c3),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004178 N, N, N, GD(0, &group9),
Avi Kivity92998362012-06-13 12:25:06 +03004179 /* 0xC8 - 0xCF */
4180 X8(I(DstReg, em_bswap)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004181 /* 0xD0 - 0xDF */
4182 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4183 /* 0xE0 - 0xEF */
Alex Williamson0a370272014-07-11 11:56:31 -06004184 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4185 N, N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004186 /* 0xF0 - 0xFF */
4187 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4188};
4189
Nadav Amit39f062f2014-11-26 15:47:18 +02004190static const struct instr_dual instr_dual_0f_38_f0 = {
4191 I(DstReg | SrcMem | Mov, em_movbe), N
4192};
4193
4194static const struct instr_dual instr_dual_0f_38_f1 = {
4195 I(DstMem | SrcReg | Mov, em_movbe), N
4196};
4197
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004198static const struct gprefix three_byte_0f_38_f0 = {
Nadav Amit39f062f2014-11-26 15:47:18 +02004199 ID(0, &instr_dual_0f_38_f0), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004200};
4201
4202static const struct gprefix three_byte_0f_38_f1 = {
Nadav Amit39f062f2014-11-26 15:47:18 +02004203 ID(0, &instr_dual_0f_38_f1), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004204};
4205
4206/*
4207 * Insns below are indexed by the third opcode byte and then selected by
4208 * the mandatory prefix.
4209 */
4210static const struct opcode opcode_map_0f_38[256] = {
4211 /* 0x00 - 0x7f */
4212 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
Borislav Petkov84cffe42013-10-29 12:54:56 +01004213 /* 0x80 - 0xef */
4214 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4215 /* 0xf0 - 0xf1 */
Nadav Amit53bb4f72014-12-07 11:49:42 +02004216 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4217 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
Borislav Petkov84cffe42013-10-29 12:54:56 +01004218 /* 0xf2 - 0xff */
4219 N, N, X4(N), X8(N)
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004220};
4221
Avi Kivity73fba5f2010-07-29 15:11:53 +03004222#undef D
4223#undef N
4224#undef G
4225#undef GD
4226#undef I
Avi Kivityaa97bb42010-01-20 18:09:23 +02004227#undef GP
Joerg Roedel01de8b02011-04-04 12:39:31 +02004228#undef EXT
Avi Kivity73fba5f2010-07-29 15:11:53 +03004229
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004230#undef D2bv
Joerg Roedelf6511932011-04-04 12:39:35 +02004231#undef D2bvIP
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004232#undef I2bv
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004233#undef I2bvIP
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09004234#undef F6ALU
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004235
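/*
 * Immediate operand size: 1 for ByteOp, otherwise op_bytes capped at 4
 * (a 32-bit immediate is sign-extended in 64-bit mode).
 */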
Avi Kivity9dac77f2011-06-01 15:34:25 +03004236static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
Avi Kivity39f21ee2010-08-18 19:20:21 +03004237{
4238 unsigned size;
4239
Avi Kivity9dac77f2011-06-01 15:34:25 +03004240 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004241 if (size == 8)
4242 size = 4;
4243 return size;
4244}
4245
4246static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4247 unsigned size, bool sign_extension)
4248{
Avi Kivity39f21ee2010-08-18 19:20:21 +03004249 int rc = X86EMUL_CONTINUE;
4250
4251 op->type = OP_IMM;
4252 op->bytes = size;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004253 op->addr.mem.ea = ctxt->_eip;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004254 /* NB. Immediates are sign-extended as necessary. */
4255 switch (op->bytes) {
4256 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004257 op->val = insn_fetch(s8, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004258 break;
4259 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004260 op->val = insn_fetch(s16, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004261 break;
4262 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004263 op->val = insn_fetch(s32, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004264 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004265 case 8:
4266 op->val = insn_fetch(s64, ctxt);
4267 break;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004268 }
4269 if (!sign_extension) {
4270 switch (op->bytes) {
4271 case 1:
4272 op->val &= 0xff;
4273 break;
4274 case 2:
4275 op->val &= 0xffff;
4276 break;
4277 case 4:
4278 op->val &= 0xffffffff;
4279 break;
4280 }
4281 }
4282done:
4283 return rc;
4284}
4285
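/* Decode one operand according to the Op* type taken from the decode flags */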
Avi Kivitya99455492011-09-13 10:45:41 +03004286static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4287 unsigned d)
4288{
4289 int rc = X86EMUL_CONTINUE;
4290
4291 switch (d) {
4292 case OpReg:
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004293 decode_register_operand(ctxt, op);
Avi Kivitya99455492011-09-13 10:45:41 +03004294 break;
4295 case OpImmUByte:
Avi Kivity608aabe2011-09-13 10:45:45 +03004296 rc = decode_imm(ctxt, op, 1, false);
Avi Kivitya99455492011-09-13 10:45:41 +03004297 break;
4298 case OpMem:
Avi Kivity41ddf972011-09-13 10:45:48 +03004299 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity0fe59122011-09-13 10:45:47 +03004300 mem_common:
Avi Kivitya99455492011-09-13 10:45:41 +03004301 *op = ctxt->memop;
4302 ctxt->memopp = op;
Paolo Bonzini96888972014-04-01 14:54:19 +02004303 if (ctxt->d & BitOp)
Avi Kivitya99455492011-09-13 10:45:41 +03004304 fetch_bit_operand(ctxt);
4305 op->orig_val = op->val;
4306 break;
Avi Kivity41ddf972011-09-13 10:45:48 +03004307 case OpMem64:
Nadav Amitaaa05f22014-06-02 18:34:10 +03004308 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
Avi Kivity41ddf972011-09-13 10:45:48 +03004309 goto mem_common;
Avi Kivitya99455492011-09-13 10:45:41 +03004310 case OpAcc:
4311 op->type = OP_REG;
4312 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004313 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Avi Kivitya99455492011-09-13 10:45:41 +03004314 fetch_register_operand(op);
4315 op->orig_val = op->val;
4316 break;
Avi Kivity820207c2013-02-09 11:31:45 +02004317 case OpAccLo:
4318 op->type = OP_REG;
4319 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4320 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4321 fetch_register_operand(op);
4322 op->orig_val = op->val;
4323 break;
4324 case OpAccHi:
4325 if (ctxt->d & ByteOp) {
4326 op->type = OP_NONE;
4327 break;
4328 }
4329 op->type = OP_REG;
4330 op->bytes = ctxt->op_bytes;
4331 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4332 fetch_register_operand(op);
4333 op->orig_val = op->val;
4334 break;
Avi Kivitya99455492011-09-13 10:45:41 +03004335 case OpDI:
4336 op->type = OP_MEM;
4337 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4338 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004339 register_address(ctxt, VCPU_REGS_RDI);
Avi Kivitya99455492011-09-13 10:45:41 +03004340 op->addr.mem.seg = VCPU_SREG_ES;
4341 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004342 op->count = 1;
Avi Kivitya99455492011-09-13 10:45:41 +03004343 break;
4344 case OpDX:
4345 op->type = OP_REG;
4346 op->bytes = 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004347 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivitya99455492011-09-13 10:45:41 +03004348 fetch_register_operand(op);
4349 break;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004350 case OpCL:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004351 op->type = OP_IMM;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004352 op->bytes = 1;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004353 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004354 break;
4355 case OpImmByte:
4356 rc = decode_imm(ctxt, op, 1, true);
4357 break;
4358 case OpOne:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004359 op->type = OP_IMM;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004360 op->bytes = 1;
4361 op->val = 1;
4362 break;
4363 case OpImm:
4364 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4365 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004366 case OpImm64:
4367 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4368 break;
Avi Kivity28867ce2012-01-16 15:08:44 +02004369 case OpMem8:
4370 ctxt->memop.bytes = 1;
Gleb Natapov660696d2013-04-24 13:38:36 +03004371 if (ctxt->memop.type == OP_REG) {
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02004372 ctxt->memop.addr.reg = decode_register(ctxt,
4373 ctxt->modrm_rm, true);
Gleb Natapov660696d2013-04-24 13:38:36 +03004374 fetch_register_operand(&ctxt->memop);
4375 }
Avi Kivity28867ce2012-01-16 15:08:44 +02004376 goto mem_common;
Avi Kivity0fe59122011-09-13 10:45:47 +03004377 case OpMem16:
4378 ctxt->memop.bytes = 2;
4379 goto mem_common;
4380 case OpMem32:
4381 ctxt->memop.bytes = 4;
4382 goto mem_common;
4383 case OpImmU16:
4384 rc = decode_imm(ctxt, op, 2, false);
4385 break;
4386 case OpImmU:
4387 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4388 break;
4389 case OpSI:
4390 op->type = OP_MEM;
4391 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4392 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004393 register_address(ctxt, VCPU_REGS_RSI);
Bandan Das573e80f2014-04-16 12:46:13 -04004394 op->addr.mem.seg = ctxt->seg_override;
Avi Kivity0fe59122011-09-13 10:45:47 +03004395 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004396 op->count = 1;
Avi Kivity0fe59122011-09-13 10:45:47 +03004397 break;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004398 case OpXLat:
4399 op->type = OP_MEM;
4400 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4401 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004402 address_mask(ctxt,
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004403 reg_read(ctxt, VCPU_REGS_RBX) +
4404 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
Bandan Das573e80f2014-04-16 12:46:13 -04004405 op->addr.mem.seg = ctxt->seg_override;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004406 op->val = 0;
4407 break;
Avi Kivity0fe59122011-09-13 10:45:47 +03004408 case OpImmFAddr:
4409 op->type = OP_IMM;
4410 op->addr.mem.ea = ctxt->_eip;
4411 op->bytes = ctxt->op_bytes + 2;
4412 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4413 break;
4414 case OpMemFAddr:
4415 ctxt->memop.bytes = ctxt->op_bytes + 2;
4416 goto mem_common;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004417 case OpES:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004418 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004419 op->val = VCPU_SREG_ES;
4420 break;
4421 case OpCS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004422 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004423 op->val = VCPU_SREG_CS;
4424 break;
4425 case OpSS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004426 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004427 op->val = VCPU_SREG_SS;
4428 break;
4429 case OpDS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004430 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004431 op->val = VCPU_SREG_DS;
4432 break;
4433 case OpFS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004434 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004435 op->val = VCPU_SREG_FS;
4436 break;
4437 case OpGS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004438 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004439 op->val = VCPU_SREG_GS;
4440 break;
Avi Kivitya99455492011-09-13 10:45:41 +03004441 case OpImplicit:
4442 /* Special instructions do their own operand decoding. */
4443 default:
4444 op->type = OP_NONE; /* Disable writeback. */
4445 break;
4446 }
4447
4448done:
4449 return rc;
4450}
4451
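/*
 * Decode one instruction: legacy and REX prefixes, opcode byte(s), ModRM/SIB
 * and all operands.  Decode only; execution happens in x86_emulate_insn().
 */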
Takuya Yoshikawaef5d75c2011-05-15 00:57:43 +09004452int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004453{
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004454 int rc = X86EMUL_CONTINUE;
4455 int mode = ctxt->mode;
Avi Kivity46561642011-04-24 14:09:59 +03004456 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004457 bool op_prefix = false;
Bandan Das573e80f2014-04-16 12:46:13 -04004458 bool has_seg_override = false;
Avi Kivity46561642011-04-24 14:09:59 +03004459 struct opcode opcode;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004460
Avi Kivityf09ed832011-09-13 10:45:40 +03004461 ctxt->memop.type = OP_NONE;
4462 ctxt->memopp = NULL;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004463 ctxt->_eip = ctxt->eip;
Paolo Bonzini17052f12014-05-06 16:33:01 +02004464 ctxt->fetch.ptr = ctxt->fetch.data;
4465 ctxt->fetch.end = ctxt->fetch.data + insn_len;
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004466 ctxt->opcode_len = 1;
Andre Przywaradc25e892010-12-21 11:12:07 +01004467 if (insn_len > 0)
Avi Kivity9dac77f2011-06-01 15:34:25 +03004468 memcpy(ctxt->fetch.data, insn, insn_len);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004469 else {
Paolo Bonzini9506d572014-05-06 13:05:25 +02004470 rc = __do_insn_fetch_bytes(ctxt, 1);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004471 if (rc != X86EMUL_CONTINUE)
4472 return rc;
4473 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004474
4475 switch (mode) {
4476 case X86EMUL_MODE_REAL:
4477 case X86EMUL_MODE_VM86:
4478 case X86EMUL_MODE_PROT16:
4479 def_op_bytes = def_ad_bytes = 2;
4480 break;
4481 case X86EMUL_MODE_PROT32:
4482 def_op_bytes = def_ad_bytes = 4;
4483 break;
4484#ifdef CONFIG_X86_64
4485 case X86EMUL_MODE_PROT64:
4486 def_op_bytes = 4;
4487 def_ad_bytes = 8;
4488 break;
4489#endif
4490 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004491 return EMULATION_FAILED;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004492 }
4493
Avi Kivity9dac77f2011-06-01 15:34:25 +03004494 ctxt->op_bytes = def_op_bytes;
4495 ctxt->ad_bytes = def_ad_bytes;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004496
4497 /* Legacy prefixes. */
4498 for (;;) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004499 switch (ctxt->b = insn_fetch(u8, ctxt)) {
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004500 case 0x66: /* operand-size override */
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004501 op_prefix = true;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004502 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004503 ctxt->op_bytes = def_op_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004504 break;
4505 case 0x67: /* address-size override */
4506 if (mode == X86EMUL_MODE_PROT64)
4507 /* switch between 4/8 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004508 ctxt->ad_bytes = def_ad_bytes ^ 12;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004509 else
4510 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004511 ctxt->ad_bytes = def_ad_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004512 break;
4513 case 0x26: /* ES override */
4514 case 0x2e: /* CS override */
4515 case 0x36: /* SS override */
4516 case 0x3e: /* DS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004517 has_seg_override = true;
4518 ctxt->seg_override = (ctxt->b >> 3) & 3;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004519 break;
4520 case 0x64: /* FS override */
4521 case 0x65: /* GS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004522 has_seg_override = true;
4523 ctxt->seg_override = ctxt->b & 7;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004524 break;
4525 case 0x40 ... 0x4f: /* REX */
4526 if (mode != X86EMUL_MODE_PROT64)
4527 goto done_prefixes;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004528 ctxt->rex_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004529 continue;
4530 case 0xf0: /* LOCK */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004531 ctxt->lock_prefix = 1;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004532 break;
4533 case 0xf2: /* REPNE/REPNZ */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004534 case 0xf3: /* REP/REPE/REPZ */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004535 ctxt->rep_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004536 break;
4537 default:
4538 goto done_prefixes;
4539 }
4540
4541 /* Any legacy prefix after a REX prefix nullifies its effect. */
4542
Avi Kivity9dac77f2011-06-01 15:34:25 +03004543 ctxt->rex_prefix = 0;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004544 }
4545
4546done_prefixes:
4547
4548 /* REX prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004549 if (ctxt->rex_prefix & 8)
4550 ctxt->op_bytes = 8; /* REX.W */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004551
4552 /* Opcode byte(s). */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004553 opcode = opcode_table[ctxt->b];
Wei Yongjund3ad6242010-08-05 16:34:39 +08004554 /* Two-byte opcode? */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004555 if (ctxt->b == 0x0f) {
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004556 ctxt->opcode_len = 2;
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004557 ctxt->b = insn_fetch(u8, ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004558 opcode = twobyte_table[ctxt->b];
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004559
4560 /* 0F_38 opcode map */
4561 if (ctxt->b == 0x38) {
4562 ctxt->opcode_len = 3;
4563 ctxt->b = insn_fetch(u8, ctxt);
4564 opcode = opcode_map_0f_38[ctxt->b];
4565 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004566 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004567 ctxt->d = opcode.flags;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004568
Takuya Yoshikawa9f4260e2012-04-30 17:48:25 +09004569 if (ctxt->d & ModRM)
4570 ctxt->modrm = insn_fetch(u8, ctxt);
4571
Nadav Amit7fe864d2014-06-02 18:34:03 +03004572 /* vex-prefix instructions are not implemented */
4573 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
Nadav Amitd14cb5d2014-11-02 11:54:58 +02004574 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
Nadav Amit7fe864d2014-06-02 18:34:03 +03004575 ctxt->d = NotImpl;
4576 }
4577
Avi Kivity9dac77f2011-06-01 15:34:25 +03004578 while (ctxt->d & GroupMask) {
4579 switch (ctxt->d & GroupMask) {
Avi Kivity46561642011-04-24 14:09:59 +03004580 case Group:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004581 goffset = (ctxt->modrm >> 3) & 7;
Avi Kivity46561642011-04-24 14:09:59 +03004582 opcode = opcode.u.group[goffset];
4583 break;
4584 case GroupDual:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004585 goffset = (ctxt->modrm >> 3) & 7;
4586 if ((ctxt->modrm >> 6) == 3)
Avi Kivity46561642011-04-24 14:09:59 +03004587 opcode = opcode.u.gdual->mod3[goffset];
4588 else
4589 opcode = opcode.u.gdual->mod012[goffset];
4590 break;
4591 case RMExt:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004592 goffset = ctxt->modrm & 7;
Joerg Roedel01de8b02011-04-04 12:39:31 +02004593 opcode = opcode.u.group[goffset];
Avi Kivity46561642011-04-24 14:09:59 +03004594 break;
4595 case Prefix:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004596 if (ctxt->rep_prefix && op_prefix)
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004597 return EMULATION_FAILED;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004598 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
Avi Kivity46561642011-04-24 14:09:59 +03004599 switch (simd_prefix) {
4600 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4601 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4602 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4603 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4604 }
4605 break;
Gleb Natapov045a2822012-12-20 16:57:43 +02004606 case Escape:
4607 if (ctxt->modrm > 0xbf)
4608 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4609 else
4610 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4611 break;
Nadav Amit39f062f2014-11-26 15:47:18 +02004612 case InstrDual:
4613 if ((ctxt->modrm >> 6) == 3)
4614 opcode = opcode.u.idual->mod3;
4615 else
4616 opcode = opcode.u.idual->mod012;
4617 break;
Avi Kivity46561642011-04-24 14:09:59 +03004618 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004619 return EMULATION_FAILED;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004620 }
Avi Kivity46561642011-04-24 14:09:59 +03004621
Avi Kivityb1ea50b2011-09-13 10:45:42 +03004622 ctxt->d &= ~(u64)GroupMask;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004623 ctxt->d |= opcode.flags;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004624 }
4625
Paolo Bonzinie24186e2014-03-27 12:00:57 +01004626 /* Unrecognised? */
4627 if (ctxt->d == 0)
4628 return EMULATION_FAILED;
4629
Avi Kivity9dac77f2011-06-01 15:34:25 +03004630 ctxt->execute = opcode.u.execute;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004631
Nadav Amit3a6095a2014-08-13 16:50:13 +03004632 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4633 return EMULATION_FAILED;
4634
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004635 if (unlikely(ctxt->d &
Nadav Amited9aad22014-11-02 11:55:00 +02004636 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4637 No16))) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004638 /*
4639 * These are copied unconditionally here, and checked unconditionally
4640 * in x86_emulate_insn.
4641 */
4642 ctxt->check_perm = opcode.check_perm;
4643 ctxt->intercept = opcode.intercept;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004644
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004645 if (ctxt->d & NotImpl)
4646 return EMULATION_FAILED;
Avi Kivityd8671622011-02-01 16:32:03 +02004647
Nadav Amit58b70752014-10-24 11:35:09 +03004648 if (mode == X86EMUL_MODE_PROT64) {
4649 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4650 ctxt->op_bytes = 8;
4651 else if (ctxt->d & NearBranch)
4652 ctxt->op_bytes = 8;
4653 }
Avi Kivity7f9b4b72010-08-01 14:46:54 +03004654
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004655 if (ctxt->d & Op3264) {
4656 if (mode == X86EMUL_MODE_PROT64)
4657 ctxt->op_bytes = 8;
4658 else
4659 ctxt->op_bytes = 4;
4660 }
4661
Nadav Amited9aad22014-11-02 11:55:00 +02004662 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4663 ctxt->op_bytes = 4;
4664
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004665 if (ctxt->d & Sse)
4666 ctxt->op_bytes = 16;
4667 else if (ctxt->d & Mmx)
4668 ctxt->op_bytes = 8;
4669 }
Avi Kivity1253791d2011-03-29 11:41:27 +02004670
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004671 /* ModRM and SIB bytes. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004672 if (ctxt->d & ModRM) {
Avi Kivityf09ed832011-09-13 10:45:40 +03004673 rc = decode_modrm(ctxt, &ctxt->memop);
Bandan Das573e80f2014-04-16 12:46:13 -04004674 if (!has_seg_override) {
4675 has_seg_override = true;
4676 ctxt->seg_override = ctxt->modrm_seg;
4677 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004678 } else if (ctxt->d & MemAbs)
Avi Kivityf09ed832011-09-13 10:45:40 +03004679 rc = decode_abs(ctxt, &ctxt->memop);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004680 if (rc != X86EMUL_CONTINUE)
4681 goto done;
4682
Bandan Das573e80f2014-04-16 12:46:13 -04004683 if (!has_seg_override)
4684 ctxt->seg_override = VCPU_SREG_DS;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004685
Bandan Das573e80f2014-04-16 12:46:13 -04004686 ctxt->memop.addr.mem.seg = ctxt->seg_override;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004687
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004688 /*
4689 * Decode and fetch the source operand: register, memory
4690 * or immediate.
4691 */
Avi Kivity0fe59122011-09-13 10:45:47 +03004692 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004693 if (rc != X86EMUL_CONTINUE)
4694 goto done;
4695
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004696 /*
4697 * Decode and fetch the second source operand: register, memory
4698 * or immediate.
4699 */
Avi Kivity4dd6a572011-09-13 10:45:43 +03004700 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004701 if (rc != X86EMUL_CONTINUE)
4702 goto done;
4703
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004704 /* Decode and fetch the destination operand: register or memory. */
Avi Kivitya99455492011-09-13 10:45:41 +03004705 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004706
Bandan Das41061cd2014-04-16 12:46:14 -04004707 if (ctxt->rip_relative)
Nadav Amit1c1c35a2014-11-19 17:43:09 +02004708 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4709 ctxt->memopp->addr.mem.ea + ctxt->_eip);
Avi Kivitycb16c342011-06-19 19:21:11 +03004710
Paolo Bonzinia430c912014-10-23 14:54:14 +02004711done:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004712 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004713}
4714
Xiao Guangrong1cb3f3a2011-09-22 17:02:48 +08004715bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4716{
4717 return ctxt->d & PageTable;
4718}
4719
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004720static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4721{
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004722 /* The second termination condition only applies for REPE
4723 * and REPNE. Test if the repeat string operation prefix is
4724 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
4725 * corresponding termination condition according to:
4726 * - if REPE/REPZ and ZF = 0 then done
4727 * - if REPNE/REPNZ and ZF = 1 then done
4728 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004729 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4730 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4731 && (((ctxt->rep_prefix == REPE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004732 ((ctxt->eflags & EFLG_ZF) == 0))
Avi Kivity9dac77f2011-06-01 15:34:25 +03004733 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004734 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4735 return true;
4736
4737 return false;
4738}
4739
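/*
 * Execute FWAIT under an exception fixup so that a pending x87 fault is
 * reported to the guest as #MF.
 */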
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004740static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4741{
4742 bool fault = false;
4743
4744 ctxt->ops->get_fpu(ctxt);
4745 asm volatile("1: fwait \n\t"
4746 "2: \n\t"
4747 ".pushsection .fixup,\"ax\" \n\t"
4748 "3: \n\t"
4749 "movb $1, %[fault] \n\t"
4750 "jmp 2b \n\t"
4751 ".popsection \n\t"
4752 _ASM_EXTABLE(1b, 3b)
Avi Kivity38e8a2d2012-04-22 15:12:50 +03004753 : [fault]"+qm"(fault));
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004754 ctxt->ops->put_fpu(ctxt);
4755
4756 if (unlikely(fault))
4757 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4758
4759 return X86EMUL_CONTINUE;
4760}
4761
4762static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4763 struct operand *op)
4764{
4765 if (op->type == OP_MM)
4766 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4767}
4768
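/*
 * Invoke a fastop stub: the size variant is selected from dst.bytes; dst, src
 * and src2 are passed in RAX, RDX and RCX, the flags word in RDI.  A stub
 * reports an exception by clearing the fop pointer, which becomes #DE below.
 */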
Avi Kivitye28bbd42013-01-04 16:18:48 +02004769static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4770{
4771 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
Avi Kivityb9fa4092013-02-09 11:31:48 +02004772 if (!(ctxt->d & ByteOp))
4773 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
Avi Kivitye28bbd42013-01-04 16:18:48 +02004774 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004775 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4776 [fastop]"+S"(fop)
4777 : "c"(ctxt->src2.val));
Avi Kivitye28bbd42013-01-04 16:18:48 +02004778 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004779 if (!fop) /* exception is returned in fop variable */
4780 return emulate_de(ctxt);
Avi Kivitye28bbd42013-01-04 16:18:48 +02004781 return X86EMUL_CONTINUE;
4782}
Avi Kivitydd856ef2012-08-27 23:46:17 +03004783
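/*
 * Reset per-instruction decode state (the fields from rip_relative up to
 * modrm) together with the I/O and memory read caches.
 */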
Bandan Das14985072014-04-16 12:46:09 -04004784void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4785{
Bandan Das573e80f2014-04-16 12:46:13 -04004786 memset(&ctxt->rip_relative, 0,
4787 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
Bandan Das14985072014-04-16 12:46:09 -04004788
Bandan Das14985072014-04-16 12:46:09 -04004789 ctxt->io_read.pos = 0;
4790 ctxt->io_read.end = 0;
Bandan Das14985072014-04-16 12:46:09 -04004791 ctxt->mem_read.end = 0;
4792}
4793
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09004794int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004795{
Mathias Krause0225fb52012-08-30 01:30:16 +02004796 const struct x86_emulate_ops *ops = ctxt->ops;
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09004797 int rc = X86EMUL_CONTINUE;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004798 int saved_dst_type = ctxt->dst.type;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004799
Avi Kivity9dac77f2011-06-01 15:34:25 +03004800 ctxt->mem_read.pos = 0;
Glauber Costa310b5d32009-05-12 16:21:06 -04004801
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004802 /* LOCK prefix is allowed only with some instructions */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004803 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004804 rc = emulate_ud(ctxt);
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004805 goto done;
4806 }
4807
Avi Kivity9dac77f2011-06-01 15:34:25 +03004808 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004809 rc = emulate_ud(ctxt);
Avi Kivity081bca02010-08-26 11:06:15 +03004810 goto done;
4811 }
4812
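	/*
	 * Decode flags that rarely trigger are grouped behind a single
	 * unlikely() test so the common case pays for only one check.
	 */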
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004813 if (unlikely(ctxt->d &
4814 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4815 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4816 (ctxt->d & Undefined)) {
4817 rc = emulate_ud(ctxt);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004818 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004819 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004820
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004821 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4822 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4823 rc = emulate_ud(ctxt);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004824 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004825 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004826
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004827 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4828 rc = emulate_nm(ctxt);
Joerg Roedeld09beab2011-04-04 12:39:25 +02004829 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004830 }
Joerg Roedeld09beab2011-04-04 12:39:25 +02004831
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004832 if (ctxt->d & Mmx) {
4833 rc = flush_pending_x87_faults(ctxt);
4834 if (rc != X86EMUL_CONTINUE)
4835 goto done;
4836 /*
4837			 * Now that we know the FPU is exception-safe, we can fetch
4838			 * operands from it.
4839 */
4840 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4841 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4842 if (!(ctxt->d & Mov))
4843 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4844 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004845
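		/*
		 * For a nested guest, the intercept is checked at the same
		 * stages hardware would check it: before exceptions, after
		 * exceptions and after the memory access.
		 */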
Bandan Das685bbf42014-04-16 12:46:10 -04004846 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004847 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4848 X86_ICPT_PRE_EXCEPT);
4849 if (rc != X86EMUL_CONTINUE)
4850 goto done;
4851 }
4852
Nadav Amit64a38292014-12-10 11:19:04 +02004853 /* Instruction can only be executed in protected mode */
4854 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4855 rc = emulate_ud(ctxt);
4856 goto done;
4857 }
4858
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004859		/* Privileged instructions can be executed only at CPL 0 */
4860 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
Nadav Amit68efa762014-06-18 17:19:35 +03004861 if (ctxt->d & PrivUD)
4862 rc = emulate_ud(ctxt);
4863 else
4864 rc = emulate_gp(ctxt, 0);
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004865 goto done;
4866 }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004867
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004868		/* Do instruction-specific permission checks */
Bandan Das685bbf42014-04-16 12:46:10 -04004869 if (ctxt->d & CheckPerm) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004870 rc = ctxt->check_perm(ctxt);
4871 if (rc != X86EMUL_CONTINUE)
4872 goto done;
4873 }
4874
Bandan Das685bbf42014-04-16 12:46:10 -04004875 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004876 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4877 X86_ICPT_POST_EXCEPT);
4878 if (rc != X86EMUL_CONTINUE)
4879 goto done;
4880 }
4881
4882 if (ctxt->rep_prefix && (ctxt->d & String)) {
4883 /* All REP prefixes have the same first termination condition */
4884 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4885 ctxt->eip = ctxt->_eip;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004886 ctxt->eflags &= ~EFLG_RF;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004887 goto done;
4888 }
4889 }
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004890 }
4891
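	/*
	 * Operand fetch: read the source (and second source) from memory
	 * unless the encoding says not to; the destination is read below
	 * only when its old value is actually needed (i.e. not a pure Mov).
	 */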
Avi Kivity9dac77f2011-06-01 15:34:25 +03004892 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4893 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4894 ctxt->src.valptr, ctxt->src.bytes);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09004895 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004896 goto done;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004897 ctxt->src.orig_val64 = ctxt->src.val64;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004898 }
4899
Avi Kivity9dac77f2011-06-01 15:34:25 +03004900 if (ctxt->src2.type == OP_MEM) {
4901 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4902 &ctxt->src2.val, ctxt->src2.bytes);
Gleb Natapove35b7b92010-02-25 16:36:42 +02004903 if (rc != X86EMUL_CONTINUE)
4904 goto done;
4905 }
4906
Avi Kivity9dac77f2011-06-01 15:34:25 +03004907 if ((ctxt->d & DstMask) == ImplicitOps)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004908 goto special_insn;
4909
Avi Kivity9dac77f2011-06-01 15:34:25 +03004911 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004912 /* optimisation - avoid slow emulated read if Mov */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004913 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4914 &ctxt->dst.val, ctxt->dst.bytes);
Nadav Amitc205fb72014-12-25 02:52:16 +02004915 if (rc != X86EMUL_CONTINUE) {
4916 if (rc == X86EMUL_PROPAGATE_FAULT &&
4917 ctxt->exception.vector == PF_VECTOR)
4918 ctxt->exception.error_code |= PFERR_WRITE_MASK;
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004919 goto done;
Nadav Amitc205fb72014-12-25 02:52:16 +02004920 }
Avi Kivity038e51d2007-01-22 20:40:40 -08004921 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004922 ctxt->dst.orig_val = ctxt->dst.val;
Avi Kivity038e51d2007-01-22 20:40:40 -08004923
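	/*
	 * Execution starts here; instructions with an implicit destination
	 * jump straight to this label, skipping the destination read above.
	 */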
Avi Kivity018a98d2007-11-27 19:30:56 +02004924special_insn:
4925
Bandan Das685bbf42014-04-16 12:46:10 -04004926 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03004927 rc = emulator_check_intercept(ctxt, ctxt->intercept,
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004928 X86_ICPT_POST_MEMACCESS);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004929 if (rc != X86EMUL_CONTINUE)
4930 goto done;
4931 }
4932
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03004933 if (ctxt->rep_prefix && (ctxt->d & String))
4934 ctxt->eflags |= EFLG_RF;
4935 else
4936 ctxt->eflags &= ~EFLG_RF;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004937
Avi Kivity9dac77f2011-06-01 15:34:25 +03004938 if (ctxt->execute) {
Avi Kivitye28bbd42013-01-04 16:18:48 +02004939 if (ctxt->d & Fastop) {
4940 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4941 rc = fastop(ctxt, fop);
4942 if (rc != X86EMUL_CONTINUE)
4943 goto done;
4944 goto writeback;
4945 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004946 rc = ctxt->execute(ctxt);
Avi Kivityef65c882010-07-29 15:11:51 +03004947 if (rc != X86EMUL_CONTINUE)
4948 goto done;
4949 goto writeback;
4950 }
4951
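	/*
	 * No ->execute callback: fall back to the opcode switches.  One-byte
	 * opcodes are handled here, 0x0f-prefixed ones at the twobyte_insn
	 * and threebyte_insn labels further down.
	 */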
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004952 if (ctxt->opcode_len == 2)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004953 goto twobyte_insn;
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004954 else if (ctxt->opcode_len == 3)
4955 goto threebyte_insn;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004956
Avi Kivity9dac77f2011-06-01 15:34:25 +03004957 switch (ctxt->b) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08004958 case 0x63: /* movsxd */
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004959 if (ctxt->mode != X86EMUL_MODE_PROT64)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004960 goto cannot_emulate;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004961 ctxt->dst.val = (s32) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004962 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03004963 case 0x70 ... 0x7f: /* jcc (short) */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004964 if (test_cc(ctxt->b, ctxt->eflags))
Nadav Amit234f3ce2014-09-18 22:39:38 +03004965 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02004966 break;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004967 case 0x8d: /* lea r16/r32, m */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004968 ctxt->dst.val = ctxt->src.addr.mem.ea;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004969 break;
Avi Kivity3d9e77d2010-08-01 12:41:59 +03004970 case 0x90 ... 0x97: /* nop / xchg reg, rax */
Avi Kivitydd856ef2012-08-27 23:46:17 +03004971 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
Nadav Amita825f5c2014-06-15 16:13:01 +03004972 ctxt->dst.type = OP_NONE;
4973 else
4974 rc = em_xchg(ctxt);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09004975 break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004976 case 0x98: /* cbw/cwde/cdqe */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004977 switch (ctxt->op_bytes) {
4978 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4979 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4980 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004981 }
4982 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004983 case 0xcc: /* int3 */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004984 rc = emulate_int(ctxt, 3);
4985 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004986 case 0xcd: /* int n */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004987 rc = emulate_int(ctxt, ctxt->src.val);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004988 break;
4989 case 0xce: /* into */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004990 if (ctxt->eflags & EFLG_OF)
4991 rc = emulate_int(ctxt, 4);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004992 break;
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004993 case 0xe9: /* jmp rel */
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004994 case 0xeb: /* jmp rel short */
Nadav Amit234f3ce2014-09-18 22:39:38 +03004995 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004996 ctxt->dst.type = OP_NONE; /* Disable writeback. */
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004997 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02004998 case 0xf4: /* hlt */
Avi Kivity6c3287f2011-04-20 15:43:05 +03004999 ctxt->ops->halt(ctxt);
Mohammed Gamal19fdfa02008-07-06 16:51:26 +03005000 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02005001 case 0xf5: /* cmc */
5002		/* complement the carry flag in eflags */
5003 ctxt->eflags ^= EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02005004 break;
5005 case 0xf8: /* clc */
5006 ctxt->eflags &= ~EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02005007 break;
Mohammed Gamal8744aa92010-08-05 15:42:49 +03005008 case 0xf9: /* stc */
5009 ctxt->eflags |= EFLG_CF;
5010 break;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03005011 case 0xfc: /* cld */
5012 ctxt->eflags &= ~EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03005013 break;
5014 case 0xfd: /* std */
5015 ctxt->eflags |= EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03005016 break;
Avi Kivity91269b82010-07-25 14:51:16 +03005017 default:
5018 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005019 }
Avi Kivity018a98d2007-11-27 19:30:56 +02005020
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005021 if (rc != X86EMUL_CONTINUE)
5022 goto done;
5023
Avi Kivity018a98d2007-11-27 19:30:56 +02005024writeback:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02005025 if (ctxt->d & SrcWrite) {
5026 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5027 rc = writeback(ctxt, &ctxt->src);
5028 if (rc != X86EMUL_CONTINUE)
5029 goto done;
5030 }
Nadav Amitee212292014-06-15 16:12:58 +03005031 if (!(ctxt->d & NoWrite)) {
5032 rc = writeback(ctxt, &ctxt->dst);
5033 if (rc != X86EMUL_CONTINUE)
5034 goto done;
5035 }
Avi Kivity018a98d2007-11-27 19:30:56 +02005036
Gleb Natapov5cd21912010-03-18 15:20:26 +02005037 /*
5038	 * restore dst type in case the decoding will be reused
5039	 * (happens for string instructions)
5040 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005041 ctxt->dst.type = saved_dst_type;
Gleb Natapov5cd21912010-03-18 15:20:26 +02005042
Avi Kivity9dac77f2011-06-01 15:34:25 +03005043 if ((ctxt->d & SrcMask) == SrcSI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03005044 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
Gleb Natapova682e352010-03-18 15:20:21 +02005045
Avi Kivity9dac77f2011-06-01 15:34:25 +03005046 if ((ctxt->d & DstMask) == DstDI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03005047 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
Gleb Natapovd9271122010-03-18 15:20:22 +02005048
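	/*
	 * REP string handling: account for the iterations just performed by
	 * decrementing RCX, then either restart the instruction (possibly
	 * returning to the guest first) or fall through to commit RIP.
	 */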
Avi Kivity9dac77f2011-06-01 15:34:25 +03005049 if (ctxt->rep_prefix && (ctxt->d & String)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03005050 unsigned int count;
Avi Kivity9dac77f2011-06-01 15:34:25 +03005051 struct read_cache *r = &ctxt->io_read;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03005052 if ((ctxt->d & SrcMask) == SrcSI)
5053 count = ctxt->src.count;
5054 else
5055 count = ctxt->dst.count;
Paolo Bonzini01485a22014-11-19 18:25:08 +01005056 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03005057
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005058 if (!string_insn_completed(ctxt)) {
5059 /*
5060			 * or, if it is not used, after every 1024 iterations.
5061 * or, if it is not used, after each 1024 iteration.
5062 */
Avi Kivitydd856ef2012-08-27 23:46:17 +03005063 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005064 (r->end == 0 || r->end != r->pos)) {
5065 /*
5066				 * Reset the read cache.  This usually happens before
5067				 * decode, but since the instruction is restarted
5068				 * we have to do it here.
5069 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005070 ctxt->mem_read.end = 0;
Avi Kivitydd856ef2012-08-27 23:46:17 +03005071 writeback_registers(ctxt);
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005072 return EMULATION_RESTART;
5073 }
5074 goto done; /* skip rip writeback */
Avi Kivity0fa6ccb2010-08-17 11:22:17 +03005075 }
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03005076 ctxt->eflags &= ~EFLG_RF;
Gleb Natapov5cd21912010-03-18 15:20:26 +02005077 }
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005078
Avi Kivity9dac77f2011-06-01 15:34:25 +03005079 ctxt->eip = ctxt->_eip;
Avi Kivity018a98d2007-11-27 19:30:56 +02005080
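	/*
	 * Common exit: note any pending exception for the caller, report
	 * intercepted instructions, and flush the register cache when
	 * emulation succeeded.
	 */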
5081done:
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02005082 if (rc == X86EMUL_PROPAGATE_FAULT) {
5083 WARN_ON(ctxt->exception.vector > 0x1f);
Avi Kivityda9cb572010-11-22 17:53:21 +02005084 ctxt->have_exception = true;
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02005085 }
Joerg Roedel775fde82011-04-04 12:39:24 +02005086 if (rc == X86EMUL_INTERCEPTED)
5087 return EMULATION_INTERCEPTED;
5088
Avi Kivitydd856ef2012-08-27 23:46:17 +03005089 if (rc == X86EMUL_CONTINUE)
5090 writeback_registers(ctxt);
5091
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005092 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005093
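/*
 * Two-byte (0x0f xx) opcodes without an ->execute callback are handled
 * here; anything not listed below is reported as unemulatable.
 */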
5094twobyte_insn:
Avi Kivity9dac77f2011-06-01 15:34:25 +03005095 switch (ctxt->b) {
Avi Kivity018a98d2007-11-27 19:30:56 +02005096 case 0x09: /* wbinvd */
Clemens Nosscfb22372011-04-21 21:16:05 +02005097 (ctxt->ops->wbinvd)(ctxt);
Sheng Yangf5f48ee2010-06-30 12:25:15 +08005098 break;
5099 case 0x08: /* invd */
Avi Kivity018a98d2007-11-27 19:30:56 +02005100 case 0x0d: /* GrpP (prefetch) */
5101 case 0x18: /* Grp16 (prefetch/nop) */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02005102 case 0x1f: /* nop */
Avi Kivity018a98d2007-11-27 19:30:56 +02005103 break;
5104 case 0x20: /* mov cr, reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005105 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
Avi Kivity018a98d2007-11-27 19:30:56 +02005106 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005107 case 0x21: /* mov from dr to reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005108 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005109 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005110 case 0x40 ... 0x4f: /* cmov */
Nadav Amit140bad82014-06-15 16:13:00 +03005111 if (test_cc(ctxt->b, ctxt->eflags))
5112 ctxt->dst.val = ctxt->src.val;
5113 else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
5114 ctxt->op_bytes != 4)
Avi Kivity9dac77f2011-06-01 15:34:25 +03005115 ctxt->dst.type = OP_NONE; /* no writeback */
Avi Kivity6aa8b732006-12-10 02:21:36 -08005116 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03005117	case 0x80 ... 0x8f: /* jnz rel, etc. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005118 if (test_cc(ctxt->b, ctxt->eflags))
Nadav Amit234f3ce2014-09-18 22:39:38 +03005119 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02005120 break;
Wei Yongjunee45b582010-08-06 17:10:07 +08005121 case 0x90 ... 0x9f: /* setcc r/m8 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005122 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
Wei Yongjunee45b582010-08-06 17:10:07 +08005123 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005124 case 0xb6 ... 0xb7: /* movzx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005125 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03005126 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
Avi Kivity9dac77f2011-06-01 15:34:25 +03005127 : (u16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005128 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005129 case 0xbe ... 0xbf: /* movsx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005130 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03005131 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
Avi Kivity9dac77f2011-06-01 15:34:25 +03005132 (s16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005133 break;
Avi Kivity91269b82010-07-25 14:51:16 +03005134 default:
5135 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005136 }
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005137
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01005138threebyte_insn:
5139
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005140 if (rc != X86EMUL_CONTINUE)
5141 goto done;
5142
Avi Kivity6aa8b732006-12-10 02:21:36 -08005143 goto writeback;
5144
5145cannot_emulate:
Gleb Natapova0c0ab22011-03-28 16:57:49 +02005146 return EMULATION_FAILED;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005147}
Avi Kivitydd856ef2012-08-27 23:46:17 +03005148
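/*
 * Thin wrappers so code outside the emulator can invalidate or flush the
 * emulator's register cache without reaching into its internals.
 */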
5149void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5150{
5151 invalidate_registers(ctxt);
5152}
5153
5154void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5155{
5156 writeback_registers(ctxt);
5157}