/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)      /* String instruction (rep capable) */
#define Stack       (1<<14)      /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)      /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)      /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)      /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)      /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)      /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)      /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)      /* Alternate instruction decoding of mod == 3 */
#define Sse         (1<<18)      /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

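/*
 * Illustrative note (not from the original file): each opcode's 56-bit
 * flags word packs up to three 5-bit operand-type fields at DstShift,
 * SrcShift and Src2Shift.  The decoder recovers them roughly like this:
 *
 *	dst_type  = (ctxt->d >> DstShift)  & OpMask;
 *	src_type  = (ctxt->d >> SrcShift)  & OpMask;
 *	src2_type = (ctxt->d >> Src2Shift) & OpMask;
 *
 * so a table entry such as DstReg | SrcMem | ModRM describes a register
 * destination and a memory source selected by the ModRM byte.
 */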
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

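/*
 * Illustrative sketch (not part of the original file): because every size
 * variant of a fastop stub is exactly FASTOP_SIZE bytes long, the variant
 * for the current operand size can be reached by pointer arithmetic
 * instead of a jump table, roughly:
 *
 *	void (*fop)(struct fastop *) = em_add;      8-bit variant first
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * The real dispatch is done in fastop() further down; the lines above only
 * show the size-offset idea.
 */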
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

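/*
 * Illustrative note (not from the original file): the helpers above keep a
 * lazy cache of guest GPRs.  reg_read() pulls a register from the host at
 * most once per emulated instruction, reg_write()/reg_rmw() mark it dirty,
 * and only the dirty registers are flushed back, e.g.:
 *
 *	*reg_rmw(ctxt, VCPU_REGS_RCX) -= 1;	read-modify-write RCX
 *	writeback_registers(ctxt);		flush dirty GPRs via ->write_gpr
 */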
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

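/*
 * Illustrative expansion (not from the original file): FASTOP2(add) emits
 * four FASTOP_SIZE-aligned assembly stubs behind one symbol, approximately:
 *
 *	em_add:       addb %dl,  %al    ; ret	8-bit
 *	em_add + 8:   addw %dx,  %ax    ; ret	16-bit
 *	em_add + 16:  addl %edx, %eax   ; ret	32-bit
 *	em_add + 24:  addq %rdx, %rax   ; ret	64-bit (CONFIG_X86_64 only)
 *
 * fastop() selects a stub by adding a multiple of FASTOP_SIZE to em_add.
 */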
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

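/*
 * Illustrative note (not from the original file): the masks above keep
 * register updates inside the current address/stack width.  With a 16-bit
 * stack segment, stack_mask() is 0xffff, so pushing a word does roughly:
 *
 *	rsp_increment(ctxt, -2);	only SP wraps; upper RSP bits stay put
 *
 * while in 64-bit mode the mask is ~0UL and the full RSP is updated.
 */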
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		if (size > *max_size)
			goto bad;
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

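/*
 * Illustrative note (not from the original file): __linearize() turns a
 * segment:offset pair into a linear address and reports how many bytes
 * remain valid past it.  For example, a 4-byte write at offset 0xfffe of a
 * normal data segment with limit 0xffff fails the limit check (max_size is
 * 2), whereas in 64-bit mode only canonicality and the 2^48 boundary are
 * checked.
 */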
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			mode = X86EMUL_MODE_PROT64;
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

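/*
 * Illustrative note (not from the original file): "15UL ^ cur_size" is a
 * shortcut for "15 - cur_size".  cur_size never exceeds 15 (the maximum
 * x86 instruction length), so only its low four bits can be set, and
 * XORing with 15 flips them exactly as the subtraction would, e.g.
 * cur_size = 6  ->  15 ^ 6 = 9 = 15 - 6.
 */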
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
	\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})

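/*
 * Illustrative usage (not from the original file): the decoder pulls
 * instruction bytes through these macros, which advance _eip and the fetch
 * pointer and jump to the caller's local "done" label on failure:
 *
 *	ctxt->b = insn_fetch(u8, ctxt);		next opcode byte
 *	displacement = insn_fetch(s32, ctxt);	32-bit displacement
 */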
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

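/*
 * Illustrative note (not from the original file): the SETcc block built
 * with FOP_SETCC() above lays out one 4-byte stub per condition code in
 * opcode order (seto, setno, setc, ...).  test_cc() therefore indexes it
 * with the low nibble of the condition, e.g.:
 *
 *	test_cc(0x75, ctxt->eflags)	runs "setnz %al; ret" for JNZ (0x75)
 */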
Avi Kivity91ff3cb2010-08-01 12:53:09 +0300932static void fetch_register_operand(struct operand *op)
933{
934 switch (op->bytes) {
935 case 1:
936 op->val = *(u8 *)op->addr.reg;
937 break;
938 case 2:
939 op->val = *(u16 *)op->addr.reg;
940 break;
941 case 4:
942 op->val = *(u32 *)op->addr.reg;
943 break;
944 case 8:
945 op->val = *(u64 *)op->addr.reg;
946 break;
947 }
948}
949
Avi Kivity1253791d2011-03-29 11:41:27 +0200950static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
951{
952 ctxt->ops->get_fpu(ctxt);
953 switch (reg) {
Mathias Krause89a87c62012-08-30 01:30:14 +0200954 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
955 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
956 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
957 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
958 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
959 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
960 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
961 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
Avi Kivity1253791d2011-03-29 11:41:27 +0200962#ifdef CONFIG_X86_64
Mathias Krause89a87c62012-08-30 01:30:14 +0200963 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
964 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
965 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
966 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
967 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
968 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
969 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
970 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
Avi Kivity1253791d2011-03-29 11:41:27 +0200971#endif
972 default: BUG();
973 }
974 ctxt->ops->put_fpu(ctxt);
975}
976
977static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
978 int reg)
979{
980 ctxt->ops->get_fpu(ctxt);
981 switch (reg) {
Mathias Krause89a87c62012-08-30 01:30:14 +0200982 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
983 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
984 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
985 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
986 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
987 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
988 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
989 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
Avi Kivity1253791d2011-03-29 11:41:27 +0200990#ifdef CONFIG_X86_64
Mathias Krause89a87c62012-08-30 01:30:14 +0200991 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
992 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
993 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
994 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
995 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
996 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
997 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
998 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
Avi Kivity1253791d2011-03-29 11:41:27 +0200999#endif
1000 default: BUG();
1001 }
1002 ctxt->ops->put_fpu(ctxt);
1003}
1004
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001005static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1006{
1007 ctxt->ops->get_fpu(ctxt);
1008 switch (reg) {
1009 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1010 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1011 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1012 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1013 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1014 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1015 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1016 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1017 default: BUG();
1018 }
1019 ctxt->ops->put_fpu(ctxt);
1020}
1021
1022static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1023{
1024 ctxt->ops->get_fpu(ctxt);
1025 switch (reg) {
1026 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1027 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1028 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1029 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1030 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1031 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1032 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1033 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1034 default: BUG();
1035 }
1036 ctxt->ops->put_fpu(ctxt);
1037}
1038
Gleb Natapov045a2822012-12-20 16:57:43 +02001039static int em_fninit(struct x86_emulate_ctxt *ctxt)
1040{
1041 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1042 return emulate_nm(ctxt);
1043
1044 ctxt->ops->get_fpu(ctxt);
1045 asm volatile("fninit");
1046 ctxt->ops->put_fpu(ctxt);
1047 return X86EMUL_CONTINUE;
1048}
1049
1050static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1051{
1052 u16 fcw;
1053
1054 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1055 return emulate_nm(ctxt);
1056
1057 ctxt->ops->get_fpu(ctxt);
1058 asm volatile("fnstcw %0": "+m"(fcw));
1059 ctxt->ops->put_fpu(ctxt);
1060
Gleb Natapov045a2822012-12-20 16:57:43 +02001061 ctxt->dst.val = fcw;
1062
1063 return X86EMUL_CONTINUE;
1064}
1065
1066static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1067{
1068 u16 fsw;
1069
1070 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1071 return emulate_nm(ctxt);
1072
1073 ctxt->ops->get_fpu(ctxt);
1074 asm volatile("fnstsw %0": "+m"(fsw));
1075 ctxt->ops->put_fpu(ctxt);
1076
Gleb Natapov045a2822012-12-20 16:57:43 +02001077 ctxt->dst.val = fsw;
1078
1079 return X86EMUL_CONTINUE;
1080}
1081
Avi Kivity1253791d2011-03-29 11:41:27 +02001082static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
Avi Kivity2adb5ad2012-01-16 15:08:45 +02001083 struct operand *op)
Avi Kivity3c118e22007-10-31 10:27:04 +02001084{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001085 unsigned reg = ctxt->modrm_reg;
Avi Kivity33615aa2007-10-31 11:15:56 +02001086
Avi Kivity9dac77f2011-06-01 15:34:25 +03001087 if (!(ctxt->d & ModRM))
1088 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
Avi Kivity1253791d2011-03-29 11:41:27 +02001089
Avi Kivity9dac77f2011-06-01 15:34:25 +03001090 if (ctxt->d & Sse) {
Avi Kivity1253791d2011-03-29 11:41:27 +02001091 op->type = OP_XMM;
1092 op->bytes = 16;
1093 op->addr.xmm = reg;
1094 read_sse_reg(ctxt, &op->vec_val, reg);
1095 return;
1096 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001097 if (ctxt->d & Mmx) {
1098 reg &= 7;
1099 op->type = OP_MM;
1100 op->bytes = 8;
1101 op->addr.mm = reg;
1102 return;
1103 }
Avi Kivity1253791d2011-03-29 11:41:27 +02001104
Avi Kivity3c118e22007-10-31 10:27:04 +02001105 op->type = OP_REG;
Gleb Natapov6d4d85e2013-11-04 15:52:42 +02001106 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1107 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1108
Avi Kivity91ff3cb2010-08-01 12:53:09 +03001109 fetch_register_operand(op);
Avi Kivity3c118e22007-10-31 10:27:04 +02001110 op->orig_val = op->val;
1111}
1112
Avi Kivitya6e34072012-06-10 17:15:39 +03001113static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1114{
1115 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1116 ctxt->modrm_seg = VCPU_SREG_SS;
1117}
1118
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001119static int decode_modrm(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001120 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001121{
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001122 u8 sib;
Bandan Das02357bd2014-04-16 12:46:11 -04001123 int index_reg, base_reg, scale;
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001124 int rc = X86EMUL_CONTINUE;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001125 ulong modrm_ea = 0;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001126
Bandan Das02357bd2014-04-16 12:46:11 -04001127 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1128 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1129 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001130
Bandan Das02357bd2014-04-16 12:46:11 -04001131 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001132 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
Bandan Das02357bd2014-04-16 12:46:11 -04001133 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001134 ctxt->modrm_seg = VCPU_SREG_DS;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001135
Nadav Amit9b88ae92014-05-25 23:05:21 +03001136 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001137 op->type = OP_REG;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001138 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Paolo Bonzini8acb42072013-05-30 16:35:55 +02001139 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02001140 ctxt->d & ByteOp);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001141 if (ctxt->d & Sse) {
Avi Kivity1253791d2011-03-29 11:41:27 +02001142 op->type = OP_XMM;
1143 op->bytes = 16;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001144 op->addr.xmm = ctxt->modrm_rm;
1145 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
Avi Kivity1253791d2011-03-29 11:41:27 +02001146 return rc;
1147 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001148 if (ctxt->d & Mmx) {
1149 op->type = OP_MM;
1150 op->bytes = 8;
Paolo Bonzinibdc90722014-05-06 14:03:29 +02001151 op->addr.mm = ctxt->modrm_rm & 7;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001152 return rc;
1153 }
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001154 fetch_register_operand(op);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001155 return rc;
1156 }
1157
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001158 op->type = OP_MEM;
1159
Avi Kivity9dac77f2011-06-01 15:34:25 +03001160 if (ctxt->ad_bytes == 2) {
Avi Kivitydd856ef2012-08-27 23:46:17 +03001161 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1162 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1163 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1164 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001165
1166 /* 16-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001167 switch (ctxt->modrm_mod) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001168 case 0:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001169 if (ctxt->modrm_rm == 6)
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001170 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001171 break;
1172 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001173 modrm_ea += insn_fetch(s8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001174 break;
1175 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001176 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001177 break;
1178 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001179 switch (ctxt->modrm_rm) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001180 case 0:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001181 modrm_ea += bx + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001182 break;
1183 case 1:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001184 modrm_ea += bx + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001185 break;
1186 case 2:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001187 modrm_ea += bp + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001188 break;
1189 case 3:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001190 modrm_ea += bp + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001191 break;
1192 case 4:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001193 modrm_ea += si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001194 break;
1195 case 5:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001196 modrm_ea += di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001197 break;
1198 case 6:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001199 if (ctxt->modrm_mod != 0)
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001200 modrm_ea += bp;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001201 break;
1202 case 7:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001203 modrm_ea += bx;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001204 break;
1205 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001206 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1207 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1208 ctxt->modrm_seg = VCPU_SREG_SS;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001209 modrm_ea = (u16)modrm_ea;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001210 } else {
1211 /* 32/64-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001212 if ((ctxt->modrm_rm & 7) == 4) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001213 sib = insn_fetch(u8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001214 index_reg |= (sib >> 3) & 7;
1215 base_reg |= sib & 7;
1216 scale = sib >> 6;
1217
Avi Kivity9dac77f2011-06-01 15:34:25 +03001218 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001219 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivitya6e34072012-06-10 17:15:39 +03001220 else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03001221 modrm_ea += reg_read(ctxt, base_reg);
Avi Kivitya6e34072012-06-10 17:15:39 +03001222 adjust_modrm_seg(ctxt, base_reg);
1223 }
Avi Kivitydc71d0f2008-06-15 21:23:17 -07001224 if (index_reg != 4)
Avi Kivitydd856ef2012-08-27 23:46:17 +03001225 modrm_ea += reg_read(ctxt, index_reg) << scale;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001226 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
Nadav Amit5b38ab82014-11-02 11:54:41 +02001227 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivity84411d82008-06-15 21:53:26 -07001228 if (ctxt->mode == X86EMUL_MODE_PROT64)
Avi Kivity9dac77f2011-06-01 15:34:25 +03001229 ctxt->rip_relative = 1;
Avi Kivitya6e34072012-06-10 17:15:39 +03001230 } else {
1231 base_reg = ctxt->modrm_rm;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001232 modrm_ea += reg_read(ctxt, base_reg);
Avi Kivitya6e34072012-06-10 17:15:39 +03001233 adjust_modrm_seg(ctxt, base_reg);
1234 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001235 switch (ctxt->modrm_mod) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001236 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001237 modrm_ea += insn_fetch(s8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001238 break;
1239 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001240 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001241 break;
1242 }
1243 }
Avi Kivity90de84f2010-11-17 15:28:21 +02001244 op->addr.mem.ea = modrm_ea;
Bandan Das41061cd2014-04-16 12:46:14 -04001245 if (ctxt->ad_bytes != 8)
1246 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1247
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001248done:
1249 return rc;
1250}
1251
1252static int decode_abs(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001253 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001254{
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001255 int rc = X86EMUL_CONTINUE;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001256
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001257 op->type = OP_MEM;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001258 switch (ctxt->ad_bytes) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001259 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001260 op->addr.mem.ea = insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001261 break;
1262 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001263 op->addr.mem.ea = insn_fetch(u32, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001264 break;
1265 case 8:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001266 op->addr.mem.ea = insn_fetch(u64, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001267 break;
1268 }
1269done:
1270 return rc;
1271}
1272
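/*
 * Bit-test family (BT/BTS/BTR/BTC) helper: when the bit offset comes
 * from a register and the destination is in memory, fold the word
 * index of the bit offset into the effective address, then reduce the
 * source to the bit position within that word.
 */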
Avi Kivity9dac77f2011-06-01 15:34:25 +03001273static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
Wei Yongjun35c843c2010-08-09 11:34:56 +08001274{
Sheng Yang7129eec2010-09-28 16:33:32 +08001275 long sv = 0, mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001276
Avi Kivity9dac77f2011-06-01 15:34:25 +03001277 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
Nadav Amit7dec5602014-06-15 16:12:57 +03001278 mask = ~((long)ctxt->dst.bytes * 8 - 1);
Wei Yongjun35c843c2010-08-09 11:34:56 +08001279
Avi Kivity9dac77f2011-06-01 15:34:25 +03001280 if (ctxt->src.bytes == 2)
1281 sv = (s16)ctxt->src.val & (s16)mask;
1282 else if (ctxt->src.bytes == 4)
1283 sv = (s32)ctxt->src.val & (s32)mask;
Nadav Amit7dec5602014-06-15 16:12:57 +03001284 else
1285 sv = (s64)ctxt->src.val & (s64)mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001286
Nadav Amit1c1c35a2014-11-19 17:43:09 +02001287 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1288 ctxt->dst.addr.mem.ea + (sv >> 3));
Wei Yongjun35c843c2010-08-09 11:34:56 +08001289 }
Wei Yongjunba7ff2b2010-08-09 11:39:14 +08001290
1291	/* keep only the bit offset within the destination operand */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001292 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001293}
1294
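/*
 * Reads go through a small per-instruction cache (ctxt->mem_read) so
 * that when emulation is restarted (e.g. after an MMIO exit) the same
 * bytes are returned instead of re-issuing the access.
 */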
Gleb Natapov9de41572010-04-28 19:15:22 +03001295static int read_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov9de41572010-04-28 19:15:22 +03001296 unsigned long addr, void *dest, unsigned size)
1297{
1298 int rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001299 struct read_cache *mc = &ctxt->mem_read;
Gleb Natapov9de41572010-04-28 19:15:22 +03001300
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001301 if (mc->pos < mc->end)
1302 goto read_cached;
Gleb Natapov9de41572010-04-28 19:15:22 +03001303
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001304 WARN_ON((mc->end + size) >= sizeof(mc->data));
Gleb Natapov9de41572010-04-28 19:15:22 +03001305
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001306 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1307 &ctxt->exception);
1308 if (rc != X86EMUL_CONTINUE)
1309 return rc;
1310
1311 mc->end += size;
1312
1313read_cached:
1314 memcpy(dest, mc->data + mc->pos, size);
1315 mc->pos += size;
Gleb Natapov9de41572010-04-28 19:15:22 +03001316 return X86EMUL_CONTINUE;
1317}
1318
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001319static int segmented_read(struct x86_emulate_ctxt *ctxt,
1320 struct segmented_address addr,
1321 void *data,
1322 unsigned size)
1323{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001324 int rc;
1325 ulong linear;
1326
Avi Kivity83b87952011-04-03 11:31:19 +03001327 rc = linearize(ctxt, addr, size, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001328 if (rc != X86EMUL_CONTINUE)
1329 return rc;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001330 return read_emulated(ctxt, linear, data, size);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001331}
1332
1333static int segmented_write(struct x86_emulate_ctxt *ctxt,
1334 struct segmented_address addr,
1335 const void *data,
1336 unsigned size)
1337{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001338 int rc;
1339 ulong linear;
1340
Avi Kivity83b87952011-04-03 11:31:19 +03001341 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001342 if (rc != X86EMUL_CONTINUE)
1343 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001344 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1345 &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001346}
1347
1348static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1349 struct segmented_address addr,
1350 const void *orig_data, const void *data,
1351 unsigned size)
1352{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001353 int rc;
1354 ulong linear;
1355
Avi Kivity83b87952011-04-03 11:31:19 +03001356 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001357 if (rc != X86EMUL_CONTINUE)
1358 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001359 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1360 size, &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001361}
1362
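/*
 * Port input with read-ahead: for a REP IN/INS, several iterations'
 * worth of data (bounded by the cache size, RCX and the page RDI
 * points into) is fetched with one pio_in_emulated callback, and the
 * remaining iterations are served from ctxt->io_read.
 */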
Gleb Natapov7b262e92010-03-18 15:20:27 +02001363static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov7b262e92010-03-18 15:20:27 +02001364 unsigned int size, unsigned short port,
1365 void *dest)
1366{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001367 struct read_cache *rc = &ctxt->io_read;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001368
1369 if (rc->pos == rc->end) { /* refill pio read ahead */
Gleb Natapov7b262e92010-03-18 15:20:27 +02001370 unsigned int in_page, n;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001371 unsigned int count = ctxt->rep_prefix ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001372 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001373 in_page = (ctxt->eflags & EFLG_DF) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001374 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1375 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
Mark Rustadb55a8142014-07-25 06:27:05 -07001376 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
Gleb Natapov7b262e92010-03-18 15:20:27 +02001377 if (n == 0)
1378 n = 1;
1379 rc->pos = rc->end = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001380 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
Gleb Natapov7b262e92010-03-18 15:20:27 +02001381 return 0;
1382 rc->end = n * size;
1383 }
1384
Nadav Amite6e39f02014-04-18 03:35:10 +03001385 if (ctxt->rep_prefix && (ctxt->d & String) &&
1386 !(ctxt->eflags & EFLG_DF)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001387 ctxt->dst.data = rc->data + rc->pos;
1388 ctxt->dst.type = OP_MEM_STR;
1389 ctxt->dst.count = (rc->end - rc->pos) / size;
1390 rc->pos = rc->end;
1391 } else {
1392 memcpy(dest, rc->data + rc->pos, size);
1393 rc->pos += size;
1394 }
Gleb Natapov7b262e92010-03-18 15:20:27 +02001395 return 1;
1396}
1397
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01001398static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1399 u16 index, struct desc_struct *desc)
1400{
1401 struct desc_ptr dt;
1402 ulong addr;
1403
1404 ctxt->ops->get_idt(ctxt, &dt);
1405
1406 if (dt.size < index * 8 + 7)
1407 return emulate_gp(ctxt, index << 3 | 0x2);
1408
1409 addr = dt.address + index * 8;
1410 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1411 &ctxt->exception);
1412}
1413
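/*
 * Return the base/limit of the descriptor table a selector refers to:
 * the current LDT if bit 2 (TI) of the selector is set, the GDT
 * otherwise.
 */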
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001414static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001415 u16 selector, struct desc_ptr *dt)
1416{
Mathias Krause0225fb52012-08-30 01:30:16 +02001417 const struct x86_emulate_ops *ops = ctxt->ops;
Nadav Amit2eedcac2014-06-02 18:34:05 +03001418 u32 base3 = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001419
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001420 if (selector & 1 << 2) {
1421 struct desc_struct desc;
Avi Kivity1aa36612011-04-27 13:20:30 +03001422 u16 sel;
1423
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001424 memset (dt, 0, sizeof *dt);
Nadav Amit2eedcac2014-06-02 18:34:05 +03001425 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1426 VCPU_SREG_LDTR))
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001427 return;
1428
1429 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
Nadav Amit2eedcac2014-06-02 18:34:05 +03001430 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001431 } else
Avi Kivity4bff1e862011-04-20 13:37:53 +03001432 ops->get_gdt(ctxt, dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001433}
1434
1435/* allowed just for 8-byte segments */
1436static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Avi Kivitye9194642012-06-13 16:29:39 +03001437 u16 selector, struct desc_struct *desc,
1438 ulong *desc_addr_p)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001439{
1440 struct desc_ptr dt;
1441 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001442 ulong addr;
1443
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001444 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001445
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001446 if (dt.size < index * 8 + 7)
1447 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001448
Avi Kivitye9194642012-06-13 16:29:39 +03001449 *desc_addr_p = addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001450 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1451 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001452}
1453
1454/* allowed just for 8-byte segments */
1455static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001456 u16 selector, struct desc_struct *desc)
1457{
1458 struct desc_ptr dt;
1459 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001460 ulong addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001461
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001462 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001463
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001464 if (dt.size < index * 8 + 7)
1465 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001466
1467 addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001468 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1469 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001470}
1471
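/*
 * Common segment-register load: handles real and VM86 modes directly,
 * and in protected mode performs the null-selector, descriptor type,
 * DPL/RPL/CPL and present checks before marking the descriptor
 * accessed (or the TSS busy) and installing it in the segment cache.
 */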
Gleb Natapov5601d052011-03-07 14:55:06 +02001472/* Does not support long mode */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001473static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Nadav Amitd1442d82014-09-18 22:39:39 +03001474 u16 selector, int seg, u8 cpl,
1475 bool in_task_switch,
1476 struct desc_struct *desc)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001477{
Avi Kivity869be992012-06-13 16:30:53 +03001478 struct desc_struct seg_desc, old_desc;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001479 u8 dpl, rpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001480 unsigned err_vec = GP_VECTOR;
1481 u32 err_code = 0;
1482 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
Avi Kivitye9194642012-06-13 16:29:39 +03001483 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001484 int ret;
Avi Kivity03ebebe2012-08-21 17:07:04 +03001485 u16 dummy;
Nadav Amite37a75a2014-06-02 18:34:04 +03001486 u32 base3 = 0;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001487
1488 memset(&seg_desc, 0, sizeof seg_desc);
1489
Kevin Wolff8da94e2013-04-11 14:06:03 +02001490 if (ctxt->mode == X86EMUL_MODE_REAL) {
1491 /* set real mode segment descriptor (keep limit etc. for
1492 * unreal mode) */
Avi Kivity03ebebe2012-08-21 17:07:04 +03001493 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001494 set_desc_base(&seg_desc, selector << 4);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001495 goto load;
Kevin Wolff8da94e2013-04-11 14:06:03 +02001496 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1497 /* VM86 needs a clean new segment descriptor */
1498 set_desc_base(&seg_desc, selector << 4);
1499 set_desc_limit(&seg_desc, 0xffff);
1500 seg_desc.type = 3;
1501 seg_desc.p = 1;
1502 seg_desc.s = 1;
1503 seg_desc.dpl = 3;
1504 goto load;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001505 }
1506
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001507 rpl = selector & 3;
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001508
1509 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1510 if ((seg == VCPU_SREG_CS
1511 || (seg == VCPU_SREG_SS
1512 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1513 || seg == VCPU_SREG_TR)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001514 && null_selector)
1515 goto exception;
1516
1517 /* TR should be in GDT only */
1518 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1519 goto exception;
1520
1521 if (null_selector) /* for NULL selector skip all following checks */
1522 goto load;
1523
Avi Kivitye9194642012-06-13 16:29:39 +03001524 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001525 if (ret != X86EMUL_CONTINUE)
1526 return ret;
1527
1528 err_code = selector & 0xfffc;
Paolo Bonzini15fc0752014-08-18 13:17:00 +02001529 err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001530
Guo Chaofc058682012-06-28 15:19:51 +08001531	/* can't load a system descriptor into an ordinary segment register */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001532 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1533 goto exception;
1534
1535 if (!seg_desc.p) {
1536 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1537 goto exception;
1538 }
1539
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001540 dpl = seg_desc.dpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001541
1542 switch (seg) {
1543 case VCPU_SREG_SS:
1544 /*
1545		 * segment is not a writable data segment, or the segment
1546		 * selector's RPL != CPL, or the descriptor's DPL != CPL
1547 */
1548 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1549 goto exception;
1550 break;
1551 case VCPU_SREG_CS:
1552 if (!(seg_desc.type & 8))
1553 goto exception;
1554
1555 if (seg_desc.type & 4) {
1556 /* conforming */
1557 if (dpl > cpl)
1558 goto exception;
1559 } else {
1560 /* nonconforming */
1561 if (rpl > cpl || dpl != cpl)
1562 goto exception;
1563 }
Nadav Amit040c8dc2014-09-18 22:39:43 +03001564 /* in long-mode d/b must be clear if l is set */
1565 if (seg_desc.d && seg_desc.l) {
1566 u64 efer = 0;
1567
1568 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1569 if (efer & EFER_LMA)
1570 goto exception;
1571 }
1572
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001573 /* CS(RPL) <- CPL */
1574 selector = (selector & 0xfffc) | cpl;
1575 break;
1576 case VCPU_SREG_TR:
1577 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1578 goto exception;
Avi Kivity869be992012-06-13 16:30:53 +03001579 old_desc = seg_desc;
1580 seg_desc.type |= 2; /* busy */
1581 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1582 sizeof(seg_desc), &ctxt->exception);
1583 if (ret != X86EMUL_CONTINUE)
1584 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001585 break;
1586 case VCPU_SREG_LDTR:
1587 if (seg_desc.s || seg_desc.type != 2)
1588 goto exception;
1589 break;
1590 default: /* DS, ES, FS, or GS */
1591 /*
1592 * segment is not a data or readable code segment or
1593 * ((segment is a data or nonconforming code segment)
1594 * and (both RPL and CPL > DPL))
1595 */
1596 if ((seg_desc.type & 0xa) == 0x8 ||
1597 (((seg_desc.type & 0xc) != 0xc) &&
1598 (rpl > dpl && cpl > dpl)))
1599 goto exception;
1600 break;
1601 }
1602
1603 if (seg_desc.s) {
1604 /* mark segment as accessed */
1605 seg_desc.type |= 1;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001606 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001607 if (ret != X86EMUL_CONTINUE)
1608 return ret;
Nadav Amite37a75a2014-06-02 18:34:04 +03001609 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1610 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1611 sizeof(base3), &ctxt->exception);
1612 if (ret != X86EMUL_CONTINUE)
1613 return ret;
Nadav Amit9a9abf62014-11-02 11:54:56 +02001614 if (is_noncanonical_address(get_desc_base(&seg_desc) |
1615 ((u64)base3 << 32)))
1616 return emulate_gp(ctxt, 0);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001617 }
1618load:
Nadav Amite37a75a2014-06-02 18:34:04 +03001619 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
Nadav Amitd1442d82014-09-18 22:39:39 +03001620 if (desc)
1621 *desc = seg_desc;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001622 return X86EMUL_CONTINUE;
1623exception:
Paolo Bonzini592f0852014-08-20 10:05:08 +02001624 return emulate_exception(ctxt, err_vec, err_code, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001625}
1626
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001627static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1628 u16 selector, int seg)
1629{
1630 u8 cpl = ctxt->ops->cpl(ctxt);
Nadav Amitd1442d82014-09-18 22:39:39 +03001631 return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001632}
1633
Wei Yongjun31be40b2010-08-17 09:17:30 +08001634static void write_register_operand(struct operand *op)
1635{
1636 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1637 switch (op->bytes) {
1638 case 1:
1639 *(u8 *)op->addr.reg = (u8)op->val;
1640 break;
1641 case 2:
1642 *(u16 *)op->addr.reg = (u16)op->val;
1643 break;
1644 case 4:
1645 *op->addr.reg = (u32)op->val;
1646 break; /* 64b: zero-extend */
1647 case 8:
1648 *op->addr.reg = op->val;
1649 break;
1650 }
1651}
1652
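/*
 * Commit one operand's result to its destination: a GPR, memory
 * (plain write, or cmpxchg when the instruction had a LOCK prefix),
 * a string destination, or an SSE/MMX register.
 */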
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001653static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
Wei Yongjunc37eda12010-06-15 09:03:33 +08001654{
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001655 switch (op->type) {
Wei Yongjunc37eda12010-06-15 09:03:33 +08001656 case OP_REG:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001657 write_register_operand(op);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001658 break;
1659 case OP_MEM:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001660 if (ctxt->lock_prefix)
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001661 return segmented_cmpxchg(ctxt,
1662 op->addr.mem,
1663 &op->orig_val,
1664 &op->val,
1665 op->bytes);
1666 else
1667 return segmented_write(ctxt,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001668 op->addr.mem,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001669 &op->val,
1670 op->bytes);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001671 break;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001672 case OP_MEM_STR:
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001673 return segmented_write(ctxt,
1674 op->addr.mem,
1675 op->data,
1676 op->bytes * op->count);
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001677 break;
Avi Kivity1253791d2011-03-29 11:41:27 +02001678 case OP_XMM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001679 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
Avi Kivity1253791d2011-03-29 11:41:27 +02001680 break;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001681 case OP_MM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001682 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001683 break;
Wei Yongjunc37eda12010-06-15 09:03:33 +08001684 case OP_NONE:
1685 /* no writeback */
1686 break;
1687 default:
1688 break;
1689 }
1690 return X86EMUL_CONTINUE;
1691}
1692
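/*
 * Push helper: decrement RSP by 'bytes' (masked to the current stack
 * width) and write the data through SS.
 */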
Avi Kivity51ddff52012-06-12 20:19:40 +03001693static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001694{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001695 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001696
Avi Kivity5ad105e2012-08-19 14:34:31 +03001697 rsp_increment(ctxt, -bytes);
Avi Kivitydd856ef2012-08-27 23:46:17 +03001698 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001699 addr.seg = VCPU_SREG_SS;
1700
Avi Kivity51ddff52012-06-12 20:19:40 +03001701 return segmented_write(ctxt, addr, data, bytes);
1702}
1703
1704static int em_push(struct x86_emulate_ctxt *ctxt)
1705{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001706 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001707 ctxt->dst.type = OP_NONE;
Avi Kivity51ddff52012-06-12 20:19:40 +03001708 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001709}
1710
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001711static int emulate_pop(struct x86_emulate_ctxt *ctxt,
Avi Kivity350f69d2009-01-05 11:12:40 +02001712 void *dest, int len)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001713{
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001714 int rc;
Avi Kivity90de84f2010-11-17 15:28:21 +02001715 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001716
Avi Kivitydd856ef2012-08-27 23:46:17 +03001717 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Avi Kivity90de84f2010-11-17 15:28:21 +02001718 addr.seg = VCPU_SREG_SS;
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001719 rc = segmented_read(ctxt, addr, dest, len);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09001720 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001721 return rc;
1722
Avi Kivity5ad105e2012-08-19 14:34:31 +03001723 rsp_increment(ctxt, len);
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001724 return rc;
1725}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001726
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001727static int em_pop(struct x86_emulate_ctxt *ctxt)
1728{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001729 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001730}
1731
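/*
 * POPF: only the flag bits the current privilege level may change are
 * taken from the popped value; IOPL needs CPL 0, IF needs CPL <= IOPL,
 * and VM86 with IOPL < 3 faults with #GP.
 */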
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001732static int emulate_popf(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001733 void *dest, int len)
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001734{
1735 int rc;
1736 unsigned long val, change_mask;
1737 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001738 int cpl = ctxt->ops->cpl(ctxt);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001739
Takuya Yoshikawa3b9be3b2011-05-02 02:27:55 +09001740 rc = emulate_pop(ctxt, &val, len);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001741 if (rc != X86EMUL_CONTINUE)
1742 return rc;
1743
1744 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
Nadav Amit163b1352014-07-21 14:37:28 +03001745 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001746
1747 switch(ctxt->mode) {
1748 case X86EMUL_MODE_PROT64:
1749 case X86EMUL_MODE_PROT32:
1750 case X86EMUL_MODE_PROT16:
1751 if (cpl == 0)
1752 change_mask |= EFLG_IOPL;
1753 if (cpl <= iopl)
1754 change_mask |= EFLG_IF;
1755 break;
1756 case X86EMUL_MODE_VM86:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001757 if (iopl < 3)
1758 return emulate_gp(ctxt, 0);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001759 change_mask |= EFLG_IF;
1760 break;
1761 default: /* real mode */
1762 change_mask |= (EFLG_IOPL | EFLG_IF);
1763 break;
1764 }
1765
1766 *(unsigned long *)dest =
1767 (ctxt->eflags & ~change_mask) | (val & change_mask);
1768
1769 return rc;
1770}
1771
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001772static int em_popf(struct x86_emulate_ctxt *ctxt)
1773{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001774 ctxt->dst.type = OP_REG;
1775 ctxt->dst.addr.reg = &ctxt->eflags;
1776 ctxt->dst.bytes = ctxt->op_bytes;
1777 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001778}
1779
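/*
 * ENTER: only nesting level 0 is handled; push RBP, set RBP to the
 * new RSP and reserve frame_size bytes of stack.
 */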
Avi Kivity612e89f2012-06-12 20:03:23 +03001780static int em_enter(struct x86_emulate_ctxt *ctxt)
1781{
1782 int rc;
1783 unsigned frame_size = ctxt->src.val;
1784 unsigned nesting_level = ctxt->src2.val & 31;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001785 ulong rbp;
Avi Kivity612e89f2012-06-12 20:03:23 +03001786
1787 if (nesting_level)
1788 return X86EMUL_UNHANDLEABLE;
1789
Avi Kivitydd856ef2012-08-27 23:46:17 +03001790 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1791 rc = push(ctxt, &rbp, stack_size(ctxt));
Avi Kivity612e89f2012-06-12 20:03:23 +03001792 if (rc != X86EMUL_CONTINUE)
1793 return rc;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001794 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
Avi Kivity612e89f2012-06-12 20:03:23 +03001795 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001796 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1797 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
Avi Kivity612e89f2012-06-12 20:03:23 +03001798 stack_mask(ctxt));
1799 return X86EMUL_CONTINUE;
1800}
1801
Avi Kivityf47cfa32012-06-07 17:49:24 +03001802static int em_leave(struct x86_emulate_ctxt *ctxt)
1803{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001804 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
Avi Kivityf47cfa32012-06-07 17:49:24 +03001805 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001806 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
Avi Kivityf47cfa32012-06-07 17:49:24 +03001807}
1808
Avi Kivity1cd196e2011-09-13 10:45:51 +03001809static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001810{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001811 int seg = ctxt->src2.val;
1812
Avi Kivity9dac77f2011-06-01 15:34:25 +03001813 ctxt->src.val = get_segment_selector(ctxt, seg);
Nadav Amit0fcc2072014-11-02 11:54:51 +02001814 if (ctxt->op_bytes == 4) {
1815 rsp_increment(ctxt, -2);
1816 ctxt->op_bytes = 2;
1817 }
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001818
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001819 return em_push(ctxt);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001820}
1821
Avi Kivity1cd196e2011-09-13 10:45:51 +03001822static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001823{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001824 int seg = ctxt->src2.val;
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001825 unsigned long selector;
1826 int rc;
1827
Nadav Amit3313bc42014-12-25 02:52:17 +02001828 rc = emulate_pop(ctxt, &selector, 2);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001829 if (rc != X86EMUL_CONTINUE)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001830 return rc;
1831
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001832 if (ctxt->modrm_reg == VCPU_SREG_SS)
1833 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
Nadav Amit3313bc42014-12-25 02:52:17 +02001834 if (ctxt->op_bytes > 2)
1835 rsp_increment(ctxt, ctxt->op_bytes - 2);
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001836
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001837 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001838 return rc;
1839}
1840
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001841static int em_pusha(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001842{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001843 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001844 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001845 int reg = VCPU_REGS_RAX;
1846
1847 while (reg <= VCPU_REGS_RDI) {
1848 (reg == VCPU_REGS_RSP) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001849 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001850
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001851 rc = em_push(ctxt);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001852 if (rc != X86EMUL_CONTINUE)
1853 return rc;
1854
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001855 ++reg;
1856 }
Wei Yongjunc37eda12010-06-15 09:03:33 +08001857
Wei Yongjunc37eda12010-06-15 09:03:33 +08001858 return rc;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001859}
1860
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001861static int em_pushf(struct x86_emulate_ctxt *ctxt)
1862{
Nadav Amitbc397a62014-12-10 11:19:03 +02001863 ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001864 return em_push(ctxt);
1865}
1866
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001867static int em_popa(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001868{
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001869 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001870 int reg = VCPU_REGS_RDI;
1871
1872 while (reg >= VCPU_REGS_RAX) {
1873 if (reg == VCPU_REGS_RSP) {
Avi Kivity5ad105e2012-08-19 14:34:31 +03001874 rsp_increment(ctxt, ctxt->op_bytes);
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001875 --reg;
1876 }
1877
Avi Kivitydd856ef2012-08-27 23:46:17 +03001878 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001879 if (rc != X86EMUL_CONTINUE)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001880 break;
1881 --reg;
1882 }
1883 return rc;
1884}
1885
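/*
 * Real-mode interrupt/exception injection: push FLAGS, CS and IP,
 * clear IF/TF/AC, and load the new CS:IP from the IVT entry at
 * irq * 4.
 */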
Avi Kivitydd856ef2012-08-27 23:46:17 +03001886static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001887{
Mathias Krause0225fb52012-08-30 01:30:16 +02001888 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001889 int rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001890 struct desc_ptr dt;
1891 gva_t cs_addr;
1892 gva_t eip_addr;
1893 u16 cs, eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001894
1895 /* TODO: Add limit checks */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001896 ctxt->src.val = ctxt->eflags;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001897 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001898 if (rc != X86EMUL_CONTINUE)
1899 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001900
1901 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1902
Avi Kivity9dac77f2011-06-01 15:34:25 +03001903 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001904 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001905 if (rc != X86EMUL_CONTINUE)
1906 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001907
Avi Kivity9dac77f2011-06-01 15:34:25 +03001908 ctxt->src.val = ctxt->_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001909 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001910 if (rc != X86EMUL_CONTINUE)
1911 return rc;
1912
Avi Kivity4bff1e862011-04-20 13:37:53 +03001913 ops->get_idt(ctxt, &dt);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001914
1915 eip_addr = dt.address + (irq << 2);
1916 cs_addr = dt.address + (irq << 2) + 2;
1917
Avi Kivity0f65dd72011-04-20 13:37:53 +03001918 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001919 if (rc != X86EMUL_CONTINUE)
1920 return rc;
1921
Avi Kivity0f65dd72011-04-20 13:37:53 +03001922 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001923 if (rc != X86EMUL_CONTINUE)
1924 return rc;
1925
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001926 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001927 if (rc != X86EMUL_CONTINUE)
1928 return rc;
1929
Avi Kivity9dac77f2011-06-01 15:34:25 +03001930 ctxt->_eip = eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001931
1932 return rc;
1933}
1934
Avi Kivitydd856ef2012-08-27 23:46:17 +03001935int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1936{
1937 int rc;
1938
1939 invalidate_registers(ctxt);
1940 rc = __emulate_int_real(ctxt, irq);
1941 if (rc == X86EMUL_CONTINUE)
1942 writeback_registers(ctxt);
1943 return rc;
1944}
1945
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001946static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001947{
1948 switch(ctxt->mode) {
1949 case X86EMUL_MODE_REAL:
Avi Kivitydd856ef2012-08-27 23:46:17 +03001950 return __emulate_int_real(ctxt, irq);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001951 case X86EMUL_MODE_VM86:
1952 case X86EMUL_MODE_PROT16:
1953 case X86EMUL_MODE_PROT32:
1954 case X86EMUL_MODE_PROT64:
1955 default:
1956 /* Protected mode interrupts unimplemented yet */
1957 return X86EMUL_UNHANDLEABLE;
1958 }
1959}
1960
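/*
 * Real-mode IRET: pop IP, CS and FLAGS; only the bits in 'mask' are
 * taken from the popped FLAGS, and the VM86-related bits keep their
 * current values.
 */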
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001961static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001962{
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001963 int rc = X86EMUL_CONTINUE;
1964 unsigned long temp_eip = 0;
1965 unsigned long temp_eflags = 0;
1966 unsigned long cs = 0;
1967 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1968 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1969 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1970 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1971
1972 /* TODO: Add stack limit check */
1973
Avi Kivity9dac77f2011-06-01 15:34:25 +03001974 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001975
1976 if (rc != X86EMUL_CONTINUE)
1977 return rc;
1978
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001979 if (temp_eip & ~0xffff)
1980 return emulate_gp(ctxt, 0);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001981
Avi Kivity9dac77f2011-06-01 15:34:25 +03001982 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001983
1984 if (rc != X86EMUL_CONTINUE)
1985 return rc;
1986
Avi Kivity9dac77f2011-06-01 15:34:25 +03001987 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001988
1989 if (rc != X86EMUL_CONTINUE)
1990 return rc;
1991
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001992 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001993
1994 if (rc != X86EMUL_CONTINUE)
1995 return rc;
1996
Avi Kivity9dac77f2011-06-01 15:34:25 +03001997 ctxt->_eip = temp_eip;
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001998
1999
Avi Kivity9dac77f2011-06-01 15:34:25 +03002000 if (ctxt->op_bytes == 4)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002001 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
Avi Kivity9dac77f2011-06-01 15:34:25 +03002002 else if (ctxt->op_bytes == 2) {
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002003 ctxt->eflags &= ~0xffff;
2004 ctxt->eflags |= temp_eflags;
2005 }
2006
2007 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2008 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2009
2010 return rc;
2011}
2012
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002013static int em_iret(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002014{
2015 switch(ctxt->mode) {
2016 case X86EMUL_MODE_REAL:
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002017 return emulate_iret_real(ctxt);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002018 case X86EMUL_MODE_VM86:
2019 case X86EMUL_MODE_PROT16:
2020 case X86EMUL_MODE_PROT32:
2021 case X86EMUL_MODE_PROT64:
2022 default:
2023 /* iret from protected mode unimplemented yet */
2024 return X86EMUL_UNHANDLEABLE;
2025 }
2026}
2027
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002028static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2029{
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002030 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002031 unsigned short sel, old_sel;
2032 struct desc_struct old_desc, new_desc;
2033 const struct x86_emulate_ops *ops = ctxt->ops;
2034 u8 cpl = ctxt->ops->cpl(ctxt);
2035
2036 /* Assignment of RIP may only fail in 64-bit mode */
2037 if (ctxt->mode == X86EMUL_MODE_PROT64)
2038 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2039 VCPU_SREG_CS);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002040
Avi Kivity9dac77f2011-06-01 15:34:25 +03002041 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002042
Nadav Amitd1442d82014-09-18 22:39:39 +03002043 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2044 &new_desc);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002045 if (rc != X86EMUL_CONTINUE)
2046 return rc;
2047
Nadav Amitd50eaa12014-11-19 17:43:11 +02002048 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03002049 if (rc != X86EMUL_CONTINUE) {
Nadav Amit7e46ddd2014-10-28 00:03:43 +02002050 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002051 /* assigning eip failed; restore the old cs */
2052 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2053 return rc;
2054 }
2055 return rc;
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002056}
2057
Nadav Amitf7784042014-09-18 22:39:41 +03002058static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002059{
Nadav Amitf7784042014-09-18 22:39:41 +03002060 return assign_eip_near(ctxt, ctxt->src.val);
2061}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002062
Nadav Amitf7784042014-09-18 22:39:41 +03002063static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2064{
2065 int rc;
2066 long int old_eip;
2067
2068 old_eip = ctxt->_eip;
2069 rc = assign_eip_near(ctxt, ctxt->src.val);
2070 if (rc != X86EMUL_CONTINUE)
2071 return rc;
2072 ctxt->src.val = old_eip;
2073 rc = em_push(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09002074 return rc;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002075}
2076
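/*
 * CMPXCHG8B: if EDX:EAX equals the 64-bit destination, ZF is set and
 * ECX:EBX is written back; otherwise ZF is cleared and the old value
 * is loaded into EDX:EAX. The 16-byte CMPXCHG16B form is not handled.
 */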
Takuya Yoshikawae0dac402011-12-06 18:07:27 +09002077static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002078{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002079 u64 old = ctxt->dst.orig_val64;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002080
Nadav Amitaaa05f22014-06-02 18:34:10 +03002081 if (ctxt->dst.bytes == 16)
2082 return X86EMUL_UNHANDLEABLE;
2083
Avi Kivitydd856ef2012-08-27 23:46:17 +03002084 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2085 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2086 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2087 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
Laurent Vivier05f086f2007-09-24 11:10:55 +02002088 ctxt->eflags &= ~EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002089 } else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03002090 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2091 (u32) reg_read(ctxt, VCPU_REGS_RBX);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002092
Laurent Vivier05f086f2007-09-24 11:10:55 +02002093 ctxt->eflags |= EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002094 }
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002095 return X86EMUL_CONTINUE;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002096}
2097
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002098static int em_ret(struct x86_emulate_ctxt *ctxt)
2099{
Nadav Amit234f3ce2014-09-18 22:39:38 +03002100 int rc;
2101 unsigned long eip;
2102
2103 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2104 if (rc != X86EMUL_CONTINUE)
2105 return rc;
2106
2107 return assign_eip_near(ctxt, eip);
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002108}
2109
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002110static int em_ret_far(struct x86_emulate_ctxt *ctxt)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002111{
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002112 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002113 unsigned long eip, cs;
2114 u16 old_cs;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002115 int cpl = ctxt->ops->cpl(ctxt);
Nadav Amitd1442d82014-09-18 22:39:39 +03002116 struct desc_struct old_desc, new_desc;
2117 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002118
Nadav Amitd1442d82014-09-18 22:39:39 +03002119 if (ctxt->mode == X86EMUL_MODE_PROT64)
2120 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2121 VCPU_SREG_CS);
2122
2123 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002124 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002125 return rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002126 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002127 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002128 return rc;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002129 /* Outer-privilege level return is not implemented */
2130 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2131 return X86EMUL_UNHANDLEABLE;
Nadav Amitab646f52014-12-11 12:27:14 +01002132 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
Nadav Amitd1442d82014-09-18 22:39:39 +03002133 &new_desc);
2134 if (rc != X86EMUL_CONTINUE)
2135 return rc;
Nadav Amitd50eaa12014-11-19 17:43:11 +02002136 rc = assign_eip_far(ctxt, eip, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03002137 if (rc != X86EMUL_CONTINUE) {
Nadav Amit7e46ddd2014-10-28 00:03:43 +02002138 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002139 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2140 }
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002141 return rc;
2142}
2143
Bruce Rogers32611072013-09-09 09:40:20 -06002144static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2145{
2146 int rc;
2147
2148 rc = em_ret_far(ctxt);
2149 if (rc != X86EMUL_CONTINUE)
2150 return rc;
2151 rsp_increment(ctxt, ctxt->src.val);
2152 return X86EMUL_CONTINUE;
2153}
2154
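/*
 * CMPXCHG: compare the accumulator with the destination (flags come
 * from em_cmp); if ZF is set write the source to the destination,
 * otherwise load the destination's old value into the accumulator.
 */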
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002155static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2156{
2157 /* Save real source value, then compare EAX against destination. */
Nadav Amit37c564f2014-06-02 18:34:07 +03002158 ctxt->dst.orig_val = ctxt->dst.val;
2159 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002160 ctxt->src.orig_val = ctxt->src.val;
Nadav Amit37c564f2014-06-02 18:34:07 +03002161 ctxt->src.val = ctxt->dst.orig_val;
Avi Kivity158de572013-01-19 19:51:57 +02002162 fastop(ctxt, em_cmp);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002163
2164 if (ctxt->eflags & EFLG_ZF) {
2165 /* Success: write back to memory. */
2166 ctxt->dst.val = ctxt->src.orig_val;
2167 } else {
2168 /* Failure: write the value we saw to EAX. */
2169 ctxt->dst.type = OP_REG;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002170 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Nadav Amit37c564f2014-06-02 18:34:07 +03002171 ctxt->dst.val = ctxt->dst.orig_val;
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002172 }
2173 return X86EMUL_CONTINUE;
2174}
2175
Avi Kivityd4b43252011-09-13 10:45:50 +03002176static int em_lseg(struct x86_emulate_ctxt *ctxt)
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002177{
Avi Kivityd4b43252011-09-13 10:45:50 +03002178 int seg = ctxt->src2.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002179 unsigned short sel;
2180 int rc;
2181
Avi Kivity9dac77f2011-06-01 15:34:25 +03002182 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002183
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002184 rc = load_segment_descriptor(ctxt, sel, seg);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002185 if (rc != X86EMUL_CONTINUE)
2186 return rc;
2187
Avi Kivity9dac77f2011-06-01 15:34:25 +03002188 ctxt->dst.val = ctxt->src.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002189 return rc;
2190}
2191
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002192static void
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002193setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002194 struct desc_struct *cs, struct desc_struct *ss)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002195{
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002196 cs->l = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002197 set_desc_base(cs, 0); /* flat segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002198 cs->g = 1; /* 4kb granularity */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002199 set_desc_limit(cs, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002200 cs->type = 0x0b; /* Read, Execute, Accessed */
2201 cs->s = 1;
2202 cs->dpl = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002203 cs->p = 1;
2204 cs->d = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002205 cs->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002206
Gleb Natapov79168fd2010-04-28 19:15:30 +03002207 set_desc_base(ss, 0); /* flat segment */
2208 set_desc_limit(ss, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002209 ss->g = 1; /* 4kb granularity */
2210 ss->s = 1;
2211 ss->type = 0x03; /* Read/Write, Accessed */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002212 ss->d = 1; /* 32bit stack segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002213 ss->dpl = 0;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002214 ss->p = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002215 ss->l = 0;
2216 ss->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002217}
2218
Avi Kivity1a18a692012-02-01 12:23:21 +02002219static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2220{
2221 u32 eax, ebx, ecx, edx;
2222
2223 eax = ecx = 0;
Avi Kivity0017f932012-06-07 14:10:16 +03002224 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2225 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
Avi Kivity1a18a692012-02-01 12:23:21 +02002226 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2227 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2228}
2229
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002230static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2231{
Mathias Krause0225fb52012-08-30 01:30:16 +02002232 const struct x86_emulate_ops *ops = ctxt->ops;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002233 u32 eax, ebx, ecx, edx;
2234
2235 /*
2236	 * syscall is always enabled in long mode, so the vendor-specific
2237	 * (cpuid) check only matters when other modes are active...
2238 */
2239 if (ctxt->mode == X86EMUL_MODE_PROT64)
2240 return true;
2241
2242 eax = 0x00000000;
2243 ecx = 0x00000000;
Avi Kivity0017f932012-06-07 14:10:16 +03002244 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2245 /*
2246 * Intel ("GenuineIntel")
2247	 * remark: Intel CPUs only support "syscall" in 64-bit
2248	 * long mode, so a 32-bit compat application running in a
2249	 * 64-bit guest will #UD. This could be hidden by emulating
2250	 * the AMD response instead, but real AMD CPUs cannot be
2251	 * made to behave like Intel ones.
2252 */
2253 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2254 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2255 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2256 return false;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002257
Avi Kivity0017f932012-06-07 14:10:16 +03002258 /* AMD ("AuthenticAMD") */
2259 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2260 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2261 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2262 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002263
Avi Kivity0017f932012-06-07 14:10:16 +03002264 /* AMD ("AMDisbetter!") */
2265 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2266 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2267 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2268 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002269
2270 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2271 return false;
2272}
2273
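/*
 * SYSCALL: CS/SS selectors come from MSR_STAR and the return RIP is
 * saved in RCX. In long mode RFLAGS is additionally saved in R11, the
 * new RIP comes from MSR_LSTAR/MSR_CSTAR and RFLAGS is masked with
 * MSR_SYSCALL_MASK; legacy mode takes EIP from MSR_STAR and just
 * clears VM/IF. #UD if the mode or EFER.SCE does not allow SYSCALL.
 */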
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002274static int em_syscall(struct x86_emulate_ctxt *ctxt)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002275{
Mathias Krause0225fb52012-08-30 01:30:16 +02002276 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002277 struct desc_struct cs, ss;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002278 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002279 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002280 u64 efer = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002281
2282 /* syscall is not available in real mode */
Gleb Natapov2e901c42010-03-18 15:20:12 +02002283 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002284 ctxt->mode == X86EMUL_MODE_VM86)
2285 return emulate_ud(ctxt);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002286
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002287 if (!(em_syscall_is_enabled(ctxt)))
2288 return emulate_ud(ctxt);
2289
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002290 ops->get_msr(ctxt, MSR_EFER, &efer);
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002291 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002292
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002293 if (!(efer & EFER_SCE))
2294 return emulate_ud(ctxt);
2295
Avi Kivity717746e2011-04-20 13:37:53 +03002296 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002297 msr_data >>= 32;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002298 cs_sel = (u16)(msr_data & 0xfffc);
2299 ss_sel = (u16)(msr_data + 8);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002300
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002301 if (efer & EFER_LMA) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002302 cs.d = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002303 cs.l = 1;
2304 }
Avi Kivity1aa36612011-04-27 13:20:30 +03002305 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2306 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002307
Avi Kivitydd856ef2012-08-27 23:46:17 +03002308 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002309 if (efer & EFER_LMA) {
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002310#ifdef CONFIG_X86_64
Nadav Amit6c6cb692014-07-21 14:37:30 +03002311 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002312
Avi Kivity717746e2011-04-20 13:37:53 +03002313 ops->get_msr(ctxt,
Gleb Natapov3fb1b5d2010-04-28 19:15:28 +03002314 ctxt->mode == X86EMUL_MODE_PROT64 ?
2315 MSR_LSTAR : MSR_CSTAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002316 ctxt->_eip = msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002317
Avi Kivity717746e2011-04-20 13:37:53 +03002318 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
Nadav Amit6c6cb692014-07-21 14:37:30 +03002319 ctxt->eflags &= ~msr_data;
Nadav Amit807c1422014-11-02 11:54:49 +02002320 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002321#endif
2322 } else {
2323 /* legacy mode */
Avi Kivity717746e2011-04-20 13:37:53 +03002324 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002325 ctxt->_eip = (u32)msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002326
Nadav Amit6c6cb692014-07-21 14:37:30 +03002327 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002328 }
2329
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002330 return X86EMUL_CONTINUE;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002331}
2332
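/*
 * SYSENTER: #GP in real mode and when MSR_IA32_SYSENTER_CS is
 * unusable; loads flat CS/SS derived from that MSR and takes the new
 * RIP/RSP from MSR_IA32_SYSENTER_EIP/ESP, clearing VM and IF.
 */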
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002333static int em_sysenter(struct x86_emulate_ctxt *ctxt)
Andre Przywara8c604352009-06-18 12:56:01 +02002334{
Mathias Krause0225fb52012-08-30 01:30:16 +02002335 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002336 struct desc_struct cs, ss;
Andre Przywara8c604352009-06-18 12:56:01 +02002337 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002338 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002339 u64 efer = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002340
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002341 ops->get_msr(ctxt, MSR_EFER, &efer);
Gleb Natapova0044752010-02-10 14:21:31 +02002342 /* inject #GP if in real mode */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002343 if (ctxt->mode == X86EMUL_MODE_REAL)
2344 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002345
Avi Kivity1a18a692012-02-01 12:23:21 +02002346 /*
2347 * Not recognized on AMD in compat mode (but is recognized in legacy
2348 * mode).
2349 */
2350 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2351 && !vendor_intel(ctxt))
2352 return emulate_ud(ctxt);
2353
Nadav Amitb2c9d432014-11-02 11:55:01 +02002354 /* sysenter/sysexit have not been tested in 64bit mode. */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002355 if (ctxt->mode == X86EMUL_MODE_PROT64)
Nadav Amitb2c9d432014-11-02 11:55:01 +02002356 return X86EMUL_UNHANDLEABLE;
Andre Przywara8c604352009-06-18 12:56:01 +02002357
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002358 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara8c604352009-06-18 12:56:01 +02002359
Avi Kivity717746e2011-04-20 13:37:53 +03002360 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara8c604352009-06-18 12:56:01 +02002361 switch (ctxt->mode) {
2362 case X86EMUL_MODE_PROT32:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002363 if ((msr_data & 0xfffc) == 0x0)
2364 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002365 break;
2366 case X86EMUL_MODE_PROT64:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002367 if (msr_data == 0x0)
2368 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002369 break;
Gleb Natapov9d1b39a2012-09-03 15:24:27 +03002370 default:
2371 break;
Andre Przywara8c604352009-06-18 12:56:01 +02002372 }
2373
Nadav Amit6c6cb692014-07-21 14:37:30 +03002374 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002375 cs_sel = (u16)msr_data;
2376 cs_sel &= ~SELECTOR_RPL_MASK;
2377 ss_sel = cs_sel + 8;
2378 ss_sel &= ~SELECTOR_RPL_MASK;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002379 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002380 cs.d = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002381 cs.l = 1;
2382 }
2383
Avi Kivity1aa36612011-04-27 13:20:30 +03002384 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2385 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara8c604352009-06-18 12:56:01 +02002386
Avi Kivity717746e2011-04-20 13:37:53 +03002387 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002388 ctxt->_eip = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002389
Avi Kivity717746e2011-04-20 13:37:53 +03002390 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002391 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002392
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002393 return X86EMUL_CONTINUE;
Andre Przywara8c604352009-06-18 12:56:01 +02002394}
2395
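/*
 * SYSEXIT: returns to user mode; CS/SS selectors are derived from
 * MSR_IA32_SYSENTER_CS, the new RIP comes from RDX and the new RSP
 * from RCX (both checked for canonical form in 64-bit mode).
 */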
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002396static int em_sysexit(struct x86_emulate_ctxt *ctxt)
Andre Przywara4668f052009-06-18 12:56:02 +02002397{
Mathias Krause0225fb52012-08-30 01:30:16 +02002398 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002399 struct desc_struct cs, ss;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002400 u64 msr_data, rcx, rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002401 int usermode;
Xiao Guangrong1249b962011-05-15 23:25:10 +08002402 u16 cs_sel = 0, ss_sel = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002403
Gleb Natapova0044752010-02-10 14:21:31 +02002404 /* inject #GP if in real mode or Virtual 8086 mode */
2405 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002406 ctxt->mode == X86EMUL_MODE_VM86)
2407 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002408
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002409 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara4668f052009-06-18 12:56:02 +02002410
Avi Kivity9dac77f2011-06-01 15:34:25 +03002411 if ((ctxt->rex_prefix & 0x8) != 0x0)
Andre Przywara4668f052009-06-18 12:56:02 +02002412 usermode = X86EMUL_MODE_PROT64;
2413 else
2414 usermode = X86EMUL_MODE_PROT32;
2415
Nadav Amit234f3ce2014-09-18 22:39:38 +03002416 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2417 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2418
Andre Przywara4668f052009-06-18 12:56:02 +02002419 cs.dpl = 3;
2420 ss.dpl = 3;
Avi Kivity717746e2011-04-20 13:37:53 +03002421 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara4668f052009-06-18 12:56:02 +02002422 switch (usermode) {
2423 case X86EMUL_MODE_PROT32:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002424 cs_sel = (u16)(msr_data + 16);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002425 if ((msr_data & 0xfffc) == 0x0)
2426 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002427 ss_sel = (u16)(msr_data + 24);
Nadav Amitbf0b6822014-09-18 22:39:45 +03002428 rcx = (u32)rcx;
2429 rdx = (u32)rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002430 break;
2431 case X86EMUL_MODE_PROT64:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002432 cs_sel = (u16)(msr_data + 32);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002433 if (msr_data == 0x0)
2434 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002435 ss_sel = cs_sel + 8;
2436 cs.d = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002437 cs.l = 1;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002438 if (is_noncanonical_address(rcx) ||
2439 is_noncanonical_address(rdx))
2440 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002441 break;
2442 }
Gleb Natapov79168fd2010-04-28 19:15:30 +03002443 cs_sel |= SELECTOR_RPL_MASK;
2444 ss_sel |= SELECTOR_RPL_MASK;
Andre Przywara4668f052009-06-18 12:56:02 +02002445
Avi Kivity1aa36612011-04-27 13:20:30 +03002446 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2447 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara4668f052009-06-18 12:56:02 +02002448
Nadav Amit234f3ce2014-09-18 22:39:38 +03002449 ctxt->_eip = rdx;
2450 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
Andre Przywara4668f052009-06-18 12:56:02 +02002451
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002452 return X86EMUL_CONTINUE;
Andre Przywara4668f052009-06-18 12:56:02 +02002453}
2454
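/*
 * True when EFLAGS.IOPL forbids direct port access: never in real mode,
 * always in VM86 mode, otherwise whenever CPL exceeds IOPL.
 */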
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002455static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002456{
2457 int iopl;
2458 if (ctxt->mode == X86EMUL_MODE_REAL)
2459 return false;
2460 if (ctxt->mode == X86EMUL_MODE_VM86)
2461 return true;
2462 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002463 return ctxt->ops->cpl(ctxt) > iopl;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002464}
2465
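/*
 * Consult the I/O permission bitmap in the current TSS: TR must be present
 * with a limit of at least 103 bytes, the bitmap offset is read from TSS
 * offset 102, and the access is allowed only if every bit covering ports
 * [port, port+len) is clear.  E.g. for port 0x3f9 with len 2, two bytes are
 * read at base + io_bitmap_ptr + 0x7f and bits 1-2 of them must be zero.
 */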
2466static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002467 u16 port, u16 len)
2468{
Mathias Krause0225fb52012-08-30 01:30:16 +02002469 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002470 struct desc_struct tr_seg;
Gleb Natapov5601d052011-03-07 14:55:06 +02002471 u32 base3;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002472 int r;
Avi Kivity1aa36612011-04-27 13:20:30 +03002473 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002474 unsigned mask = (1 << len) - 1;
Gleb Natapov5601d052011-03-07 14:55:06 +02002475 unsigned long base;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002476
Avi Kivity1aa36612011-04-27 13:20:30 +03002477 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002478 if (!tr_seg.p)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002479 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002480 if (desc_limit_scaled(&tr_seg) < 103)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002481 return false;
Gleb Natapov5601d052011-03-07 14:55:06 +02002482 base = get_desc_base(&tr_seg);
2483#ifdef CONFIG_X86_64
2484 base |= ((u64)base3) << 32;
2485#endif
Avi Kivity0f65dd72011-04-20 13:37:53 +03002486 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002487 if (r != X86EMUL_CONTINUE)
2488 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002489 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002490 return false;
Avi Kivity0f65dd72011-04-20 13:37:53 +03002491 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002492 if (r != X86EMUL_CONTINUE)
2493 return false;
2494 if ((perm >> bit_idx) & mask)
2495 return false;
2496 return true;
2497}
2498
2499static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002500 u16 port, u16 len)
2501{
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002502 if (ctxt->perm_ok)
2503 return true;
2504
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002505 if (emulator_bad_iopl(ctxt))
2506 if (!emulator_io_port_access_allowed(ctxt, port, len))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002507 return false;
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002508
2509 ctxt->perm_ok = true;
2510
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002511 return true;
2512}
2513
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002514static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002515 struct tss_segment_16 *tss)
2516{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002517 tss->ip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002518 tss->flag = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002519 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2520 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2521 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2522 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2523 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2524 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2525 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2526 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002527
Avi Kivity1aa36612011-04-27 13:20:30 +03002528 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2529 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2530 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2531 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2532 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002533}
2534
2535static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002536 struct tss_segment_16 *tss)
2537{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002538 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002539 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002540
Avi Kivity9dac77f2011-06-01 15:34:25 +03002541 ctxt->_eip = tss->ip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002542 ctxt->eflags = tss->flag | 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002543 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2544 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2545 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2546 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2547 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2548 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2549 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2550 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002551
2552 /*
2553 * SDM says that segment selectors are loaded before segment
2554 * descriptors
2555 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002556 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2557 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2558 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2559 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2560 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002561
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002562 cpl = tss->cs & 3;
2563
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002564 /*
Guo Chaofc058682012-06-28 15:19:51 +08002565	 * Now load segment descriptors. If a fault happens at this stage,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002566	 * it is handled in the context of the new task.
2567 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002568 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2569 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002570 if (ret != X86EMUL_CONTINUE)
2571 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002572 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2573 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002574 if (ret != X86EMUL_CONTINUE)
2575 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002576 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2577 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002578 if (ret != X86EMUL_CONTINUE)
2579 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002580 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2581 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002582 if (ret != X86EMUL_CONTINUE)
2583 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002584 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2585 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002586 if (ret != X86EMUL_CONTINUE)
2587 return ret;
2588
2589 return X86EMUL_CONTINUE;
2590}
2591
2592static int task_switch_16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002593 u16 tss_selector, u16 old_tss_sel,
2594 ulong old_tss_base, struct desc_struct *new_desc)
2595{
Mathias Krause0225fb52012-08-30 01:30:16 +02002596 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002597 struct tss_segment_16 tss_seg;
2598 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002599 u32 new_tss_base = get_desc_base(new_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002600
Avi Kivity0f65dd72011-04-20 13:37:53 +03002601 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002602 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002603 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002604 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002605
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002606 save_state_to_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002607
Avi Kivity0f65dd72011-04-20 13:37:53 +03002608 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002609 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002610 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002611 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002612
Avi Kivity0f65dd72011-04-20 13:37:53 +03002613 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002614 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002615 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002616 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002617
2618 if (old_tss_sel != 0xffff) {
2619 tss_seg.prev_task_link = old_tss_sel;
2620
Avi Kivity0f65dd72011-04-20 13:37:53 +03002621 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002622 &tss_seg.prev_task_link,
2623 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002624 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002625 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002626 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002627 }
2628
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002629 return load_state_from_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002630}
2631
2632static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002633 struct tss_segment_32 *tss)
2634{
Nadav Amit5c7411e2014-04-07 18:37:47 +03002635	/* CR3 and the LDT selector are intentionally not saved */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002636 tss->eip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002637 tss->eflags = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002638 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2639 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2640 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2641 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2642 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2643 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2644 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2645 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002646
Avi Kivity1aa36612011-04-27 13:20:30 +03002647 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2648 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2649 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2650 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2651 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2652 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002653}
2654
2655static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002656 struct tss_segment_32 *tss)
2657{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002658 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002659 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002660
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002661 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002662 return emulate_gp(ctxt, 0);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002663 ctxt->_eip = tss->eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002664 ctxt->eflags = tss->eflags | 2;
Kevin Wolf4cee4792012-02-08 14:34:41 +01002665
2666 /* General purpose registers */
Avi Kivitydd856ef2012-08-27 23:46:17 +03002667 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2668 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2669 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2670 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2671 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2672 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2673 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2674 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002675
2676 /*
2677 * SDM says that segment selectors are loaded before segment
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002678 * descriptors. This is important because CPL checks will
2679 * use CS.RPL.
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002680 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002681 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2682 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2683 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2684 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2685 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2686 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2687 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002688
2689 /*
Kevin Wolf4cee4792012-02-08 14:34:41 +01002690 * If we're switching between Protected Mode and VM86, we need to make
2691 * sure to update the mode before loading the segment descriptors so
2692 * that the selectors are interpreted correctly.
Kevin Wolf4cee4792012-02-08 14:34:41 +01002693 */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002694 if (ctxt->eflags & X86_EFLAGS_VM) {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002695 ctxt->mode = X86EMUL_MODE_VM86;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002696 cpl = 3;
2697 } else {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002698 ctxt->mode = X86EMUL_MODE_PROT32;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002699 cpl = tss->cs & 3;
2700 }
Kevin Wolf4cee4792012-02-08 14:34:41 +01002701
2702 /*
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002703	 * Now load segment descriptors. If a fault happens at this stage,
2704	 * it is handled in the context of the new task.
2705 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002706 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2707 cpl, true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002708 if (ret != X86EMUL_CONTINUE)
2709 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002710 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2711 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002712 if (ret != X86EMUL_CONTINUE)
2713 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002714 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2715 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002716 if (ret != X86EMUL_CONTINUE)
2717 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002718 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2719 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002720 if (ret != X86EMUL_CONTINUE)
2721 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002722 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2723 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002724 if (ret != X86EMUL_CONTINUE)
2725 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002726 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2727 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002728 if (ret != X86EMUL_CONTINUE)
2729 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002730 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2731 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002732 if (ret != X86EMUL_CONTINUE)
2733 return ret;
2734
2735 return X86EMUL_CONTINUE;
2736}
2737
2738static int task_switch_32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002739 u16 tss_selector, u16 old_tss_sel,
2740 ulong old_tss_base, struct desc_struct *new_desc)
2741{
Mathias Krause0225fb52012-08-30 01:30:16 +02002742 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002743 struct tss_segment_32 tss_seg;
2744 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002745 u32 new_tss_base = get_desc_base(new_desc);
Nadav Amit5c7411e2014-04-07 18:37:47 +03002746 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2747 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002748
Avi Kivity0f65dd72011-04-20 13:37:53 +03002749 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002750 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002751 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002752 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002753
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002754 save_state_to_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002755
Nadav Amit5c7411e2014-04-07 18:37:47 +03002756 /* Only GP registers and segment selectors are saved */
2757 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2758 ldt_sel_offset - eip_offset, &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002759 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002760 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002761
Avi Kivity0f65dd72011-04-20 13:37:53 +03002762 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002763 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002764 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002765 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002766
2767 if (old_tss_sel != 0xffff) {
2768 tss_seg.prev_task_link = old_tss_sel;
2769
Avi Kivity0f65dd72011-04-20 13:37:53 +03002770 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002771 &tss_seg.prev_task_link,
2772 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002773 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002774 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002775 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002776 }
2777
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002778 return load_state_from_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002779}
2780
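/*
 * Common task-switch path: read the old and new TSS descriptors, check the
 * task-gate DPL for software-interrupt gates, raise #TS if the new TSS is
 * not present or its limit is too small, clear the old busy bit on jmp/iret,
 * save state into the old TSS and load it from the new one (16- or 32-bit
 * format, chosen by the descriptor type), update EFLAGS.NT and the busy bit
 * as the switch reason dictates, set CR0.TS, load TR, and finally push the
 * error code if the caller supplied one.
 */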
2781static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002782 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002783 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002784{
Mathias Krause0225fb52012-08-30 01:30:16 +02002785 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002786 struct desc_struct curr_tss_desc, next_tss_desc;
2787 int ret;
Avi Kivity1aa36612011-04-27 13:20:30 +03002788 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002789 ulong old_tss_base =
Avi Kivity4bff1e862011-04-20 13:37:53 +03002790 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
Gleb Natapovceffb452010-03-18 15:20:19 +02002791 u32 desc_limit;
Avi Kivitye9194642012-06-13 16:29:39 +03002792 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002793
2794 /* FIXME: old_tss_base == ~0 ? */
2795
Avi Kivitye9194642012-06-13 16:29:39 +03002796 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002797 if (ret != X86EMUL_CONTINUE)
2798 return ret;
Avi Kivitye9194642012-06-13 16:29:39 +03002799 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002800 if (ret != X86EMUL_CONTINUE)
2801 return ret;
2802
2803 /* FIXME: check that next_tss_desc is tss */
2804
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002805 /*
2806 * Check privileges. The three cases are task switch caused by...
2807 *
2808 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2809 * 2. Exception/IRQ/iret: No check is performed
Nadav Amit2c2ca2d2014-11-02 11:54:57 +02002810 * 3. jmp/call to TSS/task-gate: No check is performed since the
2811 * hardware checks it before exiting.
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002812 */
2813 if (reason == TASK_SWITCH_GATE) {
2814 if (idt_index != -1) {
2815 /* Software interrupts */
2816 struct desc_struct task_gate_desc;
2817 int dpl;
2818
2819 ret = read_interrupt_descriptor(ctxt, idt_index,
2820 &task_gate_desc);
2821 if (ret != X86EMUL_CONTINUE)
2822 return ret;
2823
2824 dpl = task_gate_desc.dpl;
2825 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2826 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2827 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002828 }
2829
Gleb Natapovceffb452010-03-18 15:20:19 +02002830 desc_limit = desc_limit_scaled(&next_tss_desc);
2831 if (!next_tss_desc.p ||
2832 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2833 desc_limit < 0x2b)) {
Paolo Bonzini592f0852014-08-20 10:05:08 +02002834 return emulate_ts(ctxt, tss_selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002835 }
2836
2837 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2838 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002839 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002840 }
2841
2842 if (reason == TASK_SWITCH_IRET)
2843 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2844
2845	/* Set the back link to the previous task only if the NT bit is set in
Guo Chaofc058682012-06-28 15:19:51 +08002846	   eflags; note that old_tss_sel is not used after this point. */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002847 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2848 old_tss_sel = 0xffff;
2849
2850 if (next_tss_desc.type & 8)
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002851 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002852 old_tss_base, &next_tss_desc);
2853 else
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002854 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002855 old_tss_base, &next_tss_desc);
Jan Kiszka0760d442010-04-14 15:50:57 +02002856 if (ret != X86EMUL_CONTINUE)
2857 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002858
2859 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2860 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2861
2862 if (reason != TASK_SWITCH_IRET) {
2863 next_tss_desc.type |= (1 << 1); /* set busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002864 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002865 }
2866
Avi Kivity717746e2011-04-20 13:37:53 +03002867 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
Avi Kivity1aa36612011-04-27 13:20:30 +03002868 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002869
Jan Kiszkae269fb22010-04-14 15:51:09 +02002870 if (has_error_code) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002871 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2872 ctxt->lock_prefix = 0;
2873 ctxt->src.val = (unsigned long) error_code;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002874 ret = em_push(ctxt);
Jan Kiszkae269fb22010-04-14 15:51:09 +02002875 }
2876
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002877 return ret;
2878}
2879
2880int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002881 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002882 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002883{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002884 int rc;
2885
Avi Kivitydd856ef2012-08-27 23:46:17 +03002886 invalidate_registers(ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002887 ctxt->_eip = ctxt->eip;
2888 ctxt->dst.type = OP_NONE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002889
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002890 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002891 has_error_code, error_code);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002892
Avi Kivitydd856ef2012-08-27 23:46:17 +03002893 if (rc == X86EMUL_CONTINUE) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002894 ctxt->eip = ctxt->_eip;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002895 writeback_registers(ctxt);
2896 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002897
Gleb Natapova0c0ab22011-03-28 16:57:49 +02002898 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002899}
2900
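/*
 * Step a string operand: advance (or, with EFLAGS.DF set, retreat) the
 * index register by count * element size and refresh the operand's
 * effective address.
 */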
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03002901static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2902 struct operand *op)
Gleb Natapova682e352010-03-18 15:20:21 +02002903{
Gleb Natapovb3356bf2012-09-03 15:24:29 +03002904 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
Gleb Natapova682e352010-03-18 15:20:21 +02002905
Paolo Bonzini01485a22014-11-19 18:25:08 +01002906 register_address_increment(ctxt, reg, df * op->bytes);
2907 op->addr.mem.ea = register_address(ctxt, reg);
Gleb Natapova682e352010-03-18 15:20:21 +02002908}
2909
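/*
 * DAS - decimal adjust AL after subtraction: subtract 6 from the low nibble
 * and 0x60 from AL as needed, update CF/AF accordingly, and recompute
 * PF/ZF/SF by OR-ing the result with zero.
 */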
Avi Kivity7af04fc2010-08-18 14:16:35 +03002910static int em_das(struct x86_emulate_ctxt *ctxt)
2911{
Avi Kivity7af04fc2010-08-18 14:16:35 +03002912 u8 al, old_al;
2913 bool af, cf, old_cf;
2914
2915 cf = ctxt->eflags & X86_EFLAGS_CF;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002916 al = ctxt->dst.val;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002917
2918 old_al = al;
2919 old_cf = cf;
2920 cf = false;
2921 af = ctxt->eflags & X86_EFLAGS_AF;
2922 if ((al & 0x0f) > 9 || af) {
2923 al -= 6;
2924 cf = old_cf | (al >= 250);
2925 af = true;
2926 } else {
2927 af = false;
2928 }
2929 if (old_al > 0x99 || old_cf) {
2930 al -= 0x60;
2931 cf = true;
2932 }
2933
Avi Kivity9dac77f2011-06-01 15:34:25 +03002934 ctxt->dst.val = al;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002935 /* Set PF, ZF, SF */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002936 ctxt->src.type = OP_IMM;
2937 ctxt->src.val = 0;
2938 ctxt->src.bytes = 1;
Avi Kivity158de572013-01-19 19:51:57 +02002939 fastop(ctxt, em_or);
Avi Kivity7af04fc2010-08-18 14:16:35 +03002940 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2941 if (cf)
2942 ctxt->eflags |= X86_EFLAGS_CF;
2943 if (af)
2944 ctxt->eflags |= X86_EFLAGS_AF;
2945 return X86EMUL_CONTINUE;
2946}
2947
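/*
 * AAM imm8: AH = AL / imm8, AL = AL % imm8 (#DE if imm8 is zero), e.g.
 * AAM 10 with AL = 123 leaves AH = 12, AL = 3.  PF/ZF/SF are recomputed
 * from the result.
 */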
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02002948static int em_aam(struct x86_emulate_ctxt *ctxt)
2949{
2950 u8 al, ah;
2951
2952 if (ctxt->src.val == 0)
2953 return emulate_de(ctxt);
2954
2955 al = ctxt->dst.val & 0xff;
2956 ah = al / ctxt->src.val;
2957 al %= ctxt->src.val;
2958
2959 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2960
2961 /* Set PF, ZF, SF */
2962 ctxt->src.type = OP_IMM;
2963 ctxt->src.val = 0;
2964 ctxt->src.bytes = 1;
2965 fastop(ctxt, em_or);
2966
2967 return X86EMUL_CONTINUE;
2968}
2969
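/*
 * AAD imm8: AL = (AL + AH * imm8) & 0xff, AH = 0, e.g. with AH = 12 and
 * AL = 3, AAD 10 leaves AL = 123.  PF/ZF/SF are recomputed from the result.
 */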
Gleb Natapov7f662272012-12-10 11:42:30 +02002970static int em_aad(struct x86_emulate_ctxt *ctxt)
2971{
2972 u8 al = ctxt->dst.val & 0xff;
2973 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2974
2975 al = (al + (ah * ctxt->src.val)) & 0xff;
2976
2977 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2978
Gleb Natapovf583c292013-02-13 17:50:39 +02002979 /* Set PF, ZF, SF */
2980 ctxt->src.type = OP_IMM;
2981 ctxt->src.val = 0;
2982 ctxt->src.bytes = 1;
2983 fastop(ctxt, em_or);
Gleb Natapov7f662272012-12-10 11:42:30 +02002984
2985 return X86EMUL_CONTINUE;
2986}
2987
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09002988static int em_call(struct x86_emulate_ctxt *ctxt)
2989{
Nadav Amit234f3ce2014-09-18 22:39:38 +03002990 int rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09002991 long rel = ctxt->src.val;
2992
2993 ctxt->src.val = (unsigned long)ctxt->_eip;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002994 rc = jmp_rel(ctxt, rel);
2995 if (rc != X86EMUL_CONTINUE)
2996 return rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09002997 return em_push(ctxt);
2998}
2999
Avi Kivity0ef753b2010-08-18 14:51:45 +03003000static int em_call_far(struct x86_emulate_ctxt *ctxt)
3001{
Avi Kivity0ef753b2010-08-18 14:51:45 +03003002 u16 sel, old_cs;
3003 ulong old_eip;
3004 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03003005 struct desc_struct old_desc, new_desc;
3006 const struct x86_emulate_ops *ops = ctxt->ops;
3007 int cpl = ctxt->ops->cpl(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003008
Avi Kivity9dac77f2011-06-01 15:34:25 +03003009 old_eip = ctxt->_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003010 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003011
Avi Kivity9dac77f2011-06-01 15:34:25 +03003012 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Nadav Amitd1442d82014-09-18 22:39:39 +03003013 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3014 &new_desc);
3015 if (rc != X86EMUL_CONTINUE)
Avi Kivity0ef753b2010-08-18 14:51:45 +03003016 return X86EMUL_CONTINUE;
3017
Nadav Amitd50eaa12014-11-19 17:43:11 +02003018 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03003019 if (rc != X86EMUL_CONTINUE)
3020 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003021
Avi Kivity9dac77f2011-06-01 15:34:25 +03003022 ctxt->src.val = old_cs;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09003023 rc = em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003024 if (rc != X86EMUL_CONTINUE)
Nadav Amitd1442d82014-09-18 22:39:39 +03003025 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003026
Avi Kivity9dac77f2011-06-01 15:34:25 +03003027 ctxt->src.val = old_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003028 rc = em_push(ctxt);
3029 /* If we failed, we tainted the memory, but the very least we should
3030 restore cs */
3031 if (rc != X86EMUL_CONTINUE)
3032 goto fail;
3033 return rc;
3034fail:
3035 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3036 return rc;
3037
Avi Kivity0ef753b2010-08-18 14:51:45 +03003038}
3039
Avi Kivity40ece7c2010-08-18 15:12:09 +03003040static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3041{
Avi Kivity40ece7c2010-08-18 15:12:09 +03003042 int rc;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003043 unsigned long eip;
Avi Kivity40ece7c2010-08-18 15:12:09 +03003044
Nadav Amit234f3ce2014-09-18 22:39:38 +03003045 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3046 if (rc != X86EMUL_CONTINUE)
3047 return rc;
3048 rc = assign_eip_near(ctxt, eip);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003049 if (rc != X86EMUL_CONTINUE)
3050 return rc;
Avi Kivity5ad105e2012-08-19 14:34:31 +03003051 rsp_increment(ctxt, ctxt->src.val);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003052 return X86EMUL_CONTINUE;
3053}
3054
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003055static int em_xchg(struct x86_emulate_ctxt *ctxt)
3056{
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003057 /* Write back the register source. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003058 ctxt->src.val = ctxt->dst.val;
3059 write_register_operand(&ctxt->src);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003060
3061 /* Write back the memory destination with implicit LOCK prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003062 ctxt->dst.val = ctxt->src.orig_val;
3063 ctxt->lock_prefix = 1;
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003064 return X86EMUL_CONTINUE;
3065}
3066
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003067static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3068{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003069 ctxt->dst.val = ctxt->src2.val;
Avi Kivity4d758342013-01-19 19:51:55 +02003070 return fastop(ctxt, em_imul);
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003071}
3072
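/*
 * CWD/CDQ/CQO: replicate the sign bit of the source accumulator into
 * DX/EDX/RDX, e.g. AX = 0x8000 yields DX = 0xffff.
 */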
Avi Kivity61429142010-08-19 15:13:00 +03003073static int em_cwd(struct x86_emulate_ctxt *ctxt)
3074{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003075 ctxt->dst.type = OP_REG;
3076 ctxt->dst.bytes = ctxt->src.bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03003077 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivity9dac77f2011-06-01 15:34:25 +03003078 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
Avi Kivity61429142010-08-19 15:13:00 +03003079
3080 return X86EMUL_CONTINUE;
3081}
3082
Avi Kivity48bb5d32010-08-18 18:54:34 +03003083static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3084{
Avi Kivity48bb5d32010-08-18 18:54:34 +03003085 u64 tsc = 0;
3086
Avi Kivity717746e2011-04-20 13:37:53 +03003087 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003088 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3089 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
Avi Kivity48bb5d32010-08-18 18:54:34 +03003090 return X86EMUL_CONTINUE;
3091}
3092
Avi Kivity222d21a2011-11-10 14:57:30 +02003093static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3094{
3095 u64 pmc;
3096
Avi Kivitydd856ef2012-08-27 23:46:17 +03003097 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
Avi Kivity222d21a2011-11-10 14:57:30 +02003098 return emulate_gp(ctxt, 0);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003099 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3100 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
Avi Kivity222d21a2011-11-10 14:57:30 +02003101 return X86EMUL_CONTINUE;
3102}
3103
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003104static int em_mov(struct x86_emulate_ctxt *ctxt)
3105{
Paolo Bonzini54cfdb32014-03-27 11:36:25 +01003106 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003107 return X86EMUL_CONTINUE;
3108}
3109
Borislav Petkov84cffe42013-10-29 12:54:56 +01003110#define FFL(x) bit(X86_FEATURE_##x)
3111
3112static int em_movbe(struct x86_emulate_ctxt *ctxt)
3113{
3114 u32 ebx, ecx, edx, eax = 1;
3115 u16 tmp;
3116
3117 /*
3118 * Check MOVBE is set in the guest-visible CPUID leaf.
3119 */
3120 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3121 if (!(ecx & FFL(MOVBE)))
3122 return emulate_ud(ctxt);
3123
3124 switch (ctxt->op_bytes) {
3125 case 2:
3126 /*
3127 * From MOVBE definition: "...When the operand size is 16 bits,
3128 * the upper word of the destination register remains unchanged
3129 * ..."
3130 *
3131	 * Casting either ->valptr or ->val to u16 would break strict-aliasing
3132	 * rules, so we have to do the operation almost by hand.
3133 */
3134 tmp = (u16)ctxt->src.val;
3135 ctxt->dst.val &= ~0xffffUL;
3136 ctxt->dst.val |= (unsigned long)swab16(tmp);
3137 break;
3138 case 4:
3139 ctxt->dst.val = swab32((u32)ctxt->src.val);
3140 break;
3141 case 8:
3142 ctxt->dst.val = swab64(ctxt->src.val);
3143 break;
3144 default:
Paolo Bonzini592f0852014-08-20 10:05:08 +02003145 BUG();
Borislav Petkov84cffe42013-10-29 12:54:56 +01003146 }
3147 return X86EMUL_CONTINUE;
3148}
3149
Takuya Yoshikawabc00f8d2011-11-22 15:19:19 +09003150static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3151{
3152 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3153 return emulate_gp(ctxt, 0);
3154
3155 /* Disable writeback. */
3156 ctxt->dst.type = OP_NONE;
3157 return X86EMUL_CONTINUE;
3158}
3159
3160static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3161{
3162 unsigned long val;
3163
3164 if (ctxt->mode == X86EMUL_MODE_PROT64)
3165 val = ctxt->src.val & ~0ULL;
3166 else
3167 val = ctxt->src.val & ~0U;
3168
3169 /* #UD condition is already handled. */
3170 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3171 return emulate_gp(ctxt, 0);
3172
3173 /* Disable writeback. */
3174 ctxt->dst.type = OP_NONE;
3175 return X86EMUL_CONTINUE;
3176}
3177
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003178static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3179{
3180 u64 msr_data;
3181
Avi Kivitydd856ef2012-08-27 23:46:17 +03003182 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3183 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3184 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003185 return emulate_gp(ctxt, 0);
3186
3187 return X86EMUL_CONTINUE;
3188}
3189
3190static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3191{
3192 u64 msr_data;
3193
Avi Kivitydd856ef2012-08-27 23:46:17 +03003194 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003195 return emulate_gp(ctxt, 0);
3196
Avi Kivitydd856ef2012-08-27 23:46:17 +03003197 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3198 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003199 return X86EMUL_CONTINUE;
3200}
3201
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003202static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3203{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003204 if (ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003205 return emulate_ud(ctxt);
3206
Avi Kivity9dac77f2011-06-01 15:34:25 +03003207 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
Nadav Amitb5bbf102014-11-02 11:54:46 +02003208 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3209 ctxt->dst.bytes = 2;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003210 return X86EMUL_CONTINUE;
3211}
3212
3213static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3214{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003215 u16 sel = ctxt->src.val;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003216
Avi Kivity9dac77f2011-06-01 15:34:25 +03003217 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003218 return emulate_ud(ctxt);
3219
Avi Kivity9dac77f2011-06-01 15:34:25 +03003220 if (ctxt->modrm_reg == VCPU_SREG_SS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003221 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3222
3223 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003224 ctxt->dst.type = OP_NONE;
3225 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003226}
3227
Avi Kivitya14e5792012-06-13 12:28:33 +03003228static int em_lldt(struct x86_emulate_ctxt *ctxt)
3229{
3230 u16 sel = ctxt->src.val;
3231
3232 /* Disable writeback. */
3233 ctxt->dst.type = OP_NONE;
3234 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3235}
3236
Avi Kivity80890002012-06-13 16:33:29 +03003237static int em_ltr(struct x86_emulate_ctxt *ctxt)
3238{
3239 u16 sel = ctxt->src.val;
3240
3241 /* Disable writeback. */
3242 ctxt->dst.type = OP_NONE;
3243 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3244}
3245
Avi Kivity38503912011-03-31 18:48:09 +02003246static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3247{
Avi Kivity9fa088f2011-03-31 18:54:30 +02003248 int rc;
3249 ulong linear;
3250
Avi Kivity9dac77f2011-06-01 15:34:25 +03003251 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02003252 if (rc == X86EMUL_CONTINUE)
Avi Kivity3cb16fe2011-04-20 15:38:44 +03003253 ctxt->ops->invlpg(ctxt, linear);
Avi Kivity38503912011-03-31 18:48:09 +02003254 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003255 ctxt->dst.type = OP_NONE;
Avi Kivity38503912011-03-31 18:48:09 +02003256 return X86EMUL_CONTINUE;
3257}
3258
Avi Kivity2d04a052011-04-20 15:32:49 +03003259static int em_clts(struct x86_emulate_ctxt *ctxt)
3260{
3261 ulong cr0;
3262
3263 cr0 = ctxt->ops->get_cr(ctxt, 0);
3264 cr0 &= ~X86_CR0_TS;
3265 ctxt->ops->set_cr(ctxt, 0, cr0);
3266 return X86EMUL_CONTINUE;
3267}
3268
Avi Kivity26d05cc2011-04-21 12:07:59 +03003269static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3270{
Nadav Amit0f54a322014-08-29 11:26:55 +03003271 int rc = ctxt->ops->fix_hypercall(ctxt);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003272
Avi Kivity26d05cc2011-04-21 12:07:59 +03003273 if (rc != X86EMUL_CONTINUE)
3274 return rc;
3275
3276 /* Let the processor re-execute the fixed hypercall */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003277 ctxt->_eip = ctxt->eip;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003278 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003279 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003280 return X86EMUL_CONTINUE;
3281}
3282
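/*
 * SGDT/SIDT helper: fetch GDTR or IDTR through the supplied callback and
 * store its limit and base to memory; with a 16-bit operand size the top
 * byte of the 32-bit base is stored as zero.
 */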
Avi Kivity96051572012-06-10 17:21:18 +03003283static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3284 void (*get)(struct x86_emulate_ctxt *ctxt,
3285 struct desc_ptr *ptr))
3286{
3287 struct desc_ptr desc_ptr;
3288
3289 if (ctxt->mode == X86EMUL_MODE_PROT64)
3290 ctxt->op_bytes = 8;
3291 get(ctxt, &desc_ptr);
3292 if (ctxt->op_bytes == 2) {
3293 ctxt->op_bytes = 4;
3294 desc_ptr.address &= 0x00ffffff;
3295 }
3296 /* Disable writeback. */
3297 ctxt->dst.type = OP_NONE;
3298 return segmented_write(ctxt, ctxt->dst.addr.mem,
3299 &desc_ptr, 2 + ctxt->op_bytes);
3300}
3301
3302static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3303{
3304 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3305}
3306
3307static int em_sidt(struct x86_emulate_ctxt *ctxt)
3308{
3309 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3310}
3311
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003312static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003313{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003314 struct desc_ptr desc_ptr;
3315 int rc;
3316
Avi Kivity510425f2012-06-07 17:04:36 +03003317 if (ctxt->mode == X86EMUL_MODE_PROT64)
3318 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003319 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Avi Kivity26d05cc2011-04-21 12:07:59 +03003320 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003321 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003322 if (rc != X86EMUL_CONTINUE)
3323 return rc;
Nadav Amit9a9abf62014-11-02 11:54:56 +02003324 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3325 is_noncanonical_address(desc_ptr.address))
3326 return emulate_gp(ctxt, 0);
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003327 if (lgdt)
3328 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3329 else
3330 ctxt->ops->set_idt(ctxt, &desc_ptr);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003331 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003332 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003333 return X86EMUL_CONTINUE;
3334}
3335
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003336static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3337{
3338 return em_lgdt_lidt(ctxt, true);
3339}
3340
Avi Kivity5ef39c72011-04-21 12:21:50 +03003341static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003342{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003343 int rc;
3344
Avi Kivity5ef39c72011-04-21 12:21:50 +03003345 rc = ctxt->ops->fix_hypercall(ctxt);
3346
Avi Kivity26d05cc2011-04-21 12:07:59 +03003347 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003348 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003349 return rc;
3350}
3351
3352static int em_lidt(struct x86_emulate_ctxt *ctxt)
3353{
Nadav Amit5b7f6a1e2014-11-02 11:54:55 +02003354 return em_lgdt_lidt(ctxt, false);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003355}
3356
3357static int em_smsw(struct x86_emulate_ctxt *ctxt)
3358{
Nadav Amit32e94d02014-06-02 18:34:11 +03003359 if (ctxt->dst.type == OP_MEM)
3360 ctxt->dst.bytes = 2;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003361 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003362 return X86EMUL_CONTINUE;
3363}
3364
3365static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3366{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003367 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003368 | (ctxt->src.val & 0x0f));
3369 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003370 return X86EMUL_CONTINUE;
3371}
3372
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003373static int em_loop(struct x86_emulate_ctxt *ctxt)
3374{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003375 int rc = X86EMUL_CONTINUE;
3376
Paolo Bonzini01485a22014-11-19 18:25:08 +01003377 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003378 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
Avi Kivity9dac77f2011-06-01 15:34:25 +03003379 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
Nadav Amit234f3ce2014-09-18 22:39:38 +03003380 rc = jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003381
Nadav Amit234f3ce2014-09-18 22:39:38 +03003382 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003383}
3384
3385static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3386{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003387 int rc = X86EMUL_CONTINUE;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003388
Nadav Amit234f3ce2014-09-18 22:39:38 +03003389 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3390 rc = jmp_rel(ctxt, ctxt->src.val);
3391
3392 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003393}
3394
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003395static int em_in(struct x86_emulate_ctxt *ctxt)
3396{
3397 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3398 &ctxt->dst.val))
3399 return X86EMUL_IO_NEEDED;
3400
3401 return X86EMUL_CONTINUE;
3402}
3403
3404static int em_out(struct x86_emulate_ctxt *ctxt)
3405{
3406 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3407 &ctxt->src.val, 1);
3408 /* Disable writeback. */
3409 ctxt->dst.type = OP_NONE;
3410 return X86EMUL_CONTINUE;
3411}
3412
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003413static int em_cli(struct x86_emulate_ctxt *ctxt)
3414{
3415 if (emulator_bad_iopl(ctxt))
3416 return emulate_gp(ctxt, 0);
3417
3418 ctxt->eflags &= ~X86_EFLAGS_IF;
3419 return X86EMUL_CONTINUE;
3420}
3421
3422static int em_sti(struct x86_emulate_ctxt *ctxt)
3423{
3424 if (emulator_bad_iopl(ctxt))
3425 return emulate_gp(ctxt, 0);
3426
3427 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3428 ctxt->eflags |= X86_EFLAGS_IF;
3429 return X86EMUL_CONTINUE;
3430}
3431
Avi Kivity6d6eede2012-06-07 14:11:36 +03003432static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3433{
3434 u32 eax, ebx, ecx, edx;
3435
Avi Kivitydd856ef2012-08-27 23:46:17 +03003436 eax = reg_read(ctxt, VCPU_REGS_RAX);
3437 ecx = reg_read(ctxt, VCPU_REGS_RCX);
Avi Kivity6d6eede2012-06-07 14:11:36 +03003438 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003439 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3440 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3441 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3442 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
Avi Kivity6d6eede2012-06-07 14:11:36 +03003443 return X86EMUL_CONTINUE;
3444}
3445
Paolo Bonzini98f73632013-10-31 11:19:42 +01003446static int em_sahf(struct x86_emulate_ctxt *ctxt)
3447{
3448 u32 flags;
3449
3450 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3451 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3452
3453 ctxt->eflags &= ~0xffUL;
3454 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3455 return X86EMUL_CONTINUE;
3456}
3457
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003458static int em_lahf(struct x86_emulate_ctxt *ctxt)
3459{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003460 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3461 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003462 return X86EMUL_CONTINUE;
3463}
3464
Avi Kivity92998362012-06-13 12:25:06 +03003465static int em_bswap(struct x86_emulate_ctxt *ctxt)
3466{
3467 switch (ctxt->op_bytes) {
3468#ifdef CONFIG_X86_64
3469 case 8:
3470 asm("bswap %0" : "+r"(ctxt->dst.val));
3471 break;
3472#endif
3473 default:
3474 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3475 break;
3476 }
3477 return X86EMUL_CONTINUE;
3478}
3479
Nadav Amit13e457e2014-10-13 13:04:13 +03003480static int em_clflush(struct x86_emulate_ctxt *ctxt)
3481{
3482 /* emulating clflush regardless of cpuid */
3483 return X86EMUL_CONTINUE;
3484}
3485
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003486static bool valid_cr(int nr)
3487{
3488 switch (nr) {
3489 case 0:
3490 case 2 ... 4:
3491 case 8:
3492 return true;
3493 default:
3494 return false;
3495 }
3496}
3497
3498static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3499{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003500 if (!valid_cr(ctxt->modrm_reg))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003501 return emulate_ud(ctxt);
3502
3503 return X86EMUL_CONTINUE;
3504}
3505
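/*
 * Validate a MOV to CRn: reject values with reserved bits set, the illegal
 * CR0 combinations PG-without-PE and NW-without-CD, enabling CR0.PG under
 * EFER.LME with CR4.PAE clear, reserved CR3 bits in long mode, and clearing
 * CR4.PAE while EFER.LMA is set.
 */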
3506static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3507{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003508 u64 new_val = ctxt->src.val64;
3509 int cr = ctxt->modrm_reg;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003510 u64 efer = 0;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003511
3512 static u64 cr_reserved_bits[] = {
3513 0xffffffff00000000ULL,
3514 0, 0, 0, /* CR3 checked later */
3515 CR4_RESERVED_BITS,
3516 0, 0, 0,
3517 CR8_RESERVED_BITS,
3518 };
3519
3520 if (!valid_cr(cr))
3521 return emulate_ud(ctxt);
3522
3523 if (new_val & cr_reserved_bits[cr])
3524 return emulate_gp(ctxt, 0);
3525
3526 switch (cr) {
3527 case 0: {
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003528 u64 cr4;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003529 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3530 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3531 return emulate_gp(ctxt, 0);
3532
Avi Kivity717746e2011-04-20 13:37:53 +03003533 cr4 = ctxt->ops->get_cr(ctxt, 4);
3534 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003535
3536 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3537 !(cr4 & X86_CR4_PAE))
3538 return emulate_gp(ctxt, 0);
3539
3540 break;
3541 }
3542 case 3: {
3543 u64 rsvd = 0;
3544
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003545 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3546 if (efer & EFER_LMA)
Nadav Amit9d88fca2014-11-02 11:54:52 +02003547 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003548
3549 if (new_val & rsvd)
3550 return emulate_gp(ctxt, 0);
3551
3552 break;
3553 }
3554 case 4: {
Avi Kivity717746e2011-04-20 13:37:53 +03003555 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003556
3557 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3558 return emulate_gp(ctxt, 0);
3559
3560 break;
3561 }
3562 }
3563
3564 return X86EMUL_CONTINUE;
3565}
3566
Joerg Roedel3b88e412011-04-04 12:39:29 +02003567static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3568{
3569 unsigned long dr7;
3570
Avi Kivity717746e2011-04-20 13:37:53 +03003571 ctxt->ops->get_dr(ctxt, 7, &dr7);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003572
3573 /* Check if DR7.Global_Enable is set */
3574 return dr7 & (1 << 13);
3575}
3576
3577static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3578{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003579 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003580 u64 cr4;
3581
3582 if (dr > 7)
3583 return emulate_ud(ctxt);
3584
Avi Kivity717746e2011-04-20 13:37:53 +03003585 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003586 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3587 return emulate_ud(ctxt);
3588
Nadav Amit6d2a0522014-11-02 11:54:43 +02003589 if (check_dr7_gd(ctxt)) {
3590 ulong dr6;
3591
3592 ctxt->ops->get_dr(ctxt, 6, &dr6);
3593 dr6 &= ~15;
3594 dr6 |= DR6_BD | DR6_RTM;
3595 ctxt->ops->set_dr(ctxt, 6, dr6);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003596 return emulate_db(ctxt);
Nadav Amit6d2a0522014-11-02 11:54:43 +02003597 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02003598
3599 return X86EMUL_CONTINUE;
3600}
3601
3602static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3603{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003604 u64 new_val = ctxt->src.val64;
3605 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003606
3607 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3608 return emulate_gp(ctxt, 0);
3609
3610 return check_dr_read(ctxt);
3611}
3612
Joerg Roedel01de8b02011-04-04 12:39:31 +02003613static int check_svme(struct x86_emulate_ctxt *ctxt)
3614{
3615 u64 efer;
3616
Avi Kivity717746e2011-04-20 13:37:53 +03003617 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003618
3619 if (!(efer & EFER_SVME))
3620 return emulate_ud(ctxt);
3621
3622 return X86EMUL_CONTINUE;
3623}
3624
3625static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3626{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003627 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003628
3629 /* Valid physical address? */
Randy Dunlapd4224442011-04-21 09:09:22 -07003630 if (rax & 0xffff000000000000ULL)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003631 return emulate_gp(ctxt, 0);
3632
3633 return check_svme(ctxt);
3634}
3635
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003636static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3637{
Avi Kivity717746e2011-04-20 13:37:53 +03003638 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003639
Avi Kivity717746e2011-04-20 13:37:53 +03003640 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003641 return emulate_ud(ctxt);
3642
3643 return X86EMUL_CONTINUE;
3644}
3645
Joerg Roedel80612522011-04-04 12:39:33 +02003646static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3647{
Avi Kivity717746e2011-04-20 13:37:53 +03003648 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003649 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
Joerg Roedel80612522011-04-04 12:39:33 +02003650
Avi Kivity717746e2011-04-20 13:37:53 +03003651 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
Nadav Amit67f4d422014-06-02 18:34:09 +03003652 ctxt->ops->check_pmc(ctxt, rcx))
Joerg Roedel80612522011-04-04 12:39:33 +02003653 return emulate_gp(ctxt, 0);
3654
3655 return X86EMUL_CONTINUE;
3656}
3657
Joerg Roedelf6511932011-04-04 12:39:35 +02003658static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3659{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003660 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3661 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003662 return emulate_gp(ctxt, 0);
3663
3664 return X86EMUL_CONTINUE;
3665}
3666
3667static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3668{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003669 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3670 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003671 return emulate_gp(ctxt, 0);
3672
3673 return X86EMUL_CONTINUE;
3674}
3675
Avi Kivity73fba5f2010-07-29 15:11:53 +03003676#define D(_y) { .flags = (_y) }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003677#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3678#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3679 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Gleb Natapov0b789ee2013-04-11 11:59:55 +03003680#define N D(NotImpl)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003681#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003682#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3683#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
Nadav Amit39f062f2014-11-26 15:47:18 +02003684#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
Gleb Natapov045a2822012-12-20 16:57:43 +02003685#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003686#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
Avi Kivitye28bbd42013-01-04 16:18:48 +02003687#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
Avi Kivityc4f035c2011-04-04 12:39:22 +02003688#define II(_f, _e, _i) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003689 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
Joerg Roedeld09beab2011-04-04 12:39:25 +02003690#define IIP(_f, _e, _i, _p) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003691 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3692 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Avi Kivityaa97bb42010-01-20 18:09:23 +02003693#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003694
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003695#define D2bv(_f) D((_f) | ByteOp), D(_f)
Joerg Roedelf6511932011-04-04 12:39:35 +02003696#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003697#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
Avi Kivityf7857f32013-01-04 16:18:53 +02003698#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003699#define I2bvIP(_f, _e, _i, _p) \
3700 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003701
Avi Kivityfb864fb2013-01-04 16:18:54 +02003702#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3703 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3704 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
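/*
 * Example: F6ALU(Lock, em_add) expands to the six classic ALU entries
 * (reg->mem, mem->reg and imm->AL/eAX, each in byte and full-size
 * forms), which is how opcodes 0x00-0x05 are described below.
 */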
Avi Kivity6230f7f2010-08-26 18:34:55 +03003705
Nadav Amit0f54a322014-08-29 11:26:55 +03003706static const struct opcode group7_rm0[] = {
3707 N,
3708 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3709 N, N, N, N, N, N,
3710};
3711
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003712static const struct opcode group7_rm1[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003713 DI(SrcNone | Priv, monitor),
3714 DI(SrcNone | Priv, mwait),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003715 N, N, N, N, N, N,
3716};
3717
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003718static const struct opcode group7_rm3[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003719 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
Borislav Petkovb51e9742013-09-22 16:44:52 +02003720 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003721 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3722 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3723 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3724 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3725 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3726 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003727};
Avi Kivity6230f7f2010-08-26 18:34:55 +03003728
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003729static const struct opcode group7_rm7[] = {
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003730 N,
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003731 DIP(SrcNone, rdtscp, check_rdtsc),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003732 N, N, N, N, N, N,
3733};
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09003734
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003735static const struct opcode group1[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003736 F(Lock, em_add),
3737 F(Lock | PageTable, em_or),
3738 F(Lock, em_adc),
3739 F(Lock, em_sbb),
3740 F(Lock | PageTable, em_and),
3741 F(Lock, em_sub),
3742 F(Lock, em_xor),
3743 F(NoWrite, em_cmp),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003744};
3745
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003746static const struct opcode group1A[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003747 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003748};
3749
Avi Kivity007a3b52013-01-19 19:51:51 +02003750static const struct opcode group2[] = {
3751 F(DstMem | ModRM, em_rol),
3752 F(DstMem | ModRM, em_ror),
3753 F(DstMem | ModRM, em_rcl),
3754 F(DstMem | ModRM, em_rcr),
3755 F(DstMem | ModRM, em_shl),
3756 F(DstMem | ModRM, em_shr),
3757 F(DstMem | ModRM, em_shl),
3758 F(DstMem | ModRM, em_sar),
3759};
3760
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003761static const struct opcode group3[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003762 F(DstMem | SrcImm | NoWrite, em_test),
3763 F(DstMem | SrcImm | NoWrite, em_test),
Avi Kivity45a14672013-01-04 16:18:52 +02003764 F(DstMem | SrcNone | Lock, em_not),
3765 F(DstMem | SrcNone | Lock, em_neg),
Avi Kivityb9fa4092013-02-09 11:31:48 +02003766 F(DstXacc | Src2Mem, em_mul_ex),
3767 F(DstXacc | Src2Mem, em_imul_ex),
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02003768 F(DstXacc | Src2Mem, em_div_ex),
3769 F(DstXacc | Src2Mem, em_idiv_ex),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003770};
3771
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003772static const struct opcode group4[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003773 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3774 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003775 N, N, N, N, N, N,
3776};
3777
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003778static const struct opcode group5[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003779 F(DstMem | SrcNone | Lock, em_inc),
3780 F(DstMem | SrcNone | Lock, em_dec),
Nadav Amit58b70752014-10-24 11:35:09 +03003781 I(SrcMem | NearBranch, em_call_near_abs),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003782 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
Nadav Amit58b70752014-10-24 11:35:09 +03003783 I(SrcMem | NearBranch, em_jmp_abs),
Nadav Amitf7784042014-09-18 22:39:41 +03003784 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3785 I(SrcMem | Stack, em_push), D(Undefined),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003786};
3787
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003788static const struct opcode group6[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003789 DI(Prot, sldt),
3790 DI(Prot, str),
Avi Kivitya14e5792012-06-13 12:28:33 +03003791 II(Prot | Priv | SrcMem16, em_lldt, lldt),
Avi Kivity80890002012-06-13 16:33:29 +03003792 II(Prot | Priv | SrcMem16, em_ltr, ltr),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02003793 N, N, N, N,
3794};
3795
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003796static const struct group_dual group7 = { {
Nadav Amit606b1c32014-06-02 18:34:06 +03003797 II(Mov | DstMem, em_sgdt, sgdt),
3798 II(Mov | DstMem, em_sidt, sidt),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003799 II(SrcMem | Priv, em_lgdt, lgdt),
3800 II(SrcMem | Priv, em_lidt, lidt),
3801 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3802 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3803 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003804}, {
Nadav Amit0f54a322014-08-29 11:26:55 +03003805 EXT(0, group7_rm0),
Avi Kivity5ef39c72011-04-21 12:21:50 +03003806 EXT(0, group7_rm1),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003807 N, EXT(0, group7_rm3),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003808 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3809 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3810 EXT(0, group7_rm7),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003811} };
3812
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003813static const struct opcode group8[] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003814 N, N, N, N,
Avi Kivity11c363b2013-01-19 19:51:54 +02003815 F(DstMem | SrcImmByte | NoWrite, em_bt),
3816 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3817 F(DstMem | SrcImmByte | Lock, em_btr),
3818 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003819};
3820
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003821static const struct group_dual group9 = { {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003822 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003823}, {
3824 N, N, N, N, N, N, N, N,
3825} };
3826
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003827static const struct opcode group11[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003828 I(DstMem | SrcImm | Mov | PageTable, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003829 X7(D(Undefined)),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03003830};
3831
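/*
 * Group 15 (0F AE): only the memory form of /7, CLFLUSH, is emulated;
 * the register forms (LFENCE/SFENCE/MFENCE etc.) are left undefined.
 */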
Nadav Amit13e457e2014-10-13 13:04:13 +03003832static const struct gprefix pfx_0f_ae_7 = {
Nadav Amit3f6f1482014-10-13 13:04:14 +03003833 I(SrcMem | ByteOp, em_clflush), N, N, N,
Nadav Amit13e457e2014-10-13 13:04:13 +03003834};
3835
3836static const struct group_dual group15 = { {
3837 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3838}, {
3839 N, N, N, N, N, N, N, N,
3840} };
3841
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003842static const struct gprefix pfx_0f_6f_0f_7f = {
Avi Kivitye5971752012-04-09 18:40:03 +03003843 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
Avi Kivityaa97bb42010-01-20 18:09:23 +02003844};
3845
Nadav Amit39f062f2014-11-26 15:47:18 +02003846static const struct instr_dual instr_dual_0f_2b = {
3847 I(0, em_mov), N
3848};
3849
Paolo Bonzinid5b77062014-07-14 12:54:48 +02003850static const struct gprefix pfx_0f_2b = {
Nadav Amit39f062f2014-11-26 15:47:18 +02003851 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
Avi Kivity3e114eb2012-04-09 18:40:01 +03003852};
3853
Igor Mammedov27ce8252014-03-15 21:01:59 +01003854static const struct gprefix pfx_0f_28_0f_29 = {
Igor Mammedov6fec27d2014-03-15 21:02:00 +01003855 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01003856};
3857
Alex Williamson0a370272014-07-11 11:56:31 -06003858static const struct gprefix pfx_0f_e7 = {
3859 N, I(Sse, em_mov), N, N,
3860};
3861
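/*
 * x87 escape opcodes: only FNSTCW (D9 /7), FNINIT (DB E3) and
 * FNSTSW m16 (DD /7) are emulated; every other slot is undefined.
 */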
Gleb Natapov045a2822012-12-20 16:57:43 +02003862static const struct escape escape_d9 = { {
Nadav Amit16bebef2014-12-25 02:52:18 +02003863 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
Gleb Natapov045a2822012-12-20 16:57:43 +02003864}, {
3865 /* 0xC0 - 0xC7 */
3866 N, N, N, N, N, N, N, N,
3867 /* 0xC8 - 0xCF */
3868 N, N, N, N, N, N, N, N,
3869	/* 0xD0 - 0xD7 */
3870 N, N, N, N, N, N, N, N,
3871 /* 0xD8 - 0xDF */
3872 N, N, N, N, N, N, N, N,
3873 /* 0xE0 - 0xE7 */
3874 N, N, N, N, N, N, N, N,
3875 /* 0xE8 - 0xEF */
3876 N, N, N, N, N, N, N, N,
3877 /* 0xF0 - 0xF7 */
3878 N, N, N, N, N, N, N, N,
3879 /* 0xF8 - 0xFF */
3880 N, N, N, N, N, N, N, N,
3881} };
3882
3883static const struct escape escape_db = { {
3884 N, N, N, N, N, N, N, N,
3885}, {
3886 /* 0xC0 - 0xC7 */
3887 N, N, N, N, N, N, N, N,
3888 /* 0xC8 - 0xCF */
3889 N, N, N, N, N, N, N, N,
3890	/* 0xD0 - 0xD7 */
3891 N, N, N, N, N, N, N, N,
3892 /* 0xD8 - 0xDF */
3893 N, N, N, N, N, N, N, N,
3894 /* 0xE0 - 0xE7 */
3895 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3896 /* 0xE8 - 0xEF */
3897 N, N, N, N, N, N, N, N,
3898 /* 0xF0 - 0xF7 */
3899 N, N, N, N, N, N, N, N,
3900 /* 0xF8 - 0xFF */
3901 N, N, N, N, N, N, N, N,
3902} };
3903
3904static const struct escape escape_dd = { {
Nadav Amit16bebef2014-12-25 02:52:18 +02003905 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
Gleb Natapov045a2822012-12-20 16:57:43 +02003906}, {
3907 /* 0xC0 - 0xC7 */
3908 N, N, N, N, N, N, N, N,
3909 /* 0xC8 - 0xCF */
3910 N, N, N, N, N, N, N, N,
3911	/* 0xD0 - 0xD7 */
3912 N, N, N, N, N, N, N, N,
3913 /* 0xD8 - 0xDF */
3914 N, N, N, N, N, N, N, N,
3915 /* 0xE0 - 0xE7 */
3916 N, N, N, N, N, N, N, N,
3917 /* 0xE8 - 0xEF */
3918 N, N, N, N, N, N, N, N,
3919 /* 0xF0 - 0xF7 */
3920 N, N, N, N, N, N, N, N,
3921 /* 0xF8 - 0xFF */
3922 N, N, N, N, N, N, N, N,
3923} };
3924
Nadav Amit39f062f2014-11-26 15:47:18 +02003925static const struct instr_dual instr_dual_0f_c3 = {
3926 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
3927};
3928
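/*
 * Primary (one-byte) opcode table, indexed by the opcode itself.
 * Group, prefix and escape opcodes dispatch through the sub-tables
 * defined above; N marks opcodes the emulator does not implement.
 */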
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003929static const struct opcode opcode_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003930 /* 0x00 - 0x07 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003931 F6ALU(Lock, em_add),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003932 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3933 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003934 /* 0x08 - 0x0F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003935 F6ALU(Lock | PageTable, em_or),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003936 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3937 N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003938 /* 0x10 - 0x17 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003939 F6ALU(Lock, em_adc),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003940 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3941 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003942 /* 0x18 - 0x1F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003943 F6ALU(Lock, em_sbb),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003944 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3945 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003946 /* 0x20 - 0x27 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003947 F6ALU(Lock | PageTable, em_and), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003948 /* 0x28 - 0x2F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003949 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003950 /* 0x30 - 0x37 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003951 F6ALU(Lock, em_xor), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003952 /* 0x38 - 0x3F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003953 F6ALU(NoWrite, em_cmp), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003954 /* 0x40 - 0x4F */
Avi Kivity95413dc2013-01-19 19:51:53 +02003955 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003956 /* 0x50 - 0x57 */
Avi Kivity63540382010-07-29 15:11:55 +03003957 X8(I(SrcReg | Stack, em_push)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003958 /* 0x58 - 0x5F */
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09003959 X8(I(DstReg | Stack, em_pop)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003960 /* 0x60 - 0x67 */
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09003961 I(ImplicitOps | Stack | No64, em_pusha),
3962 I(ImplicitOps | Stack | No64, em_popa),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003963 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3964 N, N, N, N,
3965 /* 0x68 - 0x6F */
Avi Kivityd46164d2010-08-18 19:29:33 +03003966 I(SrcImm | Mov | Stack, em_push),
3967 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003968 I(SrcImmByte | Mov | Stack, em_push),
3969 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
Gleb Natapovb3356bf2012-09-03 15:24:29 +03003970 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
Takuya Yoshikawa2b5e97e2011-11-23 12:27:39 +09003971 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
Avi Kivity73fba5f2010-07-29 15:11:53 +03003972 /* 0x70 - 0x7F */
Nadav Amit58b70752014-10-24 11:35:09 +03003973 X16(D(SrcImmByte | NearBranch)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003974 /* 0x80 - 0x87 */
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003975 G(ByteOp | DstMem | SrcImm, group1),
3976 G(DstMem | SrcImm, group1),
3977 G(ByteOp | DstMem | SrcImm | No64, group1),
3978 G(DstMem | SrcImmByte, group1),
Avi Kivityfb864fb2013-01-04 16:18:54 +02003979 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003980 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003981 /* 0x88 - 0x8F */
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003982 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003983 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003984 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003985 D(ModRM | SrcMem | NoAccess | DstReg),
3986 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3987 G(0, group1A),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003988 /* 0x90 - 0x97 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02003989 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003990 /* 0x98 - 0x9F */
Avi Kivity61429142010-08-19 15:13:00 +03003991 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
Wei Yongjuncc4feed2010-08-25 14:10:53 +08003992 I(SrcImmFAddr | No64, em_call_far), N,
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09003993 II(ImplicitOps | Stack, em_pushf, pushf),
Paolo Bonzini98f73632013-10-31 11:19:42 +01003994 II(ImplicitOps | Stack, em_popf, popf),
3995 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003996 /* 0xA0 - 0xA7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003997 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003998 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003999 I2bv(SrcSI | DstDI | Mov | String, em_mov),
Nadav Amit5aca3722014-11-02 11:54:50 +02004000 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004001 /* 0xA8 - 0xAF */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004002 F2bv(DstAcc | SrcImm | NoWrite, em_test),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004003 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4004 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
Nadav Amit5aca3722014-11-02 11:54:50 +02004005 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004006 /* 0xB0 - 0xB7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004007 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004008 /* 0xB8 - 0xBF */
Nadav Amit5e2c6882012-12-06 21:55:10 -02004009 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004010 /* 0xC0 - 0xC7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02004011 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
Nadav Amit58b70752014-10-24 11:35:09 +03004012 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4013 I(ImplicitOps | NearBranch, em_ret),
Avi Kivityd4b43252011-09-13 10:45:50 +03004014 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4015 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03004016 G(ByteOp, group11), G(0, group11),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004017 /* 0xC8 - 0xCF */
Avi Kivity612e89f2012-06-12 20:03:23 +03004018 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
Bruce Rogers32611072013-09-09 09:40:20 -06004019 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4020 I(ImplicitOps | Stack, em_ret_far),
Avi Kivity3c6e2762011-04-04 12:39:23 +02004021 D(ImplicitOps), DI(SrcImmByte, intn),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004022 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004023 /* 0xD0 - 0xD7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02004024 G(Src2One | ByteOp, group2), G(Src2One, group2),
4025 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02004026 I(DstAcc | SrcImmUByte | No64, em_aam),
Paolo Bonzini326f5782013-05-09 11:32:51 +02004027 I(DstAcc | SrcImmUByte | No64, em_aad),
4028 F(DstAcc | ByteOp | No64, em_salc),
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004029 I(DstAcc | SrcXLat | ByteOp, em_mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004030 /* 0xD8 - 0xDF */
Gleb Natapov045a2822012-12-20 16:57:43 +02004031 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004032 /* 0xE0 - 0xE7 */
Nadav Amit58b70752014-10-24 11:35:09 +03004033 X3(I(SrcImmByte | NearBranch, em_loop)),
4034 I(SrcImmByte | NearBranch, em_jcxz),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004035 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4036 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004037 /* 0xE8 - 0xEF */
Nadav Amit58b70752014-10-24 11:35:09 +03004038 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4039 I(SrcImmFAddr | No64, em_jmp_far),
4040 D(SrcImmByte | ImplicitOps | NearBranch),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004041 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4042 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004043 /* 0xF0 - 0xF7 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02004044 N, DI(ImplicitOps, icebp), N, N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02004045 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4046 G(ByteOp, group3), G(0, group3),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004047 /* 0xF8 - 0xFF */
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09004048 D(ImplicitOps), D(ImplicitOps),
4049 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004050 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4051};
4052
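/*
 * Two-byte (0F-prefixed) opcode table, indexed by the second opcode
 * byte.
 */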
Mathias Krausefd0a0d82012-08-30 01:30:15 +02004053static const struct opcode twobyte_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03004054 /* 0x00 - 0x0F */
Joerg Roedeldee6bb72011-04-04 12:39:30 +02004055 G(0, group6), GD(0, &group7), N, N,
Borislav Petkovb51e9742013-09-22 16:44:52 +02004056 N, I(ImplicitOps | EmulateOnUD, em_syscall),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004057 II(ImplicitOps | Priv, em_clts, clts), N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02004058 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
Nadav Amit3f6f1482014-10-13 13:04:14 +03004059 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004060 /* 0x10 - 0x1F */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02004061 N, N, N, N, N, N, N, N,
Nadav Amit3f6f1482014-10-13 13:04:14 +03004062 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4063 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004064 /* 0x20 - 0x2F */
Nadav Amit9b88ae92014-05-25 23:05:21 +03004065 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4066 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4067 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4068 check_cr_write),
4069 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4070 check_dr_write),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004071 N, N, N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01004072 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4073 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
Paolo Bonzinid5b77062014-07-14 12:54:48 +02004074 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
Avi Kivity3e114eb2012-04-09 18:40:01 +03004075 N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004076 /* 0x30 - 0x3F */
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09004077 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
Joerg Roedel80612522011-04-04 12:39:33 +02004078 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09004079 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
Avi Kivity222d21a2011-11-10 14:57:30 +02004080 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
Borislav Petkovb51e9742013-09-22 16:44:52 +02004081 I(ImplicitOps | EmulateOnUD, em_sysenter),
4082 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
Avi Kivityd8671622011-02-01 16:32:03 +02004083 N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004084 N, N, N, N, N, N, N, N,
4085 /* 0x40 - 0x4F */
Nadav Amit140bad82014-06-15 16:13:00 +03004086 X16(D(DstReg | SrcMem | ModRM)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004087 /* 0x50 - 0x5F */
4088 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4089 /* 0x60 - 0x6F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02004090 N, N, N, N,
4091 N, N, N, N,
4092 N, N, N, N,
4093 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004094 /* 0x70 - 0x7F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02004095 N, N, N, N,
4096 N, N, N, N,
4097 N, N, N, N,
4098 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004099 /* 0x80 - 0x8F */
Nadav Amit58b70752014-10-24 11:35:09 +03004100 X16(D(SrcImm | NearBranch)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004101 /* 0x90 - 0x9F */
Wei Yongjunee45b582010-08-06 17:10:07 +08004102	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004103 /* 0xA0 - 0xA7 */
Avi Kivity1cd196e2011-09-13 10:45:51 +03004104 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
Avi Kivity11c363b2013-01-19 19:51:54 +02004105 II(ImplicitOps, em_cpuid, cpuid),
4106 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
Avi Kivity0bdea062013-01-19 19:51:50 +02004107 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4108 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004109 /* 0xA8 - 0xAF */
Avi Kivity1cd196e2011-09-13 10:45:51 +03004110 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004111 DI(ImplicitOps, rsm),
Avi Kivity11c363b2013-01-19 19:51:54 +02004112 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
Avi Kivity0bdea062013-01-19 19:51:50 +02004113 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4114 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
Nadav Amit13e457e2014-10-13 13:04:13 +03004115 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004116 /* 0xB0 - 0xB7 */
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09004117 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
Avi Kivityd4b43252011-09-13 10:45:50 +03004118 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
Avi Kivity11c363b2013-01-19 19:51:54 +02004119 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
Avi Kivityd4b43252011-09-13 10:45:50 +03004120 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4121 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004122 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004123 /* 0xB8 - 0xBF */
4124 N, N,
Takuya Yoshikawace7faab2011-11-22 15:17:48 +09004125 G(BitOp, group8),
Avi Kivity11c363b2013-01-19 19:51:54 +02004126 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4127 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004128 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity92998362012-06-13 12:25:06 +03004129 /* 0xC0 - 0xC7 */
Avi Kivitye47a5f52013-02-09 11:31:51 +02004130 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
Nadav Amit39f062f2014-11-26 15:47:18 +02004131 N, ID(0, &instr_dual_0f_c3),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004132 N, N, N, GD(0, &group9),
Avi Kivity92998362012-06-13 12:25:06 +03004133 /* 0xC8 - 0xCF */
4134 X8(I(DstReg, em_bswap)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004135 /* 0xD0 - 0xDF */
4136 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4137 /* 0xE0 - 0xEF */
Alex Williamson0a370272014-07-11 11:56:31 -06004138 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4139 N, N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004140 /* 0xF0 - 0xFF */
4141 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4142};
4143
Nadav Amit39f062f2014-11-26 15:47:18 +02004144static const struct instr_dual instr_dual_0f_38_f0 = {
4145 I(DstReg | SrcMem | Mov, em_movbe), N
4146};
4147
4148static const struct instr_dual instr_dual_0f_38_f1 = {
4149 I(DstMem | SrcReg | Mov, em_movbe), N
4150};
4151
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004152static const struct gprefix three_byte_0f_38_f0 = {
Nadav Amit39f062f2014-11-26 15:47:18 +02004153 ID(0, &instr_dual_0f_38_f0), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004154};
4155
4156static const struct gprefix three_byte_0f_38_f1 = {
Nadav Amit39f062f2014-11-26 15:47:18 +02004157 ID(0, &instr_dual_0f_38_f1), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004158};
4159
4160/*
4161 * The insns below are indexed by the third opcode byte; within each
4162 * entry the actual insn is selected by the mandatory prefix.
4163 */
4164static const struct opcode opcode_map_0f_38[256] = {
4165 /* 0x00 - 0x7f */
4166 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
Borislav Petkov84cffe42013-10-29 12:54:56 +01004167 /* 0x80 - 0xef */
4168 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4169 /* 0xf0 - 0xf1 */
Nadav Amit53bb4f72014-12-07 11:49:42 +02004170 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4171 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
Borislav Petkov84cffe42013-10-29 12:54:56 +01004172 /* 0xf2 - 0xff */
4173 N, N, X4(N), X8(N)
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004174};
4175
Avi Kivity73fba5f2010-07-29 15:11:53 +03004176#undef D
4177#undef N
4178#undef G
4179#undef GD
4180#undef I
Avi Kivityaa97bb42010-01-20 18:09:23 +02004181#undef GP
Joerg Roedel01de8b02011-04-04 12:39:31 +02004182#undef EXT
Avi Kivity73fba5f2010-07-29 15:11:53 +03004183
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004184#undef D2bv
Joerg Roedelf6511932011-04-04 12:39:35 +02004185#undef D2bvIP
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004186#undef I2bv
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004187#undef I2bvIP
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09004188#undef F6ALU
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004189
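/*
 * Immediate operands are never wider than 4 bytes even with a 64-bit
 * operand size; the one exception, the full 64-bit immediate of MOV
 * reg, imm64, is handled separately via OpImm64.
 */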
Avi Kivity9dac77f2011-06-01 15:34:25 +03004190static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
Avi Kivity39f21ee2010-08-18 19:20:21 +03004191{
4192 unsigned size;
4193
Avi Kivity9dac77f2011-06-01 15:34:25 +03004194 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004195 if (size == 8)
4196 size = 4;
4197 return size;
4198}
4199
4200static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4201 unsigned size, bool sign_extension)
4202{
Avi Kivity39f21ee2010-08-18 19:20:21 +03004203 int rc = X86EMUL_CONTINUE;
4204
4205 op->type = OP_IMM;
4206 op->bytes = size;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004207 op->addr.mem.ea = ctxt->_eip;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004208 /* NB. Immediates are sign-extended as necessary. */
4209 switch (op->bytes) {
4210 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004211 op->val = insn_fetch(s8, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004212 break;
4213 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004214 op->val = insn_fetch(s16, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004215 break;
4216 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004217 op->val = insn_fetch(s32, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004218 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004219 case 8:
4220 op->val = insn_fetch(s64, ctxt);
4221 break;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004222 }
4223 if (!sign_extension) {
4224 switch (op->bytes) {
4225 case 1:
4226 op->val &= 0xff;
4227 break;
4228 case 2:
4229 op->val &= 0xffff;
4230 break;
4231 case 4:
4232 op->val &= 0xffffffff;
4233 break;
4234 }
4235 }
4236done:
4237 return rc;
4238}
4239
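/*
 * Decode one operand according to the OpXxx selector pulled out of the
 * instruction flags.  Memory operands reuse ctxt->memop, which was
 * filled in earlier by decode_modrm()/decode_abs().
 */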
Avi Kivitya99455492011-09-13 10:45:41 +03004240static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4241 unsigned d)
4242{
4243 int rc = X86EMUL_CONTINUE;
4244
4245 switch (d) {
4246 case OpReg:
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004247 decode_register_operand(ctxt, op);
Avi Kivitya99455492011-09-13 10:45:41 +03004248 break;
4249 case OpImmUByte:
Avi Kivity608aabe2011-09-13 10:45:45 +03004250 rc = decode_imm(ctxt, op, 1, false);
Avi Kivitya99455492011-09-13 10:45:41 +03004251 break;
4252 case OpMem:
Avi Kivity41ddf972011-09-13 10:45:48 +03004253 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity0fe59122011-09-13 10:45:47 +03004254 mem_common:
Avi Kivitya99455492011-09-13 10:45:41 +03004255 *op = ctxt->memop;
4256 ctxt->memopp = op;
Paolo Bonzini96888972014-04-01 14:54:19 +02004257 if (ctxt->d & BitOp)
Avi Kivitya99455492011-09-13 10:45:41 +03004258 fetch_bit_operand(ctxt);
4259 op->orig_val = op->val;
4260 break;
Avi Kivity41ddf972011-09-13 10:45:48 +03004261 case OpMem64:
Nadav Amitaaa05f22014-06-02 18:34:10 +03004262 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
Avi Kivity41ddf972011-09-13 10:45:48 +03004263 goto mem_common;
Avi Kivitya99455492011-09-13 10:45:41 +03004264 case OpAcc:
4265 op->type = OP_REG;
4266 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004267 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Avi Kivitya99455492011-09-13 10:45:41 +03004268 fetch_register_operand(op);
4269 op->orig_val = op->val;
4270 break;
Avi Kivity820207c2013-02-09 11:31:45 +02004271 case OpAccLo:
4272 op->type = OP_REG;
4273 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4274 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4275 fetch_register_operand(op);
4276 op->orig_val = op->val;
4277 break;
4278 case OpAccHi:
4279 if (ctxt->d & ByteOp) {
4280 op->type = OP_NONE;
4281 break;
4282 }
4283 op->type = OP_REG;
4284 op->bytes = ctxt->op_bytes;
4285 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4286 fetch_register_operand(op);
4287 op->orig_val = op->val;
4288 break;
Avi Kivitya99455492011-09-13 10:45:41 +03004289 case OpDI:
4290 op->type = OP_MEM;
4291 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4292 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004293 register_address(ctxt, VCPU_REGS_RDI);
Avi Kivitya99455492011-09-13 10:45:41 +03004294 op->addr.mem.seg = VCPU_SREG_ES;
4295 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004296 op->count = 1;
Avi Kivitya99455492011-09-13 10:45:41 +03004297 break;
4298 case OpDX:
4299 op->type = OP_REG;
4300 op->bytes = 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004301 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivitya99455492011-09-13 10:45:41 +03004302 fetch_register_operand(op);
4303 break;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004304 case OpCL:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004305 op->type = OP_IMM;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004306 op->bytes = 1;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004307 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004308 break;
4309 case OpImmByte:
4310 rc = decode_imm(ctxt, op, 1, true);
4311 break;
4312 case OpOne:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004313 op->type = OP_IMM;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004314 op->bytes = 1;
4315 op->val = 1;
4316 break;
4317 case OpImm:
4318 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4319 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004320 case OpImm64:
4321 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4322 break;
Avi Kivity28867ce2012-01-16 15:08:44 +02004323 case OpMem8:
4324 ctxt->memop.bytes = 1;
Gleb Natapov660696d2013-04-24 13:38:36 +03004325 if (ctxt->memop.type == OP_REG) {
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02004326 ctxt->memop.addr.reg = decode_register(ctxt,
4327 ctxt->modrm_rm, true);
Gleb Natapov660696d2013-04-24 13:38:36 +03004328 fetch_register_operand(&ctxt->memop);
4329 }
Avi Kivity28867ce2012-01-16 15:08:44 +02004330 goto mem_common;
Avi Kivity0fe59122011-09-13 10:45:47 +03004331 case OpMem16:
4332 ctxt->memop.bytes = 2;
4333 goto mem_common;
4334 case OpMem32:
4335 ctxt->memop.bytes = 4;
4336 goto mem_common;
4337 case OpImmU16:
4338 rc = decode_imm(ctxt, op, 2, false);
4339 break;
4340 case OpImmU:
4341 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4342 break;
4343 case OpSI:
4344 op->type = OP_MEM;
4345 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4346 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004347 register_address(ctxt, VCPU_REGS_RSI);
Bandan Das573e80f2014-04-16 12:46:13 -04004348 op->addr.mem.seg = ctxt->seg_override;
Avi Kivity0fe59122011-09-13 10:45:47 +03004349 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004350 op->count = 1;
Avi Kivity0fe59122011-09-13 10:45:47 +03004351 break;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004352 case OpXLat:
4353 op->type = OP_MEM;
4354 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4355 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004356 address_mask(ctxt,
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004357 reg_read(ctxt, VCPU_REGS_RBX) +
4358 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
Bandan Das573e80f2014-04-16 12:46:13 -04004359 op->addr.mem.seg = ctxt->seg_override;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004360 op->val = 0;
4361 break;
Avi Kivity0fe59122011-09-13 10:45:47 +03004362 case OpImmFAddr:
4363 op->type = OP_IMM;
4364 op->addr.mem.ea = ctxt->_eip;
4365 op->bytes = ctxt->op_bytes + 2;
4366 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4367 break;
4368 case OpMemFAddr:
4369 ctxt->memop.bytes = ctxt->op_bytes + 2;
4370 goto mem_common;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004371 case OpES:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004372 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004373 op->val = VCPU_SREG_ES;
4374 break;
4375 case OpCS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004376 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004377 op->val = VCPU_SREG_CS;
4378 break;
4379 case OpSS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004380 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004381 op->val = VCPU_SREG_SS;
4382 break;
4383 case OpDS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004384 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004385 op->val = VCPU_SREG_DS;
4386 break;
4387 case OpFS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004388 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004389 op->val = VCPU_SREG_FS;
4390 break;
4391 case OpGS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004392 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004393 op->val = VCPU_SREG_GS;
4394 break;
Avi Kivitya99455492011-09-13 10:45:41 +03004395 case OpImplicit:
4396 /* Special instructions do their own operand decoding. */
4397 default:
4398 op->type = OP_NONE; /* Disable writeback. */
4399 break;
4400 }
4401
4402done:
4403 return rc;
4404}
4405
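/*
 * Decode one instruction from 'insn'/'insn_len' (or by fetching from
 * guest memory when insn_len is 0): consume legacy and REX prefixes,
 * read the opcode byte(s), walk the group/prefix/escape sub-tables,
 * and decode the source, second-source and destination operands.
 * Returns EMULATION_OK or EMULATION_FAILED.
 */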
Takuya Yoshikawaef5d75c2011-05-15 00:57:43 +09004406int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004407{
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004408 int rc = X86EMUL_CONTINUE;
4409 int mode = ctxt->mode;
Avi Kivity46561642011-04-24 14:09:59 +03004410 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004411 bool op_prefix = false;
Bandan Das573e80f2014-04-16 12:46:13 -04004412 bool has_seg_override = false;
Avi Kivity46561642011-04-24 14:09:59 +03004413 struct opcode opcode;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004414
Avi Kivityf09ed832011-09-13 10:45:40 +03004415 ctxt->memop.type = OP_NONE;
4416 ctxt->memopp = NULL;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004417 ctxt->_eip = ctxt->eip;
Paolo Bonzini17052f12014-05-06 16:33:01 +02004418 ctxt->fetch.ptr = ctxt->fetch.data;
4419 ctxt->fetch.end = ctxt->fetch.data + insn_len;
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004420 ctxt->opcode_len = 1;
Andre Przywaradc25e892010-12-21 11:12:07 +01004421 if (insn_len > 0)
Avi Kivity9dac77f2011-06-01 15:34:25 +03004422 memcpy(ctxt->fetch.data, insn, insn_len);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004423 else {
Paolo Bonzini9506d572014-05-06 13:05:25 +02004424 rc = __do_insn_fetch_bytes(ctxt, 1);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004425 if (rc != X86EMUL_CONTINUE)
4426 return rc;
4427 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004428
4429 switch (mode) {
4430 case X86EMUL_MODE_REAL:
4431 case X86EMUL_MODE_VM86:
4432 case X86EMUL_MODE_PROT16:
4433 def_op_bytes = def_ad_bytes = 2;
4434 break;
4435 case X86EMUL_MODE_PROT32:
4436 def_op_bytes = def_ad_bytes = 4;
4437 break;
4438#ifdef CONFIG_X86_64
4439 case X86EMUL_MODE_PROT64:
4440 def_op_bytes = 4;
4441 def_ad_bytes = 8;
4442 break;
4443#endif
4444 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004445 return EMULATION_FAILED;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004446 }
4447
Avi Kivity9dac77f2011-06-01 15:34:25 +03004448 ctxt->op_bytes = def_op_bytes;
4449 ctxt->ad_bytes = def_ad_bytes;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004450
4451 /* Legacy prefixes. */
4452 for (;;) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004453 switch (ctxt->b = insn_fetch(u8, ctxt)) {
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004454 case 0x66: /* operand-size override */
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004455 op_prefix = true;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004456 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004457 ctxt->op_bytes = def_op_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004458 break;
4459 case 0x67: /* address-size override */
4460 if (mode == X86EMUL_MODE_PROT64)
4461 /* switch between 4/8 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004462 ctxt->ad_bytes = def_ad_bytes ^ 12;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004463 else
4464 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004465 ctxt->ad_bytes = def_ad_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004466 break;
4467 case 0x26: /* ES override */
4468 case 0x2e: /* CS override */
4469 case 0x36: /* SS override */
4470 case 0x3e: /* DS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004471 has_seg_override = true;
4472 ctxt->seg_override = (ctxt->b >> 3) & 3;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004473 break;
4474 case 0x64: /* FS override */
4475 case 0x65: /* GS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004476 has_seg_override = true;
4477 ctxt->seg_override = ctxt->b & 7;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004478 break;
4479 case 0x40 ... 0x4f: /* REX */
4480 if (mode != X86EMUL_MODE_PROT64)
4481 goto done_prefixes;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004482 ctxt->rex_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004483 continue;
4484 case 0xf0: /* LOCK */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004485 ctxt->lock_prefix = 1;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004486 break;
4487 case 0xf2: /* REPNE/REPNZ */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004488 case 0xf3: /* REP/REPE/REPZ */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004489 ctxt->rep_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004490 break;
4491 default:
4492 goto done_prefixes;
4493 }
4494
4495 /* Any legacy prefix after a REX prefix nullifies its effect. */
4496
Avi Kivity9dac77f2011-06-01 15:34:25 +03004497 ctxt->rex_prefix = 0;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004498 }
4499
4500done_prefixes:
4501
4502 /* REX prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004503 if (ctxt->rex_prefix & 8)
4504 ctxt->op_bytes = 8; /* REX.W */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004505
4506 /* Opcode byte(s). */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004507 opcode = opcode_table[ctxt->b];
Wei Yongjund3ad6242010-08-05 16:34:39 +08004508 /* Two-byte opcode? */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004509 if (ctxt->b == 0x0f) {
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004510 ctxt->opcode_len = 2;
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004511 ctxt->b = insn_fetch(u8, ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004512 opcode = twobyte_table[ctxt->b];
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004513
4514 /* 0F_38 opcode map */
4515 if (ctxt->b == 0x38) {
4516 ctxt->opcode_len = 3;
4517 ctxt->b = insn_fetch(u8, ctxt);
4518 opcode = opcode_map_0f_38[ctxt->b];
4519 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004520 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004521 ctxt->d = opcode.flags;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004522
Takuya Yoshikawa9f4260e2012-04-30 17:48:25 +09004523 if (ctxt->d & ModRM)
4524 ctxt->modrm = insn_fetch(u8, ctxt);
4525
Nadav Amit7fe864d2014-06-02 18:34:03 +03004526 /* vex-prefix instructions are not implemented */
4527 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
Nadav Amitd14cb5d2014-11-02 11:54:58 +02004528 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
Nadav Amit7fe864d2014-06-02 18:34:03 +03004529 ctxt->d = NotImpl;
4530 }
4531
Avi Kivity9dac77f2011-06-01 15:34:25 +03004532 while (ctxt->d & GroupMask) {
4533 switch (ctxt->d & GroupMask) {
Avi Kivity46561642011-04-24 14:09:59 +03004534 case Group:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004535 goffset = (ctxt->modrm >> 3) & 7;
Avi Kivity46561642011-04-24 14:09:59 +03004536 opcode = opcode.u.group[goffset];
4537 break;
4538 case GroupDual:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004539 goffset = (ctxt->modrm >> 3) & 7;
4540 if ((ctxt->modrm >> 6) == 3)
Avi Kivity46561642011-04-24 14:09:59 +03004541 opcode = opcode.u.gdual->mod3[goffset];
4542 else
4543 opcode = opcode.u.gdual->mod012[goffset];
4544 break;
4545 case RMExt:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004546 goffset = ctxt->modrm & 7;
Joerg Roedel01de8b02011-04-04 12:39:31 +02004547 opcode = opcode.u.group[goffset];
Avi Kivity46561642011-04-24 14:09:59 +03004548 break;
4549 case Prefix:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004550 if (ctxt->rep_prefix && op_prefix)
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004551 return EMULATION_FAILED;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004552 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
Avi Kivity46561642011-04-24 14:09:59 +03004553 switch (simd_prefix) {
4554 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4555 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4556 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4557 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4558 }
4559 break;
Gleb Natapov045a2822012-12-20 16:57:43 +02004560 case Escape:
4561 if (ctxt->modrm > 0xbf)
4562 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4563 else
4564 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4565 break;
Nadav Amit39f062f2014-11-26 15:47:18 +02004566 case InstrDual:
4567 if ((ctxt->modrm >> 6) == 3)
4568 opcode = opcode.u.idual->mod3;
4569 else
4570 opcode = opcode.u.idual->mod012;
4571 break;
Avi Kivity46561642011-04-24 14:09:59 +03004572 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004573 return EMULATION_FAILED;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004574 }
Avi Kivity46561642011-04-24 14:09:59 +03004575
Avi Kivityb1ea50b2011-09-13 10:45:42 +03004576 ctxt->d &= ~(u64)GroupMask;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004577 ctxt->d |= opcode.flags;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004578 }
4579
Paolo Bonzinie24186e2014-03-27 12:00:57 +01004580 /* Unrecognised? */
4581 if (ctxt->d == 0)
4582 return EMULATION_FAILED;
4583
Avi Kivity9dac77f2011-06-01 15:34:25 +03004584 ctxt->execute = opcode.u.execute;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004585
Nadav Amit3a6095a2014-08-13 16:50:13 +03004586 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4587 return EMULATION_FAILED;
4588
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004589 if (unlikely(ctxt->d &
Nadav Amited9aad22014-11-02 11:55:00 +02004590 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4591 No16))) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004592 /*
4593 * These are copied unconditionally here, and checked unconditionally
4594 * in x86_emulate_insn.
4595 */
4596 ctxt->check_perm = opcode.check_perm;
4597 ctxt->intercept = opcode.intercept;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004598
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004599 if (ctxt->d & NotImpl)
4600 return EMULATION_FAILED;
Avi Kivityd8671622011-02-01 16:32:03 +02004601
Nadav Amit58b70752014-10-24 11:35:09 +03004602 if (mode == X86EMUL_MODE_PROT64) {
4603 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4604 ctxt->op_bytes = 8;
4605 else if (ctxt->d & NearBranch)
4606 ctxt->op_bytes = 8;
4607 }
Avi Kivity7f9b4b72010-08-01 14:46:54 +03004608
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004609 if (ctxt->d & Op3264) {
4610 if (mode == X86EMUL_MODE_PROT64)
4611 ctxt->op_bytes = 8;
4612 else
4613 ctxt->op_bytes = 4;
4614 }
4615
Nadav Amited9aad22014-11-02 11:55:00 +02004616 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4617 ctxt->op_bytes = 4;
4618
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004619 if (ctxt->d & Sse)
4620 ctxt->op_bytes = 16;
4621 else if (ctxt->d & Mmx)
4622 ctxt->op_bytes = 8;
4623 }
Avi Kivity1253791d2011-03-29 11:41:27 +02004624
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004625 /* ModRM and SIB bytes. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004626 if (ctxt->d & ModRM) {
Avi Kivityf09ed832011-09-13 10:45:40 +03004627 rc = decode_modrm(ctxt, &ctxt->memop);
Bandan Das573e80f2014-04-16 12:46:13 -04004628 if (!has_seg_override) {
4629 has_seg_override = true;
4630 ctxt->seg_override = ctxt->modrm_seg;
4631 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004632 } else if (ctxt->d & MemAbs)
Avi Kivityf09ed832011-09-13 10:45:40 +03004633 rc = decode_abs(ctxt, &ctxt->memop);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004634 if (rc != X86EMUL_CONTINUE)
4635 goto done;
4636
Bandan Das573e80f2014-04-16 12:46:13 -04004637 if (!has_seg_override)
4638 ctxt->seg_override = VCPU_SREG_DS;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004639
Bandan Das573e80f2014-04-16 12:46:13 -04004640 ctxt->memop.addr.mem.seg = ctxt->seg_override;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004641
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004642 /*
4643 * Decode and fetch the source operand: register, memory
4644 * or immediate.
4645 */
Avi Kivity0fe59122011-09-13 10:45:47 +03004646 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004647 if (rc != X86EMUL_CONTINUE)
4648 goto done;
4649
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004650 /*
4651 * Decode and fetch the second source operand: register, memory
4652 * or immediate.
4653 */
Avi Kivity4dd6a572011-09-13 10:45:43 +03004654 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004655 if (rc != X86EMUL_CONTINUE)
4656 goto done;
4657
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004658 /* Decode and fetch the destination operand: register or memory. */
Avi Kivitya99455492011-09-13 10:45:41 +03004659 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004660
Bandan Das41061cd2014-04-16 12:46:14 -04004661 if (ctxt->rip_relative)
Nadav Amit1c1c35a2014-11-19 17:43:09 +02004662 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4663 ctxt->memopp->addr.mem.ea + ctxt->_eip);
Avi Kivitycb16c342011-06-19 19:21:11 +03004664
Paolo Bonzinia430c912014-10-23 14:54:14 +02004665done:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004666 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004667}
4668
Xiao Guangrong1cb3f3a2011-09-22 17:02:48 +08004669bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4670{
4671 return ctxt->d & PageTable;
4672}
4673
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004674static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4675{
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004676 /* The second termination condition only applies for REPE
4677 * and REPNE. Test if the repeat string operation prefix is
4678 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
4679 * corresponding termination condition according to:
4680 * - if REPE/REPZ and ZF = 0 then done
4681 * - if REPNE/REPNZ and ZF = 1 then done
4682 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004683 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4684 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4685 && (((ctxt->rep_prefix == REPE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004686 ((ctxt->eflags & EFLG_ZF) == 0))
Avi Kivity9dac77f2011-06-01 15:34:25 +03004687 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004688 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4689 return true;
4690
4691 return false;
4692}
4693
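/*
 * Execute FWAIT under an exception fixup so that a pending x87 fault
 * in the guest FPU state surfaces as #MF for the guest rather than a
 * host fault.
 */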
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004694static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4695{
4696 bool fault = false;
4697
4698 ctxt->ops->get_fpu(ctxt);
4699 asm volatile("1: fwait \n\t"
4700 "2: \n\t"
4701 ".pushsection .fixup,\"ax\" \n\t"
4702 "3: \n\t"
4703 "movb $1, %[fault] \n\t"
4704 "jmp 2b \n\t"
4705 ".popsection \n\t"
4706 _ASM_EXTABLE(1b, 3b)
Avi Kivity38e8a2d2012-04-22 15:12:50 +03004707 : [fault]"+qm"(fault));
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004708 ctxt->ops->put_fpu(ctxt);
4709
4710 if (unlikely(fault))
4711 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4712
4713 return X86EMUL_CONTINUE;
4714}
4715
4716static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4717 struct operand *op)
4718{
4719 if (op->type == OP_MM)
4720 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4721}
4722
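/*
 * Invoke a fastop stub: the stubs for the 2/4/8-byte operand sizes
 * follow the byte-sized stub at FASTOP_SIZE strides, the guest's
 * arithmetic flags are moved into and out of the host EFLAGS around
 * the call, and a stub that returns with fop cleared signals a #DE
 * to be injected.
 */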
Avi Kivitye28bbd42013-01-04 16:18:48 +02004723static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4724{
4725 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
Avi Kivityb9fa4092013-02-09 11:31:48 +02004726 if (!(ctxt->d & ByteOp))
4727 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
Avi Kivitye28bbd42013-01-04 16:18:48 +02004728 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004729 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4730 [fastop]"+S"(fop)
4731 : "c"(ctxt->src2.val));
Avi Kivitye28bbd42013-01-04 16:18:48 +02004732 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004733 if (!fop) /* exception is returned in fop variable */
4734 return emulate_de(ctxt);
Avi Kivitye28bbd42013-01-04 16:18:48 +02004735 return X86EMUL_CONTINUE;
4736}
Avi Kivitydd856ef2012-08-27 23:46:17 +03004737
Bandan Das14985072014-04-16 12:46:09 -04004738void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4739{
Bandan Das573e80f2014-04-16 12:46:13 -04004740 memset(&ctxt->rip_relative, 0,
4741 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
Bandan Das14985072014-04-16 12:46:09 -04004742
Bandan Das14985072014-04-16 12:46:09 -04004743 ctxt->io_read.pos = 0;
4744 ctxt->io_read.end = 0;
Bandan Das14985072014-04-16 12:46:09 -04004745 ctxt->mem_read.end = 0;
4746}
4747
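/*
 * Execute an already-decoded instruction: apply the LOCK, CPL,
 * protected-mode and intercept checks, fetch the memory source
 * operands, then dispatch to the ->execute/fastop handler and write
 * the result back.
 */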
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09004748int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004749{
Mathias Krause0225fb52012-08-30 01:30:16 +02004750 const struct x86_emulate_ops *ops = ctxt->ops;
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09004751 int rc = X86EMUL_CONTINUE;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004752 int saved_dst_type = ctxt->dst.type;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004753
Avi Kivity9dac77f2011-06-01 15:34:25 +03004754 ctxt->mem_read.pos = 0;
Glauber Costa310b5d32009-05-12 16:21:06 -04004755
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004756 /* LOCK prefix is allowed only with some instructions */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004757 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004758 rc = emulate_ud(ctxt);
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004759 goto done;
4760 }
4761
Avi Kivity9dac77f2011-06-01 15:34:25 +03004762 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004763 rc = emulate_ud(ctxt);
Avi Kivity081bca02010-08-26 11:06:15 +03004764 goto done;
4765 }
4766
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004767 if (unlikely(ctxt->d &
4768 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4769 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4770 (ctxt->d & Undefined)) {
4771 rc = emulate_ud(ctxt);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004772 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004773 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004774
		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

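		/*
		 * When emulating on behalf of a nested guest, the instruction
		 * may be intercepted by the L1 hypervisor.  Intercepts are
		 * checked at three points (pre-exception, post-exception and
		 * post-memory-access), which roughly mirrors the order in
		 * which hardware evaluates SVM intercepts.
		 */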
		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
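	/*
	 * Keep the original destination value around; locked instructions
	 * write back with a cmpxchg against it.
	 */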
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

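	/*
	 * While a REP-prefixed string instruction is still iterating, RF is
	 * kept set so that restarting it does not re-trigger an instruction
	 * breakpoint; it is cleared again once the instruction completes.
	 */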
	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;

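	/*
	 * Most opcodes supply an ->execute callback (or a fastop stub) from
	 * the decode tables; only instructions that have not been converted
	 * yet fall through to the opcode switches below.
	 */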
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
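	/*
	 * Commit the results: a few instructions also modify their source
	 * operand (SrcWrite), while NoWrite suppresses the destination
	 * writeback entirely (e.g. for compares).
	 */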
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

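	/*
	 * For REP string instructions, RCX is reduced by the number of units
	 * actually processed.  If the instruction has not finished, it is
	 * normally restarted straight away; periodically (and whenever the
	 * PIO read-ahead buffer runs dry) RIP is left unchanged instead, so
	 * the guest re-executes the instruction and pending events can be
	 * delivered.
	 */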
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
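	/*
	 * A propagated fault is latched here for the caller to inject;
	 * otherwise the internal X86EMUL_* status is translated into one of
	 * the EMULATION_* return values.
	 */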
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
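	/* Two-byte (0x0f-prefixed) opcodes that still lack an ->execute callback. */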
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}