/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
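
/*
 * Each operand type above is packed into a 5-bit (OpBits) field of the
 * opcode flags: destination at bit 1 (DstShift), source at bit 6
 * (SrcShift) and second source at bit 31 (Src2Shift), as defined below.
 */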

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)      /* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
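
/*
 * For example, FASTOP2(add) below emits em_add as four FASTOP_SIZE-
 * aligned stubs -- addb, addw, addl and (on 64-bit) addq -- so the
 * dispatcher can reach the stub for an N-byte operand at
 * em_add + __ffs(N) * FASTOP_SIZE.
 */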

struct fastop;

struct opcode {
        u64 flags : 56;
        u64 intercept : 8;
        union {
                int (*execute)(struct x86_emulate_ctxt *ctxt);
                const struct opcode *group;
                const struct group_dual *gdual;
                const struct gprefix *gprefix;
                const struct escape *esc;
                void (*fastop)(struct fastop *fake);
        } u;
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
        struct opcode mod012[8];
        struct opcode mod3[8];
};

struct gprefix {
        struct opcode pfx_no;
        struct opcode pfx_66;
        struct opcode pfx_f2;
        struct opcode pfx_f3;
};

struct escape {
        struct opcode op[8];
        struct opcode high[64];
};

/* EFLAGS bit definitions. */
#define EFLG_ID   (1<<21)
#define EFLG_VIP  (1<<20)
#define EFLG_VIF  (1<<19)
#define EFLG_AC   (1<<18)
#define EFLG_VM   (1<<17)
#define EFLG_RF   (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT   (1<<14)
#define EFLG_OF   (1<<11)
#define EFLG_DF   (1<<10)
#define EFLG_IF   (1<<9)
#define EFLG_TF   (1<<8)
#define EFLG_SF   (1<<7)
#define EFLG_ZF   (1<<6)
#define EFLG_AF   (1<<4)
#define EFLG_PF   (1<<2)
#define EFLG_CF   (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

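/*
 * Guest GPRs are cached lazily: reg_read() pulls a register from the
 * vcpu on first use and marks it valid; reg_write()/reg_rmw() also
 * mark it dirty, so writeback_registers() flushes only the registers
 * an instruction actually changed.
 */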
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        if (!(ctxt->regs_valid & (1 << nr))) {
                ctxt->regs_valid |= 1 << nr;
                ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
        }
        return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        ctxt->regs_valid |= 1 << nr;
        ctxt->regs_dirty |= 1 << nr;
        return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        reg_read(ctxt, nr);
        return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
        unsigned reg;

        for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
                ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
        ctxt->regs_dirty = 0;
        ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
        extern void em_##op(struct fastop *fake); \
        asm(".pushsection .text, \"ax\" \n\t" \
            ".global em_" #op " \n\t" \
            FOP_ALIGN \
            "em_" #op ": \n\t"

#define FOP_END \
            ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
        FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
        FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
        FOP_START(op) \
        FOP1E(op##b, al) \
        FOP1E(op##w, ax) \
        FOP1E(op##l, eax) \
        ON64(FOP1E(op##q, rax)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
        FOP_START(name) \
        FOP1E(op, cl) \
        FOP1E(op, cx) \
        FOP1E(op, ecx) \
        ON64(FOP1E(op, rcx)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
        FOP_START(name) \
        FOP1EEX(op, cl) \
        FOP1EEX(op, cx) \
        FOP1EEX(op, ecx) \
        ON64(FOP1EEX(op, rcx)) \
        FOP_END

#define FOP2E(op, dst, src) \
        FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
        FOP_START(op) \
        FOP2E(op##b, al, dl) \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
        FOP_START(op) \
        FOP2E(op##b, al, cl) \
        FOP2E(op##w, ax, cl) \
        FOP2E(op##l, eax, cl) \
        ON64(FOP2E(op##q, rax, cl)) \
        FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
        FOP_START(name) \
        FOP2E(op##b, dl, al) \
        FOP2E(op##w, dx, ax) \
        FOP2E(op##l, edx, eax) \
        ON64(FOP2E(op##q, rdx, rax)) \
        FOP_END

#define FOP3E(op, dst, src, src2) \
        FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP3E(op##w, ax, dx, cl) \
        FOP3E(op##l, eax, edx, cl) \
        ON64(FOP3E(op##q, rax, rdx, cl)) \
        FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
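
/*
 * Each SETcc stub emitted below is padded to 4 bytes, so test_cc()
 * further down can jump to the stub for condition code c at
 * em_setcc + 4 * c; the sixteen stubs follow the hardware encoding
 * of the condition codes (0x0 = O ... 0xf = NLE).
 */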

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
                                    enum x86_intercept intercept,
                                    enum x86_intercept_stage stage)
{
        struct x86_instruction_info info = {
                .intercept  = intercept,
                .rep_prefix = ctxt->rep_prefix,
                .modrm_mod  = ctxt->modrm_mod,
                .modrm_reg  = ctxt->modrm_reg,
                .modrm_rm   = ctxt->modrm_rm,
                .src_val    = ctxt->src.val64,
                .dst_val    = ctxt->dst.val64,
                .src_bytes  = ctxt->src.bytes,
                .dst_bytes  = ctxt->dst.bytes,
                .ad_bytes   = ctxt->ad_bytes,
                .next_rip   = ctxt->eip,
        };

        return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
        *dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
        return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
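
/*
 * ad_mask() is only meaningful for 2- and 4-byte address sizes; callers
 * check ad_bytes == sizeof(unsigned long) first, since an 8-byte address
 * size would make the shift above undefined on 64-bit builds.
 */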

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
        u16 sel;
        struct desc_struct ss;

        if (ctxt->mode == X86EMUL_MODE_PROT64)
                return ~0UL;
        ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
        return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
        return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        if (ctxt->ad_bytes == sizeof(unsigned long))
                return reg;
        else
                return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
        assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
        ulong mask;

        if (ctxt->ad_bytes == sizeof(unsigned long))
                mask = ~0UL;
        else
                mask = ad_mask(ctxt);
        masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
        masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
        u32 limit = get_desc_limit(desc);

        return desc->g ? (limit << 12) | 0xfff : limit;
}

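/*
 * In long mode the bases of CS, DS, ES and SS are treated as zero;
 * only FS and GS keep a meaningful base address.
 */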
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
                return 0;

        return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
                             u32 error, bool valid)
{
        WARN_ON(vec > 0x1f);
        ctxt->exception.vector = vec;
        ctxt->exception.error_code = error;
        ctxt->exception.error_code_valid = valid;
        return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

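/*
 * Branch targets are truncated to the current operand size.  In 64-bit
 * mode a 64-bit target must be canonical, and a 32-bit code segment
 * requires the upper half to be zero; otherwise the assignment fails
 * with #GP(0).
 */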
static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
                                 int cs_l)
{
        switch (ctxt->op_bytes) {
        case 2:
                ctxt->_eip = (u16)dst;
                break;
        case 4:
                ctxt->_eip = (u32)dst;
                break;
#ifdef CONFIG_X86_64
        case 8:
                if ((cs_l && is_noncanonical_address(dst)) ||
                    (!cs_l && (dst >> 32) != 0))
                        return emulate_gp(ctxt, 0);
                ctxt->_eip = dst;
                break;
#endif
        default:
                WARN(1, "unsupported eip assignment size\n");
        }
        return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
        return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
        return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
        u16 selector;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
        return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
                                 unsigned seg)
{
        u16 dummy;
        u32 base3;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
        if (likely(size < 16))
                return false;

        if (ctxt->d & Aligned)
                return true;
        else if (ctxt->d & Unaligned)
                return false;
        else if (ctxt->d & Avx)
                return false;
        else
                return true;
}

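/*
 * __linearize() applies the segment checks (limit, type, privilege)
 * and yields the linear address of an access.  *max_size reports how
 * many bytes starting at addr are valid, which the instruction-fetch
 * path below uses to prefetch as much as it safely can.
 */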
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                                       struct segmented_address addr,
                                       unsigned *max_size, unsigned size,
                                       bool write, bool fetch,
                                       ulong *linear)
{
        struct desc_struct desc;
        bool usable;
        ulong la;
        u32 lim;
        u16 sel;
        unsigned cpl;

        la = seg_base(ctxt, addr.seg) +
             (fetch || ctxt->ad_bytes == 8 ? addr.ea : (u32)addr.ea);
        *max_size = 0;
        switch (ctxt->mode) {
        case X86EMUL_MODE_PROT64:
                if (is_noncanonical_address(la))
                        return emulate_gp(ctxt, 0);

                *max_size = min_t(u64, ~0u, (1ull << 48) - la);
                if (size > *max_size)
                        goto bad;
                break;
        default:
                usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
                                                addr.seg);
                if (!usable)
                        goto bad;
                /* code segment in protected mode or read-only data segment */
                if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
                     || !(desc.type & 2)) && write)
                        goto bad;
                /* unreadable code segment */
                if (!fetch && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
                if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
                    (ctxt->d & NoBigReal)) {
                        /* la is between zero and 0xffff */
                        if (la > 0xffff)
                                goto bad;
                        *max_size = 0x10000 - la;
                } else if ((desc.type & 8) || !(desc.type & 4)) {
                        /* expand-up segment */
                        if (addr.ea > lim)
                                goto bad;
                        *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
                } else {
                        /* expand-down segment */
                        if (addr.ea <= lim)
                                goto bad;
                        lim = desc.d ? 0xffffffff : 0xffff;
                        if (addr.ea > lim)
                                goto bad;
                        *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
                }
                if (size > *max_size)
                        goto bad;
                cpl = ctxt->ops->cpl(ctxt);
                if (!fetch) {
                        /* data segment or readable code segment */
                        if (cpl > desc.dpl)
                                goto bad;
                } else if ((desc.type & 8) && !(desc.type & 4)) {
                        /* nonconforming code segment */
                        if (cpl != desc.dpl)
                                goto bad;
                } else if ((desc.type & 8) && (desc.type & 4)) {
                        /* conforming code segment */
                        if (cpl < desc.dpl)
                                goto bad;
                }
                break;
        }
        if (ctxt->mode != X86EMUL_MODE_PROT64)
                la &= (u32)-1;
        if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
                return emulate_gp(ctxt, 0);
        *linear = la;
        return X86EMUL_CONTINUE;
bad:
        if (addr.seg == VCPU_SREG_SS)
                return emulate_ss(ctxt, 0);
        else
                return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
                     struct segmented_address addr,
                     unsigned size, bool write,
                     ulong *linear)
{
        unsigned max_size;
        return __linearize(ctxt, addr, &max_size, size, write, false, linear);
}


static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
                              unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
        int rc;
        unsigned size, max_size;
        unsigned long linear;
        int cur_size = ctxt->fetch.end - ctxt->fetch.data;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = ctxt->eip + cur_size };

        /*
         * We do not know exactly how many bytes will be needed, and
         * __linearize is expensive, so fetch as much as possible.  We
         * just have to avoid going beyond the 15 byte limit, the end
         * of the segment, or the end of the page.
         *
         * __linearize is called with size 0 so that it does not do any
         * boundary check itself.  Instead, we use max_size to check
         * against op_size.
         */
        rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;

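        /* 15 ^ cur_size is 15 - cur_size, since cur_size never exceeds 15 */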
        size = min_t(unsigned, 15UL ^ cur_size, max_size);
        size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

        /*
         * One instruction can only straddle two pages,
         * and one has been loaded at the beginning of
         * x86_decode_insn.  So, if not enough bytes
         * still, we must have hit the 15-byte boundary.
         */
        if (unlikely(size < op_size))
                return emulate_gp(ctxt, 0);

        rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
                              size, &ctxt->exception);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;
        ctxt->fetch.end += size;
        return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
                                               unsigned size)
{
        unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

        if (unlikely(done_size < size))
                return __do_insn_fetch_bytes(ctxt, size - done_size);
        else
                return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)                                        \
({      _type _x;                                                       \
                                                                        \
        rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));                 \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
        ctxt->_eip += sizeof(_type);                                    \
        _x = *(_type __aligned(1) *) ctxt->fetch.ptr;                   \
        ctxt->fetch.ptr += sizeof(_type);                               \
        _x;                                                             \
})

#define insn_fetch_arr(_arr, _size, _ctxt)                              \
({                                                                      \
        rc = do_insn_fetch_bytes(_ctxt, _size);                         \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
        ctxt->_eip += (_size);                                          \
        memcpy(_arr, ctxt->fetch.ptr, _size);                           \
        ctxt->fetch.ptr += (_size);                                     \
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
                             int byteop)
{
        void *p;
        int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

        if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
                p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
        else
                p = reg_rmw(ctxt, modrm_reg);
        return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           u16 *size, unsigned long *address, int op_bytes)
{
        int rc;

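        /*
         * A 16-bit operand size still loads a 24-bit table base
         * (LGDT/LIDT), hence the 2 -> 3 byte adjustment below.
         */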
        if (op_bytes == 2)
                op_bytes = 3;
        *address = 0;
        rc = segmented_read_std(ctxt, addr, size, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        addr.ea += 2;
        rc = segmented_read_std(ctxt, addr, address, op_bytes);
        return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
        u8 rc;
        void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

        flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
        asm("push %[flags]; popf; call *%[fastop]"
            : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
        return rc;
}

static void fetch_register_operand(struct operand *op)
{
        switch (op->bytes) {
        case 1:
                op->val = *(u8 *)op->addr.reg;
                break;
        case 2:
                op->val = *(u16 *)op->addr.reg;
                break;
        case 4:
                op->val = *(u32 *)op->addr.reg;
                break;
        case 8:
                op->val = *(u64 *)op->addr.reg;
                break;
        }
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
        case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
        case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
        case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
        case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
        case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
        case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
        case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
        case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
        case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
        case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
        case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
        case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
        case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
        case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
                          int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
        case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
        case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
        case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
        case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
        case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
        case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
        case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
        case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
        case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
        case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
        case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
        case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
        case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
        case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
        case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
        case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
        case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
        case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
        case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
        case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
        case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
        case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
        case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
        case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
        case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
        case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
        case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
        case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fninit");
        ctxt->ops->put_fpu(ctxt);
        return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
        u16 fcw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstcw %0": "+m"(fcw));
        ctxt->ops->put_fpu(ctxt);

        /* force 2 byte destination */
        ctxt->dst.bytes = 2;
        ctxt->dst.val = fcw;

        return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
        u16 fsw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstsw %0": "+m"(fsw));
        ctxt->ops->put_fpu(ctxt);

        /* force 2 byte destination */
        ctxt->dst.bytes = 2;
        ctxt->dst.val = fsw;

        return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
                                    struct operand *op)
{
        unsigned reg = ctxt->modrm_reg;

        if (!(ctxt->d & ModRM))
                reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

        if (ctxt->d & Sse) {
                op->type = OP_XMM;
                op->bytes = 16;
                op->addr.xmm = reg;
                read_sse_reg(ctxt, &op->vec_val, reg);
                return;
        }
        if (ctxt->d & Mmx) {
                reg &= 7;
                op->type = OP_MM;
                op->bytes = 8;
                op->addr.mm = reg;
                return;
        }

        op->type = OP_REG;
        op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

        fetch_register_operand(op);
        op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
        if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
                ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                        struct operand *op)
{
        u8 sib;
        int index_reg, base_reg, scale;
        int rc = X86EMUL_CONTINUE;
        ulong modrm_ea = 0;

        ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
        index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
        base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

        ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
        ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
        ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
        ctxt->modrm_seg = VCPU_SREG_DS;

        if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
                                               ctxt->d & ByteOp);
                if (ctxt->d & Sse) {
                        op->type = OP_XMM;
                        op->bytes = 16;
                        op->addr.xmm = ctxt->modrm_rm;
                        read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
                        return rc;
                }
                if (ctxt->d & Mmx) {
                        op->type = OP_MM;
                        op->bytes = 8;
                        op->addr.mm = ctxt->modrm_rm & 7;
                        return rc;
                }
                fetch_register_operand(op);
                return rc;
        }

        op->type = OP_MEM;

        if (ctxt->ad_bytes == 2) {
                unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
                unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
                unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
                unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

                /* 16-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001177 switch (ctxt->modrm_mod) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001178 case 0:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001179 if (ctxt->modrm_rm == 6)
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001180 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001181 break;
1182 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001183 modrm_ea += insn_fetch(s8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001184 break;
1185 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001186 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001187 break;
1188 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001189 switch (ctxt->modrm_rm) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001190 case 0:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001191 modrm_ea += bx + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001192 break;
1193 case 1:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001194 modrm_ea += bx + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001195 break;
1196 case 2:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001197 modrm_ea += bp + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001198 break;
1199 case 3:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001200 modrm_ea += bp + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001201 break;
1202 case 4:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001203 modrm_ea += si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001204 break;
1205 case 5:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001206 modrm_ea += di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001207 break;
1208 case 6:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001209 if (ctxt->modrm_mod != 0)
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001210 modrm_ea += bp;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001211 break;
1212 case 7:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001213 modrm_ea += bx;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001214 break;
1215 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001216 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1217 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1218 ctxt->modrm_seg = VCPU_SREG_SS;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001219 modrm_ea = (u16)modrm_ea;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001220 } else {
1221 /* 32/64-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001222 if ((ctxt->modrm_rm & 7) == 4) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001223 sib = insn_fetch(u8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001224 index_reg |= (sib >> 3) & 7;
1225 base_reg |= sib & 7;
1226 scale = sib >> 6;
1227
Avi Kivity9dac77f2011-06-01 15:34:25 +03001228 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001229 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivitya6e34072012-06-10 17:15:39 +03001230 else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03001231 modrm_ea += reg_read(ctxt, base_reg);
Avi Kivitya6e34072012-06-10 17:15:39 +03001232 adjust_modrm_seg(ctxt, base_reg);
1233 }
Avi Kivitydc71d0f2008-06-15 21:23:17 -07001234 if (index_reg != 4)
Avi Kivitydd856ef2012-08-27 23:46:17 +03001235 modrm_ea += reg_read(ctxt, index_reg) << scale;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001236 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
Nadav Amit5b38ab82014-11-02 11:54:41 +02001237 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivity84411d82008-06-15 21:53:26 -07001238 if (ctxt->mode == X86EMUL_MODE_PROT64)
Avi Kivity9dac77f2011-06-01 15:34:25 +03001239 ctxt->rip_relative = 1;
Avi Kivitya6e34072012-06-10 17:15:39 +03001240 } else {
1241 base_reg = ctxt->modrm_rm;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001242 modrm_ea += reg_read(ctxt, base_reg);
Avi Kivitya6e34072012-06-10 17:15:39 +03001243 adjust_modrm_seg(ctxt, base_reg);
1244 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001245 switch (ctxt->modrm_mod) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001246 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001247 modrm_ea += insn_fetch(s8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001248 break;
1249 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001250 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001251 break;
1252 }
1253 }
Avi Kivity90de84f2010-11-17 15:28:21 +02001254 op->addr.mem.ea = modrm_ea;
Bandan Das41061cd2014-04-16 12:46:14 -04001255 if (ctxt->ad_bytes != 8)
1256 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1257
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001258done:
1259 return rc;
1260}
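
/*
 * Illustrative sketch, not part of the original flow: the 16-bit
 * ModR/M base/index pairs decoded above collapse to this table.
 * modrm16_ea_sketch() is a hypothetical helper; it assumes the
 * displacement has already been fetched and that mod != 0 when
 * rm == 6 (mod == 0, rm == 6 is a bare disp16, handled separately
 * above).
 */
static inline u16 modrm16_ea_sketch(int rm, u16 bx, u16 bp,
				    u16 si, u16 di, u16 disp)
{
	switch (rm) {
	case 0: return bx + si + disp;
	case 1: return bx + di + disp;
	case 2: return bp + si + disp;	/* default segment SS */
	case 3: return bp + di + disp;	/* default segment SS */
	case 4: return si + disp;
	case 5: return di + disp;
	case 6: return bp + disp;	/* default segment SS */
	default: return bx + disp;
	}
}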
1261
1262static int decode_abs(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001263 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001264{
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001265 int rc = X86EMUL_CONTINUE;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001266
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001267 op->type = OP_MEM;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001268 switch (ctxt->ad_bytes) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001269 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001270 op->addr.mem.ea = insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001271 break;
1272 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001273 op->addr.mem.ea = insn_fetch(u32, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001274 break;
1275 case 8:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001276 op->addr.mem.ea = insn_fetch(u64, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001277 break;
1278 }
1279done:
1280 return rc;
1281}
1282
Avi Kivity9dac77f2011-06-01 15:34:25 +03001283static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
Wei Yongjun35c843c2010-08-09 11:34:56 +08001284{
Sheng Yang7129eec2010-09-28 16:33:32 +08001285 long sv = 0, mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001286
Avi Kivity9dac77f2011-06-01 15:34:25 +03001287 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
Nadav Amit7dec5602014-06-15 16:12:57 +03001288 mask = ~((long)ctxt->dst.bytes * 8 - 1);
Wei Yongjun35c843c2010-08-09 11:34:56 +08001289
Avi Kivity9dac77f2011-06-01 15:34:25 +03001290 if (ctxt->src.bytes == 2)
1291 sv = (s16)ctxt->src.val & (s16)mask;
1292 else if (ctxt->src.bytes == 4)
1293 sv = (s32)ctxt->src.val & (s32)mask;
Nadav Amit7dec5602014-06-15 16:12:57 +03001294 else
1295 sv = (s64)ctxt->src.val & (s64)mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001296
Avi Kivity9dac77f2011-06-01 15:34:25 +03001297 ctxt->dst.addr.mem.ea += (sv >> 3);
Wei Yongjun35c843c2010-08-09 11:34:56 +08001298 }
Wei Yongjunba7ff2b2010-08-09 11:39:14 +08001299
1300 /* only subword offset */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001301 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001302}
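
/*
 * Worked example for fetch_bit_operand() above, restated as a
 * hypothetical helper: for "bt dword ptr [mem], 35" the operand is
 * 4 bytes, so mask == ~31, the EA moves forward by
 * (35 & ~31) >> 3 == 4 bytes, and the remaining in-operand bit index
 * is 35 & 31 == 3.
 */
static inline void bit_offset_split_sketch(long bitoff, unsigned op_bytes,
					   long *byte_adj, long *bit_idx)
{
	long mask = ~((long)op_bytes * 8 - 1);

	*byte_adj = (bitoff & mask) >> 3;	/* added to the dst EA */
	*bit_idx  = bitoff & ~mask;		/* sub-word bit offset */
}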
1303
Gleb Natapov9de41572010-04-28 19:15:22 +03001304static int read_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov9de41572010-04-28 19:15:22 +03001305 unsigned long addr, void *dest, unsigned size)
1306{
1307 int rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001308 struct read_cache *mc = &ctxt->mem_read;
Gleb Natapov9de41572010-04-28 19:15:22 +03001309
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001310 if (mc->pos < mc->end)
1311 goto read_cached;
Gleb Natapov9de41572010-04-28 19:15:22 +03001312
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001313 WARN_ON((mc->end + size) >= sizeof(mc->data));
Gleb Natapov9de41572010-04-28 19:15:22 +03001314
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001315 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1316 &ctxt->exception);
1317 if (rc != X86EMUL_CONTINUE)
1318 return rc;
1319
1320 mc->end += size;
1321
1322read_cached:
1323 memcpy(dest, mc->data + mc->pos, size);
1324 mc->pos += size;
Gleb Natapov9de41572010-04-28 19:15:22 +03001325 return X86EMUL_CONTINUE;
1326}
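
/*
 * Note on the read cache above (a sketch of the behaviour, no new
 * logic): bytes fetched once for the current instruction are replayed
 * from mem_read on re-entry, so an emulation that is restarted after
 * an exit observes exactly the same operand bytes instead of touching
 * guest memory a second time.
 */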
1327
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001328static int segmented_read(struct x86_emulate_ctxt *ctxt,
1329 struct segmented_address addr,
1330 void *data,
1331 unsigned size)
1332{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001333 int rc;
1334 ulong linear;
1335
Avi Kivity83b87952011-04-03 11:31:19 +03001336 rc = linearize(ctxt, addr, size, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001337 if (rc != X86EMUL_CONTINUE)
1338 return rc;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001339 return read_emulated(ctxt, linear, data, size);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001340}
1341
1342static int segmented_write(struct x86_emulate_ctxt *ctxt,
1343 struct segmented_address addr,
1344 const void *data,
1345 unsigned size)
1346{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001347 int rc;
1348 ulong linear;
1349
Avi Kivity83b87952011-04-03 11:31:19 +03001350 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001351 if (rc != X86EMUL_CONTINUE)
1352 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001353 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1354 &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001355}
1356
1357static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1358 struct segmented_address addr,
1359 const void *orig_data, const void *data,
1360 unsigned size)
1361{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001362 int rc;
1363 ulong linear;
1364
Avi Kivity83b87952011-04-03 11:31:19 +03001365 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001366 if (rc != X86EMUL_CONTINUE)
1367 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001368 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1369 size, &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001370}
1371
Gleb Natapov7b262e92010-03-18 15:20:27 +02001372static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov7b262e92010-03-18 15:20:27 +02001373 unsigned int size, unsigned short port,
1374 void *dest)
1375{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001376 struct read_cache *rc = &ctxt->io_read;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001377
1378 if (rc->pos == rc->end) { /* refill pio read ahead */
Gleb Natapov7b262e92010-03-18 15:20:27 +02001379 unsigned int in_page, n;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001380 unsigned int count = ctxt->rep_prefix ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001381 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001382 in_page = (ctxt->eflags & EFLG_DF) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001383 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1384 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
Mark Rustadb55a8142014-07-25 06:27:05 -07001385 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
Gleb Natapov7b262e92010-03-18 15:20:27 +02001386 if (n == 0)
1387 n = 1;
1388 rc->pos = rc->end = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001389 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
Gleb Natapov7b262e92010-03-18 15:20:27 +02001390 return 0;
1391 rc->end = n * size;
1392 }
1393
Nadav Amite6e39f02014-04-18 03:35:10 +03001394 if (ctxt->rep_prefix && (ctxt->d & String) &&
1395 !(ctxt->eflags & EFLG_DF)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001396 ctxt->dst.data = rc->data + rc->pos;
1397 ctxt->dst.type = OP_MEM_STR;
1398 ctxt->dst.count = (rc->end - rc->pos) / size;
1399 rc->pos = rc->end;
1400 } else {
1401 memcpy(dest, rc->data + rc->pos, size);
1402 rc->pos += size;
1403 }
Gleb Natapov7b262e92010-03-18 15:20:27 +02001404 return 1;
1405}
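
/*
 * Sizing sketch for the pio read-ahead above (hypothetical helper,
 * illustrative values): the burst is capped by the bytes left in the
 * current page, by the buffer, and by the REP count.  E.g. "rep insb"
 * with rcx == 1000 and rdi at page offset 0xff0 yields
 * min3(16, sizeof(rc->data), 1000) == 16 one-byte units per round
 * trip.
 */
static inline unsigned int pio_burst_sketch(unsigned int in_page,
					    unsigned int buf_bytes,
					    unsigned int size,
					    unsigned int count)
{
	unsigned int n = min3(in_page, buf_bytes / size, count);

	return n ? n : 1;
}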
1406
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01001407static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1408 u16 index, struct desc_struct *desc)
1409{
1410 struct desc_ptr dt;
1411 ulong addr;
1412
1413 ctxt->ops->get_idt(ctxt, &dt);
1414
1415 if (dt.size < index * 8 + 7)
1416 return emulate_gp(ctxt, index << 3 | 0x2);
1417
1418 addr = dt.address + index * 8;
1419 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1420 &ctxt->exception);
1421}
1422
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001423static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001424 u16 selector, struct desc_ptr *dt)
1425{
Mathias Krause0225fb52012-08-30 01:30:16 +02001426 const struct x86_emulate_ops *ops = ctxt->ops;
Nadav Amit2eedcac2014-06-02 18:34:05 +03001427 u32 base3 = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001428
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001429 if (selector & 1 << 2) {
1430 struct desc_struct desc;
Avi Kivity1aa36612011-04-27 13:20:30 +03001431 u16 sel;
1432
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001433 memset(dt, 0, sizeof *dt);
Nadav Amit2eedcac2014-06-02 18:34:05 +03001434 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1435 VCPU_SREG_LDTR))
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001436 return;
1437
1438 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
Nadav Amit2eedcac2014-06-02 18:34:05 +03001439 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001440 } else
Avi Kivity4bff1e862011-04-20 13:37:53 +03001441 ops->get_gdt(ctxt, dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001442}
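
/*
 * Segment selector layout as relied on above (hypothetical helpers):
 * bits 1:0 are the RPL, bit 2 is the table indicator (0 = GDT,
 * 1 = LDT), and bits 15:3 index the chosen descriptor table.
 */
static inline unsigned int sel_index_sketch(u16 sel) { return sel >> 3; }
static inline bool sel_uses_ldt_sketch(u16 sel) { return sel & (1 << 2); }
static inline unsigned int sel_rpl_sketch(u16 sel)  { return sel & 3; }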
1443
1444/* allowed just for 8-byte segment descriptors */
1445static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Avi Kivitye9194642012-06-13 16:29:39 +03001446 u16 selector, struct desc_struct *desc,
1447 ulong *desc_addr_p)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001448{
1449 struct desc_ptr dt;
1450 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001451 ulong addr;
1452
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001453 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001454
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001455 if (dt.size < index * 8 + 7)
1456 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001457
Avi Kivitye9194642012-06-13 16:29:39 +03001458 *desc_addr_p = addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001459 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1460 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001461}
1462
1463/* allowed just for 8-byte segment descriptors */
1464static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001465 u16 selector, struct desc_struct *desc)
1466{
1467 struct desc_ptr dt;
1468 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001469 ulong addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001470
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001471 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001472
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001473 if (dt.size < index * 8 + 7)
1474 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001475
1476 addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001477 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1478 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001479}
1480
Gleb Natapov5601d052011-03-07 14:55:06 +02001481/* Does not support long mode */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001482static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Nadav Amitd1442d82014-09-18 22:39:39 +03001483 u16 selector, int seg, u8 cpl,
1484 bool in_task_switch,
1485 struct desc_struct *desc)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001486{
Avi Kivity869be992012-06-13 16:30:53 +03001487 struct desc_struct seg_desc, old_desc;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001488 u8 dpl, rpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001489 unsigned err_vec = GP_VECTOR;
1490 u32 err_code = 0;
1491 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
Avi Kivitye9194642012-06-13 16:29:39 +03001492 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001493 int ret;
Avi Kivity03ebebe2012-08-21 17:07:04 +03001494 u16 dummy;
Nadav Amite37a75a2014-06-02 18:34:04 +03001495 u32 base3 = 0;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001496
1497 memset(&seg_desc, 0, sizeof seg_desc);
1498
Kevin Wolff8da94e2013-04-11 14:06:03 +02001499 if (ctxt->mode == X86EMUL_MODE_REAL) {
1500 /* set real mode segment descriptor (keep limit etc. for
1501 * unreal mode) */
Avi Kivity03ebebe2012-08-21 17:07:04 +03001502 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001503 set_desc_base(&seg_desc, selector << 4);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001504 goto load;
Kevin Wolff8da94e2013-04-11 14:06:03 +02001505 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1506 /* VM86 needs a clean new segment descriptor */
1507 set_desc_base(&seg_desc, selector << 4);
1508 set_desc_limit(&seg_desc, 0xffff);
1509 seg_desc.type = 3;
1510 seg_desc.p = 1;
1511 seg_desc.s = 1;
1512 seg_desc.dpl = 3;
1513 goto load;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001514 }
1515
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001516 rpl = selector & 3;
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001517
1518 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1519 if ((seg == VCPU_SREG_CS
1520 || (seg == VCPU_SREG_SS
1521 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1522 || seg == VCPU_SREG_TR)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001523 && null_selector)
1524 goto exception;
1525
1526 /* TR should be in GDT only */
1527 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1528 goto exception;
1529
1530 if (null_selector) /* for NULL selector skip all following checks */
1531 goto load;
1532
Avi Kivitye9194642012-06-13 16:29:39 +03001533 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001534 if (ret != X86EMUL_CONTINUE)
1535 return ret;
1536
1537 err_code = selector & 0xfffc;
Paolo Bonzini15fc0752014-08-18 13:17:00 +02001538 err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001539
Guo Chaofc058682012-06-28 15:19:51 +08001540 /* can't load system descriptor into segment selector */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001541 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1542 goto exception;
1543
1544 if (!seg_desc.p) {
1545 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1546 goto exception;
1547 }
1548
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001549 dpl = seg_desc.dpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001550
1551 switch (seg) {
1552 case VCPU_SREG_SS:
1553 /*
1554 * segment is not a writable data segment, or the segment
1555 * selector's RPL != CPL, or the descriptor's DPL != CPL
1556 */
1557 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1558 goto exception;
1559 break;
1560 case VCPU_SREG_CS:
1561 if (!(seg_desc.type & 8))
1562 goto exception;
1563
1564 if (seg_desc.type & 4) {
1565 /* conforming */
1566 if (dpl > cpl)
1567 goto exception;
1568 } else {
1569 /* nonconforming */
1570 if (rpl > cpl || dpl != cpl)
1571 goto exception;
1572 }
Nadav Amit040c8dc2014-09-18 22:39:43 +03001573 /* in long-mode d/b must be clear if l is set */
1574 if (seg_desc.d && seg_desc.l) {
1575 u64 efer = 0;
1576
1577 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1578 if (efer & EFER_LMA)
1579 goto exception;
1580 }
1581
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001582 /* CS(RPL) <- CPL */
1583 selector = (selector & 0xfffc) | cpl;
1584 break;
1585 case VCPU_SREG_TR:
1586 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1587 goto exception;
Avi Kivity869be992012-06-13 16:30:53 +03001588 old_desc = seg_desc;
1589 seg_desc.type |= 2; /* busy */
1590 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1591 sizeof(seg_desc), &ctxt->exception);
1592 if (ret != X86EMUL_CONTINUE)
1593 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001594 break;
1595 case VCPU_SREG_LDTR:
1596 if (seg_desc.s || seg_desc.type != 2)
1597 goto exception;
1598 break;
1599 default: /* DS, ES, FS, or GS */
1600 /*
1601 * segment is not a data or readable code segment or
1602 * ((segment is a data or nonconforming code segment)
1603 * and (both RPL and CPL > DPL))
1604 */
1605 if ((seg_desc.type & 0xa) == 0x8 ||
1606 (((seg_desc.type & 0xc) != 0xc) &&
1607 (rpl > dpl && cpl > dpl)))
1608 goto exception;
1609 break;
1610 }
1611
1612 if (seg_desc.s) {
1613 /* mark segment as accessed */
1614 seg_desc.type |= 1;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001615 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001616 if (ret != X86EMUL_CONTINUE)
1617 return ret;
Nadav Amite37a75a2014-06-02 18:34:04 +03001618 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1619 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1620 sizeof(base3), &ctxt->exception);
1621 if (ret != X86EMUL_CONTINUE)
1622 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001623 }
1624load:
Nadav Amite37a75a2014-06-02 18:34:04 +03001625 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
Nadav Amitd1442d82014-09-18 22:39:39 +03001626 if (desc)
1627 *desc = seg_desc;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001628 return X86EMUL_CONTINUE;
1629exception:
Paolo Bonzini592f0852014-08-20 10:05:08 +02001630 return emulate_exception(ctxt, err_vec, err_code, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001631}
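
/*
 * Privilege rule applied in the VCPU_SREG_CS case above, restated as
 * a hypothetical predicate: a conforming code segment only needs
 * DPL <= CPL, while a nonconforming one needs RPL <= CPL and
 * DPL == CPL.
 */
static inline bool cs_load_ok_sketch(bool conforming, u8 rpl, u8 dpl, u8 cpl)
{
	return conforming ? dpl <= cpl : (rpl <= cpl && dpl == cpl);
}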
1632
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001633static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1634 u16 selector, int seg)
1635{
1636 u8 cpl = ctxt->ops->cpl(ctxt);
Nadav Amitd1442d82014-09-18 22:39:39 +03001637 return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001638}
1639
Wei Yongjun31be40b2010-08-17 09:17:30 +08001640static void write_register_operand(struct operand *op)
1641{
1642 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1643 switch (op->bytes) {
1644 case 1:
1645 *(u8 *)op->addr.reg = (u8)op->val;
1646 break;
1647 case 2:
1648 *(u16 *)op->addr.reg = (u16)op->val;
1649 break;
1650 case 4:
1651 *op->addr.reg = (u32)op->val;
1652 break; /* 64b: zero-extend */
1653 case 8:
1654 *op->addr.reg = op->val;
1655 break;
1656 }
1657}
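
/*
 * Width semantics mirrored by write_register_operand() above
 * (hypothetical helpers; assumes the little-endian register layout
 * the emulator already relies on): a 32-bit write zero-extends into
 * bits 63:32, e.g. "mov eax, 1" clears the top half of rax, while
 * 8- and 16-bit writes leave the upper register bits untouched.
 */
static inline void reg_write32_sketch(unsigned long *reg, u32 val)
{
	*reg = val;			/* zero-extends to full width */
}

static inline void reg_write16_sketch(unsigned long *reg, u16 val)
{
	*(u16 *)reg = val;		/* upper bits preserved */
}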
1658
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001659static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
Wei Yongjunc37eda12010-06-15 09:03:33 +08001660{
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001661 switch (op->type) {
Wei Yongjunc37eda12010-06-15 09:03:33 +08001662 case OP_REG:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001663 write_register_operand(op);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001664 break;
1665 case OP_MEM:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001666 if (ctxt->lock_prefix)
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001667 return segmented_cmpxchg(ctxt,
1668 op->addr.mem,
1669 &op->orig_val,
1670 &op->val,
1671 op->bytes);
1672 else
1673 return segmented_write(ctxt,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001674 op->addr.mem,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001675 &op->val,
1676 op->bytes);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001677 break;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001678 case OP_MEM_STR:
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001679 return segmented_write(ctxt,
1680 op->addr.mem,
1681 op->data,
1682 op->bytes * op->count);
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001683 break;
Avi Kivity1253791d2011-03-29 11:41:27 +02001684 case OP_XMM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001685 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
Avi Kivity1253791d2011-03-29 11:41:27 +02001686 break;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001687 case OP_MM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001688 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001689 break;
Wei Yongjunc37eda12010-06-15 09:03:33 +08001690 case OP_NONE:
1691 /* no writeback */
1692 break;
1693 default:
1694 break;
1695 }
1696 return X86EMUL_CONTINUE;
1697}
1698
Avi Kivity51ddff52012-06-12 20:19:40 +03001699static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001700{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001701 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001702
Avi Kivity5ad105e2012-08-19 14:34:31 +03001703 rsp_increment(ctxt, -bytes);
Avi Kivitydd856ef2012-08-27 23:46:17 +03001704 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001705 addr.seg = VCPU_SREG_SS;
1706
Avi Kivity51ddff52012-06-12 20:19:40 +03001707 return segmented_write(ctxt, addr, data, bytes);
1708}
1709
1710static int em_push(struct x86_emulate_ctxt *ctxt)
1711{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001712 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001713 ctxt->dst.type = OP_NONE;
Avi Kivity51ddff52012-06-12 20:19:40 +03001714 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001715}
1716
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001717static int emulate_pop(struct x86_emulate_ctxt *ctxt,
Avi Kivity350f69d2009-01-05 11:12:40 +02001718 void *dest, int len)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001719{
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001720 int rc;
Avi Kivity90de84f2010-11-17 15:28:21 +02001721 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001722
Avi Kivitydd856ef2012-08-27 23:46:17 +03001723 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Avi Kivity90de84f2010-11-17 15:28:21 +02001724 addr.seg = VCPU_SREG_SS;
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001725 rc = segmented_read(ctxt, addr, dest, len);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09001726 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001727 return rc;
1728
Avi Kivity5ad105e2012-08-19 14:34:31 +03001729 rsp_increment(ctxt, len);
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001730 return rc;
1731}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001732
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001733static int em_pop(struct x86_emulate_ctxt *ctxt)
1734{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001735 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001736}
1737
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001738static int emulate_popf(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001739 void *dest, int len)
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001740{
1741 int rc;
1742 unsigned long val, change_mask;
1743 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001744 int cpl = ctxt->ops->cpl(ctxt);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001745
Takuya Yoshikawa3b9be3b2011-05-02 02:27:55 +09001746 rc = emulate_pop(ctxt, &val, len);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001747 if (rc != X86EMUL_CONTINUE)
1748 return rc;
1749
1750 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
Nadav Amit163b1352014-07-21 14:37:28 +03001751 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001752
1753 switch(ctxt->mode) {
1754 case X86EMUL_MODE_PROT64:
1755 case X86EMUL_MODE_PROT32:
1756 case X86EMUL_MODE_PROT16:
1757 if (cpl == 0)
1758 change_mask |= EFLG_IOPL;
1759 if (cpl <= iopl)
1760 change_mask |= EFLG_IF;
1761 break;
1762 case X86EMUL_MODE_VM86:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001763 if (iopl < 3)
1764 return emulate_gp(ctxt, 0);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001765 change_mask |= EFLG_IF;
1766 break;
1767 default: /* real mode */
1768 change_mask |= (EFLG_IOPL | EFLG_IF);
1769 break;
1770 }
1771
1772 *(unsigned long *)dest =
1773 (ctxt->eflags & ~change_mask) | (val & change_mask);
1774
1775 return rc;
1776}
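
/*
 * POPF writability sketch (mirrors the protected-mode branch above as
 * a hypothetical helper): the arithmetic/system flags are always
 * writable; IOPL additionally needs CPL == 0 and IF needs CPL <= IOPL.
 */
static inline unsigned long popf_change_mask_sketch(int cpl, int iopl)
{
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF |
			     EFLG_SF | EFLG_OF | EFLG_TF | EFLG_DF |
			     EFLG_NT | EFLG_AC | EFLG_ID;

	if (cpl == 0)
		mask |= EFLG_IOPL;
	if (cpl <= iopl)
		mask |= EFLG_IF;
	return mask;
}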
1777
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001778static int em_popf(struct x86_emulate_ctxt *ctxt)
1779{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001780 ctxt->dst.type = OP_REG;
1781 ctxt->dst.addr.reg = &ctxt->eflags;
1782 ctxt->dst.bytes = ctxt->op_bytes;
1783 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001784}
1785
Avi Kivity612e89f2012-06-12 20:03:23 +03001786static int em_enter(struct x86_emulate_ctxt *ctxt)
1787{
1788 int rc;
1789 unsigned frame_size = ctxt->src.val;
1790 unsigned nesting_level = ctxt->src2.val & 31;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001791 ulong rbp;
Avi Kivity612e89f2012-06-12 20:03:23 +03001792
1793 if (nesting_level)
1794 return X86EMUL_UNHANDLEABLE;
1795
Avi Kivitydd856ef2012-08-27 23:46:17 +03001796 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1797 rc = push(ctxt, &rbp, stack_size(ctxt));
Avi Kivity612e89f2012-06-12 20:03:23 +03001798 if (rc != X86EMUL_CONTINUE)
1799 return rc;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001800 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
Avi Kivity612e89f2012-06-12 20:03:23 +03001801 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001802 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1803 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
Avi Kivity612e89f2012-06-12 20:03:23 +03001804 stack_mask(ctxt));
1805 return X86EMUL_CONTINUE;
1806}
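
/*
 * Worked example for em_enter() above (illustrative values):
 * "enter 16, 0" pushes the old frame pointer, copies rsp into rbp,
 * and then drops rsp by the 16-byte frame size; any nesting level
 * other than zero is left to userspace as X86EMUL_UNHANDLEABLE.
 */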
1807
Avi Kivityf47cfa32012-06-07 17:49:24 +03001808static int em_leave(struct x86_emulate_ctxt *ctxt)
1809{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001810 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
Avi Kivityf47cfa32012-06-07 17:49:24 +03001811 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001812 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
Avi Kivityf47cfa32012-06-07 17:49:24 +03001813}
1814
Avi Kivity1cd196e2011-09-13 10:45:51 +03001815static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001816{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001817 int seg = ctxt->src2.val;
1818
Avi Kivity9dac77f2011-06-01 15:34:25 +03001819 ctxt->src.val = get_segment_selector(ctxt, seg);
Nadav Amit0fcc2072014-11-02 11:54:51 +02001820 if (ctxt->op_bytes == 4) {
1821 rsp_increment(ctxt, -2);
1822 ctxt->op_bytes = 2;
1823 }
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001824
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001825 return em_push(ctxt);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001826}
1827
Avi Kivity1cd196e2011-09-13 10:45:51 +03001828static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001829{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001830 int seg = ctxt->src2.val;
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001831 unsigned long selector;
1832 int rc;
1833
Avi Kivity9dac77f2011-06-01 15:34:25 +03001834 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001835 if (rc != X86EMUL_CONTINUE)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001836 return rc;
1837
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001838 if (ctxt->modrm_reg == VCPU_SREG_SS)
1839 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1840
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001841 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001842 return rc;
1843}
1844
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001845static int em_pusha(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001846{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001847 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001848 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001849 int reg = VCPU_REGS_RAX;
1850
1851 while (reg <= VCPU_REGS_RDI) {
1852 (reg == VCPU_REGS_RSP) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001853 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001854
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001855 rc = em_push(ctxt);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001856 if (rc != X86EMUL_CONTINUE)
1857 return rc;
1858
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001859 ++reg;
1860 }
Wei Yongjunc37eda12010-06-15 09:03:33 +08001861
Wei Yongjunc37eda12010-06-15 09:03:33 +08001862 return rc;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001863}
1864
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001865static int em_pushf(struct x86_emulate_ctxt *ctxt)
1866{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001867 ctxt->src.val = (unsigned long)ctxt->eflags;
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001868 return em_push(ctxt);
1869}
1870
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001871static int em_popa(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001872{
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001873 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001874 int reg = VCPU_REGS_RDI;
1875
1876 while (reg >= VCPU_REGS_RAX) {
1877 if (reg == VCPU_REGS_RSP) {
Avi Kivity5ad105e2012-08-19 14:34:31 +03001878 rsp_increment(ctxt, ctxt->op_bytes);
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001879 --reg;
1880 }
1881
Avi Kivitydd856ef2012-08-27 23:46:17 +03001882 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001883 if (rc != X86EMUL_CONTINUE)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001884 break;
1885 --reg;
1886 }
1887 return rc;
1888}
1889
Avi Kivitydd856ef2012-08-27 23:46:17 +03001890static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001891{
Mathias Krause0225fb52012-08-30 01:30:16 +02001892 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001893 int rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001894 struct desc_ptr dt;
1895 gva_t cs_addr;
1896 gva_t eip_addr;
1897 u16 cs, eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001898
1899 /* TODO: Add limit checks */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001900 ctxt->src.val = ctxt->eflags;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001901 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001902 if (rc != X86EMUL_CONTINUE)
1903 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001904
1905 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1906
Avi Kivity9dac77f2011-06-01 15:34:25 +03001907 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001908 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001909 if (rc != X86EMUL_CONTINUE)
1910 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001911
Avi Kivity9dac77f2011-06-01 15:34:25 +03001912 ctxt->src.val = ctxt->_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001913 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001914 if (rc != X86EMUL_CONTINUE)
1915 return rc;
1916
Avi Kivity4bff1e862011-04-20 13:37:53 +03001917 ops->get_idt(ctxt, &dt);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001918
1919 eip_addr = dt.address + (irq << 2);
1920 cs_addr = dt.address + (irq << 2) + 2;
1921
Avi Kivity0f65dd72011-04-20 13:37:53 +03001922 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001923 if (rc != X86EMUL_CONTINUE)
1924 return rc;
1925
Avi Kivity0f65dd72011-04-20 13:37:53 +03001926 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001927 if (rc != X86EMUL_CONTINUE)
1928 return rc;
1929
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001930 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001931 if (rc != X86EMUL_CONTINUE)
1932 return rc;
1933
Avi Kivity9dac77f2011-06-01 15:34:25 +03001934 ctxt->_eip = eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001935
1936 return rc;
1937}
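
/*
 * Real-mode IVT layout assumed above (hypothetical helper): vector n
 * occupies 4 bytes at n * 4, 16-bit offset first and 16-bit segment
 * second, hence eip at dt.address + (irq << 2) and cs two bytes later.
 */
static inline void ivt_entry_sketch(unsigned long ivt_base, int irq,
				    unsigned long *eip_addr,
				    unsigned long *cs_addr)
{
	*eip_addr = ivt_base + (irq << 2);
	*cs_addr  = ivt_base + (irq << 2) + 2;
}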
1938
Avi Kivitydd856ef2012-08-27 23:46:17 +03001939int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1940{
1941 int rc;
1942
1943 invalidate_registers(ctxt);
1944 rc = __emulate_int_real(ctxt, irq);
1945 if (rc == X86EMUL_CONTINUE)
1946 writeback_registers(ctxt);
1947 return rc;
1948}
1949
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001950static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001951{
1952 switch(ctxt->mode) {
1953 case X86EMUL_MODE_REAL:
Avi Kivitydd856ef2012-08-27 23:46:17 +03001954 return __emulate_int_real(ctxt, irq);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001955 case X86EMUL_MODE_VM86:
1956 case X86EMUL_MODE_PROT16:
1957 case X86EMUL_MODE_PROT32:
1958 case X86EMUL_MODE_PROT64:
1959 default:
1960 /* Protected mode interrupts unimplemented yet */
1961 return X86EMUL_UNHANDLEABLE;
1962 }
1963}
1964
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001965static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001966{
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001967 int rc = X86EMUL_CONTINUE;
1968 unsigned long temp_eip = 0;
1969 unsigned long temp_eflags = 0;
1970 unsigned long cs = 0;
1971 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1972 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1973 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1974 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1975
1976 /* TODO: Add stack limit check */
1977
Avi Kivity9dac77f2011-06-01 15:34:25 +03001978 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001979
1980 if (rc != X86EMUL_CONTINUE)
1981 return rc;
1982
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001983 if (temp_eip & ~0xffff)
1984 return emulate_gp(ctxt, 0);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001985
Avi Kivity9dac77f2011-06-01 15:34:25 +03001986 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001987
1988 if (rc != X86EMUL_CONTINUE)
1989 return rc;
1990
Avi Kivity9dac77f2011-06-01 15:34:25 +03001991 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001992
1993 if (rc != X86EMUL_CONTINUE)
1994 return rc;
1995
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001996 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001997
1998 if (rc != X86EMUL_CONTINUE)
1999 return rc;
2000
Avi Kivity9dac77f2011-06-01 15:34:25 +03002001 ctxt->_eip = temp_eip;
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002002
Avi Kivity9dac77f2011-06-01 15:34:25 +03002004 if (ctxt->op_bytes == 4)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002005 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
Avi Kivity9dac77f2011-06-01 15:34:25 +03002006 else if (ctxt->op_bytes == 2) {
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002007 ctxt->eflags &= ~0xffff;
2008 ctxt->eflags |= temp_eflags;
2009 }
2010
2011 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2012 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2013
2014 return rc;
2015}
2016
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002017static int em_iret(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002018{
2019 switch(ctxt->mode) {
2020 case X86EMUL_MODE_REAL:
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002021 return emulate_iret_real(ctxt);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002022 case X86EMUL_MODE_VM86:
2023 case X86EMUL_MODE_PROT16:
2024 case X86EMUL_MODE_PROT32:
2025 case X86EMUL_MODE_PROT64:
2026 default:
2027 /* iret from protected mode unimplemented yet */
2028 return X86EMUL_UNHANDLEABLE;
2029 }
2030}
2031
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002032static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2033{
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002034 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002035 unsigned short sel, old_sel;
2036 struct desc_struct old_desc, new_desc;
2037 const struct x86_emulate_ops *ops = ctxt->ops;
2038 u8 cpl = ctxt->ops->cpl(ctxt);
2039
2040 /* Assignment of RIP may only fail in 64-bit mode */
2041 if (ctxt->mode == X86EMUL_MODE_PROT64)
2042 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2043 VCPU_SREG_CS);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002044
Avi Kivity9dac77f2011-06-01 15:34:25 +03002045 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002046
Nadav Amitd1442d82014-09-18 22:39:39 +03002047 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2048 &new_desc);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002049 if (rc != X86EMUL_CONTINUE)
2050 return rc;
2051
Nadav Amitd1442d82014-09-18 22:39:39 +03002052 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2053 if (rc != X86EMUL_CONTINUE) {
Nadav Amitcd9b8e2c2014-10-28 00:03:43 +02002054 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002055 /* assigning eip failed; restore the old cs */
2056 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2057 return rc;
2058 }
2059 return rc;
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002060}
2061
Nadav Amitf7784042014-09-18 22:39:41 +03002062static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002063{
Nadav Amitf7784042014-09-18 22:39:41 +03002064 return assign_eip_near(ctxt, ctxt->src.val);
2065}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002066
Nadav Amitf7784042014-09-18 22:39:41 +03002067static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2068{
2069 int rc;
2070 long int old_eip;
2071
2072 old_eip = ctxt->_eip;
2073 rc = assign_eip_near(ctxt, ctxt->src.val);
2074 if (rc != X86EMUL_CONTINUE)
2075 return rc;
2076 ctxt->src.val = old_eip;
2077 rc = em_push(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09002078 return rc;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002079}
2080
Takuya Yoshikawae0dac402011-12-06 18:07:27 +09002081static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002082{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002083 u64 old = ctxt->dst.orig_val64;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002084
Nadav Amitaaa05f22014-06-02 18:34:10 +03002085 if (ctxt->dst.bytes == 16)
2086 return X86EMUL_UNHANDLEABLE;
2087
Avi Kivitydd856ef2012-08-27 23:46:17 +03002088 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2089 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2090 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2091 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
Laurent Vivier05f086f2007-09-24 11:10:55 +02002092 ctxt->eflags &= ~EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002093 } else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03002094 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2095 (u32) reg_read(ctxt, VCPU_REGS_RBX);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002096
Laurent Vivier05f086f2007-09-24 11:10:55 +02002097 ctxt->eflags |= EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002098 }
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002099 return X86EMUL_CONTINUE;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002100}
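
/*
 * CMPXCHG8B semantics restated as a hypothetical helper (the real
 * writeback goes through ctxt->dst, as above): compare the 64-bit
 * memory operand with edx:eax; on match store ecx:ebx and set ZF,
 * otherwise load the old value into edx:eax and clear ZF.
 */
static inline bool cmpxchg8b_sketch(u64 *mem, u32 *eax, u32 *edx,
				    u32 ebx, u32 ecx)
{
	if (*mem == (((u64)*edx << 32) | *eax)) {
		*mem = ((u64)ecx << 32) | ebx;
		return true;		/* ZF set */
	}
	*eax = (u32)*mem;
	*edx = (u32)(*mem >> 32);
	return false;			/* ZF clear */
}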
2101
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002102static int em_ret(struct x86_emulate_ctxt *ctxt)
2103{
Nadav Amit234f3ce2014-09-18 22:39:38 +03002104 int rc;
2105 unsigned long eip;
2106
2107 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2108 if (rc != X86EMUL_CONTINUE)
2109 return rc;
2110
2111 return assign_eip_near(ctxt, eip);
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002112}
2113
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002114static int em_ret_far(struct x86_emulate_ctxt *ctxt)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002115{
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002116 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002117 unsigned long eip, cs;
2118 u16 old_cs;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002119 int cpl = ctxt->ops->cpl(ctxt);
Nadav Amitd1442d82014-09-18 22:39:39 +03002120 struct desc_struct old_desc, new_desc;
2121 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002122
Nadav Amitd1442d82014-09-18 22:39:39 +03002123 if (ctxt->mode == X86EMUL_MODE_PROT64)
2124 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2125 VCPU_SREG_CS);
2126
2127 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002128 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002129 return rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002130 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002131 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002132 return rc;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002133 /* Outer-privilege level return is not implemented */
2134 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2135 return X86EMUL_UNHANDLEABLE;
Nadav Amitd1442d82014-09-18 22:39:39 +03002136 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
2137 &new_desc);
2138 if (rc != X86EMUL_CONTINUE)
2139 return rc;
2140 rc = assign_eip_far(ctxt, eip, new_desc.l);
2141 if (rc != X86EMUL_CONTINUE) {
Nadav Amitcd9b8e2c2014-10-28 00:03:43 +02002142 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002143 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2144 }
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002145 return rc;
2146}
2147
Bruce Rogers32611072013-09-09 09:40:20 -06002148static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2149{
2150 int rc;
2151
2152 rc = em_ret_far(ctxt);
2153 if (rc != X86EMUL_CONTINUE)
2154 return rc;
2155 rsp_increment(ctxt, ctxt->src.val);
2156 return X86EMUL_CONTINUE;
2157}
2158
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002159static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2160{
2161 /* Save real source value, then compare EAX against destination. */
Nadav Amit37c564f2014-06-02 18:34:07 +03002162 ctxt->dst.orig_val = ctxt->dst.val;
2163 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002164 ctxt->src.orig_val = ctxt->src.val;
Nadav Amit37c564f2014-06-02 18:34:07 +03002165 ctxt->src.val = ctxt->dst.orig_val;
Avi Kivity158de572013-01-19 19:51:57 +02002166 fastop(ctxt, em_cmp);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002167
2168 if (ctxt->eflags & EFLG_ZF) {
2169 /* Success: write back to memory. */
2170 ctxt->dst.val = ctxt->src.orig_val;
2171 } else {
2172 /* Failure: write the value we saw to EAX. */
2173 ctxt->dst.type = OP_REG;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002174 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Nadav Amit37c564f2014-06-02 18:34:07 +03002175 ctxt->dst.val = ctxt->dst.orig_val;
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002176 }
2177 return X86EMUL_CONTINUE;
2178}
2179
Avi Kivityd4b43252011-09-13 10:45:50 +03002180static int em_lseg(struct x86_emulate_ctxt *ctxt)
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002181{
Avi Kivityd4b43252011-09-13 10:45:50 +03002182 int seg = ctxt->src2.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002183 unsigned short sel;
2184 int rc;
2185
Avi Kivity9dac77f2011-06-01 15:34:25 +03002186 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002187
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002188 rc = load_segment_descriptor(ctxt, sel, seg);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002189 if (rc != X86EMUL_CONTINUE)
2190 return rc;
2191
Avi Kivity9dac77f2011-06-01 15:34:25 +03002192 ctxt->dst.val = ctxt->src.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002193 return rc;
2194}
2195
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002196static void
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002197setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002198 struct desc_struct *cs, struct desc_struct *ss)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002199{
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002200 cs->l = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002201 set_desc_base(cs, 0); /* flat segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002202 cs->g = 1; /* 4kb granularity */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002203 set_desc_limit(cs, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002204 cs->type = 0x0b; /* Read, Execute, Accessed */
2205 cs->s = 1;
2206 cs->dpl = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002207 cs->p = 1;
2208 cs->d = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002209 cs->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002210
Gleb Natapov79168fd2010-04-28 19:15:30 +03002211 set_desc_base(ss, 0); /* flat segment */
2212 set_desc_limit(ss, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002213 ss->g = 1; /* 4kb granularity */
2214 ss->s = 1;
2215 ss->type = 0x03; /* Read/Write, Accessed */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002216 ss->d = 1; /* 32bit stack segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002217 ss->dpl = 0;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002218 ss->p = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002219 ss->l = 0;
2220 ss->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002221}
2222
Avi Kivity1a18a692012-02-01 12:23:21 +02002223static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2224{
2225 u32 eax, ebx, ecx, edx;
2226
2227 eax = ecx = 0;
Avi Kivity0017f932012-06-07 14:10:16 +03002228 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2229 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
Avi Kivity1a18a692012-02-01 12:23:21 +02002230 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2231 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2232}
2233
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002234static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2235{
Mathias Krause0225fb52012-08-30 01:30:16 +02002236 const struct x86_emulate_ops *ops = ctxt->ops;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002237 u32 eax, ebx, ecx, edx;
2238
2239 /*
2240 * syscall should always be enabled in long mode - so only fall
2241 * back to the vendor-specific (cpuid) check if other modes are active...
2242 */
2243 if (ctxt->mode == X86EMUL_MODE_PROT64)
2244 return true;
2245
2246 eax = 0x00000000;
2247 ecx = 0x00000000;
Avi Kivity0017f932012-06-07 14:10:16 +03002248 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2249 /*
2250 * Intel ("GenuineIntel")
2251 * remark: Intel CPUs only support "syscall" in 64-bit long
2252 * mode, so a 64-bit guest running a 32-bit compat app will
2253 * #UD. While this behaviour could be fixed by emulating the
2254 * AMD response, AMD CPUs can't be made to behave like
2255 * Intel ones.
2256 */
2257 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2258 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2259 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2260 return false;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002261
Avi Kivity0017f932012-06-07 14:10:16 +03002262 /* AMD ("AuthenticAMD") */
2263 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2264 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2265 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2266 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002267
Avi Kivity0017f932012-06-07 14:10:16 +03002268 /* AMD ("AMDisbetter!") */
2269 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2270 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2271 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2272 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002273
2274 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2275 return false;
2276}
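
/*
 * For reference (CPUID leaf 0 convention, illustrative): the 12-byte
 * vendor string is returned in ebx, edx, ecx in that order, so
 * "GenuineIntel" is ebx == 0x756e6547 ("Genu"), edx == 0x49656e69
 * ("ineI") and ecx == 0x6c65746e ("ntel").
 */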
2277
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002278static int em_syscall(struct x86_emulate_ctxt *ctxt)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002279{
Mathias Krause0225fb52012-08-30 01:30:16 +02002280 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002281 struct desc_struct cs, ss;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002282 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002283 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002284 u64 efer = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002285
2286 /* syscall is not available in real mode */
Gleb Natapov2e901c42010-03-18 15:20:12 +02002287 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002288 ctxt->mode == X86EMUL_MODE_VM86)
2289 return emulate_ud(ctxt);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002290
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002291 if (!(em_syscall_is_enabled(ctxt)))
2292 return emulate_ud(ctxt);
2293
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002294 ops->get_msr(ctxt, MSR_EFER, &efer);
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002295 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002296
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002297 if (!(efer & EFER_SCE))
2298 return emulate_ud(ctxt);
2299
Avi Kivity717746e2011-04-20 13:37:53 +03002300 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002301 msr_data >>= 32;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002302 cs_sel = (u16)(msr_data & 0xfffc);
2303 ss_sel = (u16)(msr_data + 8);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002304
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002305 if (efer & EFER_LMA) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002306 cs.d = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002307 cs.l = 1;
2308 }
Avi Kivity1aa36612011-04-27 13:20:30 +03002309 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2310 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002311
Avi Kivitydd856ef2012-08-27 23:46:17 +03002312 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002313 if (efer & EFER_LMA) {
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002314#ifdef CONFIG_X86_64
Nadav Amit6c6cb692014-07-21 14:37:30 +03002315 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002316
Avi Kivity717746e2011-04-20 13:37:53 +03002317 ops->get_msr(ctxt,
Gleb Natapov3fb1b5d2010-04-28 19:15:28 +03002318 ctxt->mode == X86EMUL_MODE_PROT64 ?
2319 MSR_LSTAR : MSR_CSTAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002320 ctxt->_eip = msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002321
Avi Kivity717746e2011-04-20 13:37:53 +03002322 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
Nadav Amit6c6cb692014-07-21 14:37:30 +03002323 ctxt->eflags &= ~msr_data;
Nadav Amit807c1422014-11-02 11:54:49 +02002324 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002325#endif
2326 } else {
2327 /* legacy mode */
Avi Kivity717746e2011-04-20 13:37:53 +03002328 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002329 ctxt->_eip = (u32)msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002330
Nadav Amit6c6cb692014-07-21 14:37:30 +03002331 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002332 }
2333
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002334 return X86EMUL_CONTINUE;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002335}
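
/*
 * MSR_STAR layout used above, restated as a hypothetical helper:
 * bits 47:32 supply the SYSCALL CS (SS is that value + 8); the target
 * rip then comes from MSR_LSTAR (64-bit) or MSR_CSTAR (compat), and
 * MSR_SYSCALL_MASK selects which rflags bits are cleared.
 */
static inline void star_selectors_sketch(u64 star, u16 *cs_sel, u16 *ss_sel)
{
	*cs_sel = (u16)((star >> 32) & 0xfffc);
	*ss_sel = (u16)((star >> 32) + 8);
}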
2336
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002337static int em_sysenter(struct x86_emulate_ctxt *ctxt)
Andre Przywara8c604352009-06-18 12:56:01 +02002338{
Mathias Krause0225fb52012-08-30 01:30:16 +02002339 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002340 struct desc_struct cs, ss;
Andre Przywara8c604352009-06-18 12:56:01 +02002341 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002342 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002343 u64 efer = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002344
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002345 ops->get_msr(ctxt, MSR_EFER, &efer);
Gleb Natapova0044752010-02-10 14:21:31 +02002346 /* inject #GP if in real mode */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002347 if (ctxt->mode == X86EMUL_MODE_REAL)
2348 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002349
Avi Kivity1a18a692012-02-01 12:23:21 +02002350 /*
2351 * Not recognized on AMD in compat mode (but is recognized in legacy
2352 * mode).
2353 */
2354 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2355 && !vendor_intel(ctxt))
2356 return emulate_ud(ctxt);
2357
Andre Przywara8c604352009-06-18 12:56:01 +02002358 /* XXX sysenter/sysexit have not been tested in 64bit mode.
2359 * Therefore, we inject an #UD.
2360 */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002361 if (ctxt->mode == X86EMUL_MODE_PROT64)
2362 return emulate_ud(ctxt);
Andre Przywara8c604352009-06-18 12:56:01 +02002363
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002364 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara8c604352009-06-18 12:56:01 +02002365
Avi Kivity717746e2011-04-20 13:37:53 +03002366 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara8c604352009-06-18 12:56:01 +02002367 switch (ctxt->mode) {
2368 case X86EMUL_MODE_PROT32:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002369 if ((msr_data & 0xfffc) == 0x0)
2370 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002371 break;
2372 case X86EMUL_MODE_PROT64:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002373 if (msr_data == 0x0)
2374 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002375 break;
Gleb Natapov9d1b39a2012-09-03 15:24:27 +03002376 default:
2377 break;
Andre Przywara8c604352009-06-18 12:56:01 +02002378 }
2379
Nadav Amit6c6cb692014-07-21 14:37:30 +03002380 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002381 cs_sel = (u16)msr_data;
2382 cs_sel &= ~SELECTOR_RPL_MASK;
2383 ss_sel = cs_sel + 8;
2384 ss_sel &= ~SELECTOR_RPL_MASK;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002385 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002386 cs.d = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002387 cs.l = 1;
2388 }
2389
Avi Kivity1aa36612011-04-27 13:20:30 +03002390 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2391 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara8c604352009-06-18 12:56:01 +02002392
Avi Kivity717746e2011-04-20 13:37:53 +03002393 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002394 ctxt->_eip = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002395
Avi Kivity717746e2011-04-20 13:37:53 +03002396 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002397 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002398
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002399 return X86EMUL_CONTINUE;
Andre Przywara8c604352009-06-18 12:56:01 +02002400}
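
/*
 * Summary of the SYSENTER MSRs consumed above (no new logic):
 * MSR_IA32_SYSENTER_CS supplies CS with SS fixed at CS + 8, while
 * MSR_IA32_SYSENTER_EIP and MSR_IA32_SYSENTER_ESP provide the new rip
 * and rsp; a null CS selector raises #GP, as checked per mode.
 */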
2401
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002402static int em_sysexit(struct x86_emulate_ctxt *ctxt)
Andre Przywara4668f052009-06-18 12:56:02 +02002403{
Mathias Krause0225fb52012-08-30 01:30:16 +02002404 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002405 struct desc_struct cs, ss;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002406 u64 msr_data, rcx, rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002407 int usermode;
Xiao Guangrong1249b962011-05-15 23:25:10 +08002408 u16 cs_sel = 0, ss_sel = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002409
Gleb Natapova0044752010-02-10 14:21:31 +02002410 /* inject #GP if in real mode or Virtual 8086 mode */
2411 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002412 ctxt->mode == X86EMUL_MODE_VM86)
2413 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002414
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002415 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara4668f052009-06-18 12:56:02 +02002416
Avi Kivity9dac77f2011-06-01 15:34:25 +03002417 if ((ctxt->rex_prefix & 0x8) != 0x0)
Andre Przywara4668f052009-06-18 12:56:02 +02002418 usermode = X86EMUL_MODE_PROT64;
2419 else
2420 usermode = X86EMUL_MODE_PROT32;
2421
Nadav Amit234f3ce2014-09-18 22:39:38 +03002422 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2423 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2424
Andre Przywara4668f052009-06-18 12:56:02 +02002425 cs.dpl = 3;
2426 ss.dpl = 3;
Avi Kivity717746e2011-04-20 13:37:53 +03002427 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara4668f052009-06-18 12:56:02 +02002428 switch (usermode) {
2429 case X86EMUL_MODE_PROT32:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002430 cs_sel = (u16)(msr_data + 16);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002431 if ((msr_data & 0xfffc) == 0x0)
2432 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002433 ss_sel = (u16)(msr_data + 24);
Nadav Amitbf0b6822014-09-18 22:39:45 +03002434 rcx = (u32)rcx;
2435 rdx = (u32)rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002436 break;
2437 case X86EMUL_MODE_PROT64:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002438 cs_sel = (u16)(msr_data + 32);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002439 if (msr_data == 0x0)
2440 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002441 ss_sel = cs_sel + 8;
2442 cs.d = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002443 cs.l = 1;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002444 if (is_noncanonical_address(rcx) ||
2445 is_noncanonical_address(rdx))
2446 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002447 break;
2448 }
Gleb Natapov79168fd2010-04-28 19:15:30 +03002449 cs_sel |= SELECTOR_RPL_MASK;
2450 ss_sel |= SELECTOR_RPL_MASK;
Andre Przywara4668f052009-06-18 12:56:02 +02002451
Avi Kivity1aa36612011-04-27 13:20:30 +03002452 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2453 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara4668f052009-06-18 12:56:02 +02002454
Nadav Amit234f3ce2014-09-18 22:39:38 +03002455 ctxt->_eip = rdx;
2456 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
Andre Przywara4668f052009-06-18 12:56:02 +02002457
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002458 return X86EMUL_CONTINUE;
Andre Przywara4668f052009-06-18 12:56:02 +02002459}
2460
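/*
 * "Bad IOPL" means the I/O permission bitmap must be consulted: never
 * in real mode, always in virtual-8086 mode, and in protected mode
 * only when CPL > IOPL.
 */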
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002461static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002462{
2463 int iopl;
2464 if (ctxt->mode == X86EMUL_MODE_REAL)
2465 return false;
2466 if (ctxt->mode == X86EMUL_MODE_VM86)
2467 return true;
2468 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002469 return ctxt->ops->cpl(ctxt) > iopl;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002470}
2471
2472static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002473 u16 port, u16 len)
2474{
Mathias Krause0225fb52012-08-30 01:30:16 +02002475 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002476 struct desc_struct tr_seg;
Gleb Natapov5601d052011-03-07 14:55:06 +02002477 u32 base3;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002478 int r;
Avi Kivity1aa36612011-04-27 13:20:30 +03002479 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002480 unsigned mask = (1 << len) - 1;
Gleb Natapov5601d052011-03-07 14:55:06 +02002481 unsigned long base;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002482
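	/*
	 * Walk the I/O permission bitmap in the TSS: the u16 at offset
	 * 102 (0x66) of the 32-bit TSS is the bitmap base, and each
	 * port maps to one bit (set = access denied).  Two bytes are
	 * read so that a multi-byte access whose bits straddle a byte
	 * boundary is still checked with a single shift by port % 8.
	 */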
Avi Kivity1aa36612011-04-27 13:20:30 +03002483 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002484 if (!tr_seg.p)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002485 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002486 if (desc_limit_scaled(&tr_seg) < 103)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002487 return false;
Gleb Natapov5601d052011-03-07 14:55:06 +02002488 base = get_desc_base(&tr_seg);
2489#ifdef CONFIG_X86_64
2490 base |= ((u64)base3) << 32;
2491#endif
Avi Kivity0f65dd72011-04-20 13:37:53 +03002492 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002493 if (r != X86EMUL_CONTINUE)
2494 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002495 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002496 return false;
Avi Kivity0f65dd72011-04-20 13:37:53 +03002497 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002498 if (r != X86EMUL_CONTINUE)
2499 return false;
2500 if ((perm >> bit_idx) & mask)
2501 return false;
2502 return true;
2503}
2504
2505static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002506 u16 port, u16 len)
2507{
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002508 if (ctxt->perm_ok)
2509 return true;
2510
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002511 if (emulator_bad_iopl(ctxt))
2512 if (!emulator_io_port_access_allowed(ctxt, port, len))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002513 return false;
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002514
2515 ctxt->perm_ok = true;
2516
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002517 return true;
2518}
2519
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002520static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002521 struct tss_segment_16 *tss)
2522{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002523 tss->ip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002524 tss->flag = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002525 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2526 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2527 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2528 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2529 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2530 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2531 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2532 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002533
Avi Kivity1aa36612011-04-27 13:20:30 +03002534 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2535 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2536 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2537 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2538 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002539}
2540
2541static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002542 struct tss_segment_16 *tss)
2543{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002544 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002545 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002546
Avi Kivity9dac77f2011-06-01 15:34:25 +03002547 ctxt->_eip = tss->ip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002548 ctxt->eflags = tss->flag | 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002549 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2550 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2551 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2552 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2553 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2554 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2555 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2556 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002557
2558 /*
2559 * SDM says that segment selectors are loaded before segment
2560 * descriptors
2561 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002562 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2563 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2564 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2565 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2566 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002567
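	/* The new task's CPL is defined by the RPL of CS in the TSS. */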
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002568 cpl = tss->cs & 3;
2569
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002570 /*
Guo Chaofc058682012-06-28 15:19:51 +08002571	 * Now load segment descriptors. If a fault happens at this stage,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002572	 * it is handled in the context of the new task
2573 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002574 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2575 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002576 if (ret != X86EMUL_CONTINUE)
2577 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002578 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2579 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002580 if (ret != X86EMUL_CONTINUE)
2581 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002582 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2583 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002584 if (ret != X86EMUL_CONTINUE)
2585 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002586 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2587 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002588 if (ret != X86EMUL_CONTINUE)
2589 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002590 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2591 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002592 if (ret != X86EMUL_CONTINUE)
2593 return ret;
2594
2595 return X86EMUL_CONTINUE;
2596}
2597
2598static int task_switch_16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002599 u16 tss_selector, u16 old_tss_sel,
2600 ulong old_tss_base, struct desc_struct *new_desc)
2601{
Mathias Krause0225fb52012-08-30 01:30:16 +02002602 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002603 struct tss_segment_16 tss_seg;
2604 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002605 u32 new_tss_base = get_desc_base(new_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002606
Avi Kivity0f65dd72011-04-20 13:37:53 +03002607 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002608 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002609 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002610 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002611 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002612
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002613 save_state_to_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002614
Avi Kivity0f65dd72011-04-20 13:37:53 +03002615 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002616 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002617 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002618 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002619 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002620
Avi Kivity0f65dd72011-04-20 13:37:53 +03002621 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002622 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002623 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002624 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002625 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002626
2627 if (old_tss_sel != 0xffff) {
2628 tss_seg.prev_task_link = old_tss_sel;
2629
Avi Kivity0f65dd72011-04-20 13:37:53 +03002630 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002631 &tss_seg.prev_task_link,
2632 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002633 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002634 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002635 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002636 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002637 }
2638
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002639 return load_state_from_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002640}
2641
2642static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002643 struct tss_segment_32 *tss)
2644{
Nadav Amit5c7411e2014-04-07 18:37:47 +03002645	 /* CR3 and the LDT selector are intentionally not saved */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002646 tss->eip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002647 tss->eflags = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002648 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2649 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2650 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2651 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2652 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2653 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2654 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2655 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002656
Avi Kivity1aa36612011-04-27 13:20:30 +03002657 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2658 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2659 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2660 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2661 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2662 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002663}
2664
2665static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002666 struct tss_segment_32 *tss)
2667{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002668 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002669 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002670
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002671 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002672 return emulate_gp(ctxt, 0);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002673 ctxt->_eip = tss->eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002674 ctxt->eflags = tss->eflags | 2;
Kevin Wolf4cee4792012-02-08 14:34:41 +01002675
2676 /* General purpose registers */
Avi Kivitydd856ef2012-08-27 23:46:17 +03002677 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2678 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2679 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2680 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2681 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2682 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2683 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2684 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002685
2686 /*
2687 * SDM says that segment selectors are loaded before segment
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002688 * descriptors. This is important because CPL checks will
2689 * use CS.RPL.
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002690 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002691 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2692 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2693 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2694 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2695 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2696 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2697 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002698
2699 /*
Kevin Wolf4cee4792012-02-08 14:34:41 +01002700 * If we're switching between Protected Mode and VM86, we need to make
2701 * sure to update the mode before loading the segment descriptors so
2702 * that the selectors are interpreted correctly.
Kevin Wolf4cee4792012-02-08 14:34:41 +01002703 */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002704 if (ctxt->eflags & X86_EFLAGS_VM) {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002705 ctxt->mode = X86EMUL_MODE_VM86;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002706 cpl = 3;
2707 } else {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002708 ctxt->mode = X86EMUL_MODE_PROT32;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002709 cpl = tss->cs & 3;
2710 }
Kevin Wolf4cee4792012-02-08 14:34:41 +01002711
2712 /*
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002713	 * Now load segment descriptors. If a fault happens at this stage
2714	 * it is handled in the context of the new task
2715 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002716 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2717 cpl, true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002718 if (ret != X86EMUL_CONTINUE)
2719 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002720 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2721 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002722 if (ret != X86EMUL_CONTINUE)
2723 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002724 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2725 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002726 if (ret != X86EMUL_CONTINUE)
2727 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002728 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2729 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002730 if (ret != X86EMUL_CONTINUE)
2731 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002732 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2733 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002734 if (ret != X86EMUL_CONTINUE)
2735 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002736 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2737 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002738 if (ret != X86EMUL_CONTINUE)
2739 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002740 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2741 true, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002742 if (ret != X86EMUL_CONTINUE)
2743 return ret;
2744
2745 return X86EMUL_CONTINUE;
2746}
2747
2748static int task_switch_32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002749 u16 tss_selector, u16 old_tss_sel,
2750 ulong old_tss_base, struct desc_struct *new_desc)
2751{
Mathias Krause0225fb52012-08-30 01:30:16 +02002752 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002753 struct tss_segment_32 tss_seg;
2754 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002755 u32 new_tss_base = get_desc_base(new_desc);
Nadav Amit5c7411e2014-04-07 18:37:47 +03002756 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2757 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002758
Avi Kivity0f65dd72011-04-20 13:37:53 +03002759 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002760 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002761 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002762 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002763 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002764
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002765 save_state_to_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002766
Nadav Amit5c7411e2014-04-07 18:37:47 +03002767 /* Only GP registers and segment selectors are saved */
2768 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2769 ldt_sel_offset - eip_offset, &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002770 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002771 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002772 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002773
Avi Kivity0f65dd72011-04-20 13:37:53 +03002774 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002775 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002776 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002777 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002778 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002779
2780 if (old_tss_sel != 0xffff) {
2781 tss_seg.prev_task_link = old_tss_sel;
2782
Avi Kivity0f65dd72011-04-20 13:37:53 +03002783 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002784 &tss_seg.prev_task_link,
2785 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002786 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002787 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002788 /* FIXME: need to provide precise fault address */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002789 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002790 }
2791
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002792 return load_state_from_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002793}
2794
2795static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002796 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002797 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002798{
Mathias Krause0225fb52012-08-30 01:30:16 +02002799 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002800 struct desc_struct curr_tss_desc, next_tss_desc;
2801 int ret;
Avi Kivity1aa36612011-04-27 13:20:30 +03002802 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002803 ulong old_tss_base =
Avi Kivity4bff1e862011-04-20 13:37:53 +03002804 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
Gleb Natapovceffb452010-03-18 15:20:19 +02002805 u32 desc_limit;
Avi Kivitye9194642012-06-13 16:29:39 +03002806 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002807
2808 /* FIXME: old_tss_base == ~0 ? */
2809
Avi Kivitye9194642012-06-13 16:29:39 +03002810 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002811 if (ret != X86EMUL_CONTINUE)
2812 return ret;
Avi Kivitye9194642012-06-13 16:29:39 +03002813 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002814 if (ret != X86EMUL_CONTINUE)
2815 return ret;
2816
2817 /* FIXME: check that next_tss_desc is tss */
2818
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002819 /*
2820 * Check privileges. The three cases are task switch caused by...
2821 *
2822 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2823 * 2. Exception/IRQ/iret: No check is performed
Guo Chaofc058682012-06-28 15:19:51 +08002824 * 3. jmp/call to TSS: Check against DPL of the TSS
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002825 */
2826 if (reason == TASK_SWITCH_GATE) {
2827 if (idt_index != -1) {
2828 /* Software interrupts */
2829 struct desc_struct task_gate_desc;
2830 int dpl;
2831
2832 ret = read_interrupt_descriptor(ctxt, idt_index,
2833 &task_gate_desc);
2834 if (ret != X86EMUL_CONTINUE)
2835 return ret;
2836
2837 dpl = task_gate_desc.dpl;
2838 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2839 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2840 }
2841 } else if (reason != TASK_SWITCH_IRET) {
2842 int dpl = next_tss_desc.dpl;
2843 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2844 return emulate_gp(ctxt, tss_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002845 }
2846
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002847
Gleb Natapovceffb452010-03-18 15:20:19 +02002848 desc_limit = desc_limit_scaled(&next_tss_desc);
2849 if (!next_tss_desc.p ||
2850 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2851 desc_limit < 0x2b)) {
Paolo Bonzini592f0852014-08-20 10:05:08 +02002852 return emulate_ts(ctxt, tss_selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002853 }
2854
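	/*
	 * Busy-flag bookkeeping: JMP and IRET clear the outgoing TSS's
	 * busy bit; CALL and gate-based switches leave it set so the
	 * old task can be resumed with IRET.  Conversely, every switch
	 * except IRET marks the incoming TSS busy (an IRET target is
	 * already busy).
	 */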
2855 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2856 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002857 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002858 }
2859
2860 if (reason == TASK_SWITCH_IRET)
2861 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2862
2863	 /* set the back link to the previous task only if the NT bit is set in eflags;
Guo Chaofc058682012-06-28 15:19:51 +08002864	 note that old_tss_sel is not used after this point */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002865 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2866 old_tss_sel = 0xffff;
2867
2868 if (next_tss_desc.type & 8)
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002869 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002870 old_tss_base, &next_tss_desc);
2871 else
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002872 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002873 old_tss_base, &next_tss_desc);
Jan Kiszka0760d442010-04-14 15:50:57 +02002874 if (ret != X86EMUL_CONTINUE)
2875 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002876
2877 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2878 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2879
2880 if (reason != TASK_SWITCH_IRET) {
2881 next_tss_desc.type |= (1 << 1); /* set busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002882 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002883 }
2884
Avi Kivity717746e2011-04-20 13:37:53 +03002885 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
Avi Kivity1aa36612011-04-27 13:20:30 +03002886 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002887
Jan Kiszkae269fb22010-04-14 15:51:09 +02002888 if (has_error_code) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002889 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2890 ctxt->lock_prefix = 0;
2891 ctxt->src.val = (unsigned long) error_code;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002892 ret = em_push(ctxt);
Jan Kiszkae269fb22010-04-14 15:51:09 +02002893 }
2894
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002895 return ret;
2896}
2897
2898int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002899 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002900 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002901{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002902 int rc;
2903
Avi Kivitydd856ef2012-08-27 23:46:17 +03002904 invalidate_registers(ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002905 ctxt->_eip = ctxt->eip;
2906 ctxt->dst.type = OP_NONE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002907
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002908 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002909 has_error_code, error_code);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002910
Avi Kivitydd856ef2012-08-27 23:46:17 +03002911 if (rc == X86EMUL_CONTINUE) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002912 ctxt->eip = ctxt->_eip;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002913 writeback_registers(ctxt);
2914 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002915
Gleb Natapova0c0ab22011-03-28 16:57:49 +02002916 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002917}
2918
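/*
 * Advance a string operand: rSI/rDI move by op->bytes per iteration,
 * backwards when EFLAGS.DF is set; op->count lets a batched REP
 * iteration update the register in a single step.
 */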
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03002919static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2920 struct operand *op)
Gleb Natapova682e352010-03-18 15:20:21 +02002921{
Gleb Natapovb3356bf2012-09-03 15:24:29 +03002922 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
Gleb Natapova682e352010-03-18 15:20:21 +02002923
Avi Kivitydd856ef2012-08-27 23:46:17 +03002924 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2925 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
Gleb Natapova682e352010-03-18 15:20:21 +02002926}
2927
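/*
 * DAS: decimal-adjust AL after a packed-BCD subtraction.  E.g.
 * 0x42 - 0x17 leaves AL = 0x2b; the low nibble 0xb is > 9, so 6 is
 * subtracted, giving the BCD result 0x25 (42 - 17 = 25).
 */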
Avi Kivity7af04fc2010-08-18 14:16:35 +03002928static int em_das(struct x86_emulate_ctxt *ctxt)
2929{
Avi Kivity7af04fc2010-08-18 14:16:35 +03002930 u8 al, old_al;
2931 bool af, cf, old_cf;
2932
2933 cf = ctxt->eflags & X86_EFLAGS_CF;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002934 al = ctxt->dst.val;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002935
2936 old_al = al;
2937 old_cf = cf;
2938 cf = false;
2939 af = ctxt->eflags & X86_EFLAGS_AF;
2940 if ((al & 0x0f) > 9 || af) {
2941 al -= 6;
2942 cf = old_cf | (al >= 250);
2943 af = true;
2944 } else {
2945 af = false;
2946 }
2947 if (old_al > 0x99 || old_cf) {
2948 al -= 0x60;
2949 cf = true;
2950 }
2951
Avi Kivity9dac77f2011-06-01 15:34:25 +03002952 ctxt->dst.val = al;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002953 /* Set PF, ZF, SF */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002954 ctxt->src.type = OP_IMM;
2955 ctxt->src.val = 0;
2956 ctxt->src.bytes = 1;
Avi Kivity158de572013-01-19 19:51:57 +02002957 fastop(ctxt, em_or);
Avi Kivity7af04fc2010-08-18 14:16:35 +03002958 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2959 if (cf)
2960 ctxt->eflags |= X86_EFLAGS_CF;
2961 if (af)
2962 ctxt->eflags |= X86_EFLAGS_AF;
2963 return X86EMUL_CONTINUE;
2964}
2965
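/*
 * AAM: unpack AL into AH:AL using the immediate base (10 by default),
 * AH = AL / base, AL = AL % base; a zero base raises #DE.  E.g.
 * AL = 0x4f (79) gives AH = 7, AL = 9.
 */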
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02002966static int em_aam(struct x86_emulate_ctxt *ctxt)
2967{
2968 u8 al, ah;
2969
2970 if (ctxt->src.val == 0)
2971 return emulate_de(ctxt);
2972
2973 al = ctxt->dst.val & 0xff;
2974 ah = al / ctxt->src.val;
2975 al %= ctxt->src.val;
2976
2977 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2978
2979 /* Set PF, ZF, SF */
2980 ctxt->src.type = OP_IMM;
2981 ctxt->src.val = 0;
2982 ctxt->src.bytes = 1;
2983 fastop(ctxt, em_or);
2984
2985 return X86EMUL_CONTINUE;
2986}
2987
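/*
 * AAD: the inverse of AAM, repack AH:AL into AL = AH * base + AL
 * (mod 256) and clear AH.  E.g. AH = 7, AL = 9 with base 10 gives
 * AL = 79 = 0x4f, AH = 0.
 */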
Gleb Natapov7f662272012-12-10 11:42:30 +02002988static int em_aad(struct x86_emulate_ctxt *ctxt)
2989{
2990 u8 al = ctxt->dst.val & 0xff;
2991 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2992
2993 al = (al + (ah * ctxt->src.val)) & 0xff;
2994
2995 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2996
Gleb Natapovf583c292013-02-13 17:50:39 +02002997 /* Set PF, ZF, SF */
2998 ctxt->src.type = OP_IMM;
2999 ctxt->src.val = 0;
3000 ctxt->src.bytes = 1;
3001 fastop(ctxt, em_or);
Gleb Natapov7f662272012-12-10 11:42:30 +02003002
3003 return X86EMUL_CONTINUE;
3004}
3005
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003006static int em_call(struct x86_emulate_ctxt *ctxt)
3007{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003008 int rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003009 long rel = ctxt->src.val;
3010
3011 ctxt->src.val = (unsigned long)ctxt->_eip;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003012 rc = jmp_rel(ctxt, rel);
3013 if (rc != X86EMUL_CONTINUE)
3014 return rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003015 return em_push(ctxt);
3016}
3017
Avi Kivity0ef753b2010-08-18 14:51:45 +03003018static int em_call_far(struct x86_emulate_ctxt *ctxt)
3019{
Avi Kivity0ef753b2010-08-18 14:51:45 +03003020 u16 sel, old_cs;
3021 ulong old_eip;
3022 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03003023 struct desc_struct old_desc, new_desc;
3024 const struct x86_emulate_ops *ops = ctxt->ops;
3025 int cpl = ctxt->ops->cpl(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003026
Avi Kivity9dac77f2011-06-01 15:34:25 +03003027 old_eip = ctxt->_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003028 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003029
Avi Kivity9dac77f2011-06-01 15:34:25 +03003030 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Nadav Amitd1442d82014-09-18 22:39:39 +03003031 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3032 &new_desc);
3033 if (rc != X86EMUL_CONTINUE)
Avi Kivity0ef753b2010-08-18 14:51:45 +03003034 return X86EMUL_CONTINUE;
3035
Nadav Amitd1442d82014-09-18 22:39:39 +03003036 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3037 if (rc != X86EMUL_CONTINUE)
3038 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003039
Avi Kivity9dac77f2011-06-01 15:34:25 +03003040 ctxt->src.val = old_cs;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09003041 rc = em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003042 if (rc != X86EMUL_CONTINUE)
Nadav Amitd1442d82014-09-18 22:39:39 +03003043 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003044
Avi Kivity9dac77f2011-06-01 15:34:25 +03003045 ctxt->src.val = old_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003046 rc = em_push(ctxt);
3047	 /* If we failed, we tainted the memory, but at the very least we
3048	 should restore cs */
3049 if (rc != X86EMUL_CONTINUE)
3050 goto fail;
3051 return rc;
3052fail:
3053 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3054 return rc;
3055
Avi Kivity0ef753b2010-08-18 14:51:45 +03003056}
3057
Avi Kivity40ece7c2010-08-18 15:12:09 +03003058static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3059{
Avi Kivity40ece7c2010-08-18 15:12:09 +03003060 int rc;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003061 unsigned long eip;
Avi Kivity40ece7c2010-08-18 15:12:09 +03003062
Nadav Amit234f3ce2014-09-18 22:39:38 +03003063 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3064 if (rc != X86EMUL_CONTINUE)
3065 return rc;
3066 rc = assign_eip_near(ctxt, eip);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003067 if (rc != X86EMUL_CONTINUE)
3068 return rc;
Avi Kivity5ad105e2012-08-19 14:34:31 +03003069 rsp_increment(ctxt, ctxt->src.val);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003070 return X86EMUL_CONTINUE;
3071}
3072
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003073static int em_xchg(struct x86_emulate_ctxt *ctxt)
3074{
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003075 /* Write back the register source. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003076 ctxt->src.val = ctxt->dst.val;
3077 write_register_operand(&ctxt->src);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003078
3079 /* Write back the memory destination with implicit LOCK prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003080 ctxt->dst.val = ctxt->src.orig_val;
3081 ctxt->lock_prefix = 1;
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003082 return X86EMUL_CONTINUE;
3083}
3084
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003085static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3086{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003087 ctxt->dst.val = ctxt->src2.val;
Avi Kivity4d758342013-01-19 19:51:55 +02003088 return fastop(ctxt, em_imul);
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003089}
3090
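/*
 * CWD/CDQ/CQO: replicate the sign bit of rAX into rDX.
 * (src >> (bits - 1)) is 1 when the sign bit is set, so
 * ~((src >> (bits - 1)) - 1) is all ones; otherwise ~(0 - 1) = 0.
 */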
Avi Kivity61429142010-08-19 15:13:00 +03003091static int em_cwd(struct x86_emulate_ctxt *ctxt)
3092{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003093 ctxt->dst.type = OP_REG;
3094 ctxt->dst.bytes = ctxt->src.bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03003095 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivity9dac77f2011-06-01 15:34:25 +03003096 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
Avi Kivity61429142010-08-19 15:13:00 +03003097
3098 return X86EMUL_CONTINUE;
3099}
3100
Avi Kivity48bb5d32010-08-18 18:54:34 +03003101static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3102{
Avi Kivity48bb5d32010-08-18 18:54:34 +03003103 u64 tsc = 0;
3104
Avi Kivity717746e2011-04-20 13:37:53 +03003105 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003106 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3107 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
Avi Kivity48bb5d32010-08-18 18:54:34 +03003108 return X86EMUL_CONTINUE;
3109}
3110
Avi Kivity222d21a2011-11-10 14:57:30 +02003111static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3112{
3113 u64 pmc;
3114
Avi Kivitydd856ef2012-08-27 23:46:17 +03003115 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
Avi Kivity222d21a2011-11-10 14:57:30 +02003116 return emulate_gp(ctxt, 0);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003117 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3118 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
Avi Kivity222d21a2011-11-10 14:57:30 +02003119 return X86EMUL_CONTINUE;
3120}
3121
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003122static int em_mov(struct x86_emulate_ctxt *ctxt)
3123{
Paolo Bonzini54cfdb32014-03-27 11:36:25 +01003124 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003125 return X86EMUL_CONTINUE;
3126}
3127
Borislav Petkov84cffe42013-10-29 12:54:56 +01003128#define FFL(x) bit(X86_FEATURE_##x)
3129
3130static int em_movbe(struct x86_emulate_ctxt *ctxt)
3131{
3132 u32 ebx, ecx, edx, eax = 1;
3133 u16 tmp;
3134
3135 /*
3136	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3137 */
3138 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3139 if (!(ecx & FFL(MOVBE)))
3140 return emulate_ud(ctxt);
3141
3142 switch (ctxt->op_bytes) {
3143 case 2:
3144 /*
3145 * From MOVBE definition: "...When the operand size is 16 bits,
3146 * the upper word of the destination register remains unchanged
3147 * ..."
3148 *
3149 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3150	 * rules, so we have to do the operation almost by hand.
3151 */
3152 tmp = (u16)ctxt->src.val;
3153 ctxt->dst.val &= ~0xffffUL;
3154 ctxt->dst.val |= (unsigned long)swab16(tmp);
3155 break;
3156 case 4:
3157 ctxt->dst.val = swab32((u32)ctxt->src.val);
3158 break;
3159 case 8:
3160 ctxt->dst.val = swab64(ctxt->src.val);
3161 break;
3162 default:
Paolo Bonzini592f0852014-08-20 10:05:08 +02003163 BUG();
Borislav Petkov84cffe42013-10-29 12:54:56 +01003164 }
3165 return X86EMUL_CONTINUE;
3166}
3167
Takuya Yoshikawabc00f8d2011-11-22 15:19:19 +09003168static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3169{
3170 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3171 return emulate_gp(ctxt, 0);
3172
3173 /* Disable writeback. */
3174 ctxt->dst.type = OP_NONE;
3175 return X86EMUL_CONTINUE;
3176}
3177
3178static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3179{
3180 unsigned long val;
3181
3182 if (ctxt->mode == X86EMUL_MODE_PROT64)
3183 val = ctxt->src.val & ~0ULL;
3184 else
3185 val = ctxt->src.val & ~0U;
3186
3187 /* #UD condition is already handled. */
3188 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3189 return emulate_gp(ctxt, 0);
3190
3191 /* Disable writeback. */
3192 ctxt->dst.type = OP_NONE;
3193 return X86EMUL_CONTINUE;
3194}
3195
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003196static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3197{
3198 u64 msr_data;
3199
Avi Kivitydd856ef2012-08-27 23:46:17 +03003200 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3201 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3202 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003203 return emulate_gp(ctxt, 0);
3204
3205 return X86EMUL_CONTINUE;
3206}
3207
3208static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3209{
3210 u64 msr_data;
3211
Avi Kivitydd856ef2012-08-27 23:46:17 +03003212 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003213 return emulate_gp(ctxt, 0);
3214
Avi Kivitydd856ef2012-08-27 23:46:17 +03003215 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3216 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003217 return X86EMUL_CONTINUE;
3218}
3219
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003220static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3221{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003222 if (ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003223 return emulate_ud(ctxt);
3224
Avi Kivity9dac77f2011-06-01 15:34:25 +03003225 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
Nadav Amitb5bbf102014-11-02 11:54:46 +02003226 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3227 ctxt->dst.bytes = 2;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003228 return X86EMUL_CONTINUE;
3229}
3230
3231static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3232{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003233 u16 sel = ctxt->src.val;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003234
Avi Kivity9dac77f2011-06-01 15:34:25 +03003235 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003236 return emulate_ud(ctxt);
3237
Avi Kivity9dac77f2011-06-01 15:34:25 +03003238 if (ctxt->modrm_reg == VCPU_SREG_SS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003239 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3240
3241 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003242 ctxt->dst.type = OP_NONE;
3243 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003244}
3245
Avi Kivitya14e5792012-06-13 12:28:33 +03003246static int em_lldt(struct x86_emulate_ctxt *ctxt)
3247{
3248 u16 sel = ctxt->src.val;
3249
3250 /* Disable writeback. */
3251 ctxt->dst.type = OP_NONE;
3252 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3253}
3254
Avi Kivity80890002012-06-13 16:33:29 +03003255static int em_ltr(struct x86_emulate_ctxt *ctxt)
3256{
3257 u16 sel = ctxt->src.val;
3258
3259 /* Disable writeback. */
3260 ctxt->dst.type = OP_NONE;
3261 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3262}
3263
Avi Kivity38503912011-03-31 18:48:09 +02003264static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3265{
Avi Kivity9fa088f2011-03-31 18:54:30 +02003266 int rc;
3267 ulong linear;
3268
Avi Kivity9dac77f2011-06-01 15:34:25 +03003269 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02003270 if (rc == X86EMUL_CONTINUE)
Avi Kivity3cb16fe2011-04-20 15:38:44 +03003271 ctxt->ops->invlpg(ctxt, linear);
Avi Kivity38503912011-03-31 18:48:09 +02003272 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003273 ctxt->dst.type = OP_NONE;
Avi Kivity38503912011-03-31 18:48:09 +02003274 return X86EMUL_CONTINUE;
3275}
3276
Avi Kivity2d04a052011-04-20 15:32:49 +03003277static int em_clts(struct x86_emulate_ctxt *ctxt)
3278{
3279 ulong cr0;
3280
3281 cr0 = ctxt->ops->get_cr(ctxt, 0);
3282 cr0 &= ~X86_CR0_TS;
3283 ctxt->ops->set_cr(ctxt, 0, cr0);
3284 return X86EMUL_CONTINUE;
3285}
3286
Avi Kivity26d05cc2011-04-21 12:07:59 +03003287static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3288{
Nadav Amit0f54a322014-08-29 11:26:55 +03003289 int rc = ctxt->ops->fix_hypercall(ctxt);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003290
Avi Kivity26d05cc2011-04-21 12:07:59 +03003291 if (rc != X86EMUL_CONTINUE)
3292 return rc;
3293
3294 /* Let the processor re-execute the fixed hypercall */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003295 ctxt->_eip = ctxt->eip;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003296 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003297 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003298 return X86EMUL_CONTINUE;
3299}
3300
Avi Kivity96051572012-06-10 17:21:18 +03003301static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3302 void (*get)(struct x86_emulate_ctxt *ctxt,
3303 struct desc_ptr *ptr))
3304{
3305 struct desc_ptr desc_ptr;
3306
3307 if (ctxt->mode == X86EMUL_MODE_PROT64)
3308 ctxt->op_bytes = 8;
3309 get(ctxt, &desc_ptr);
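	/*
	 * With a 16-bit operand size, SGDT/SIDT traditionally store
	 * only the low 24 bits of the base (286-compatible behaviour,
	 * kept here); the 16-bit limit is always written, hence the
	 * 2 + op_bytes write below.
	 */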
3310 if (ctxt->op_bytes == 2) {
3311 ctxt->op_bytes = 4;
3312 desc_ptr.address &= 0x00ffffff;
3313 }
3314 /* Disable writeback. */
3315 ctxt->dst.type = OP_NONE;
3316 return segmented_write(ctxt, ctxt->dst.addr.mem,
3317 &desc_ptr, 2 + ctxt->op_bytes);
3318}
3319
3320static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3321{
3322 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3323}
3324
3325static int em_sidt(struct x86_emulate_ctxt *ctxt)
3326{
3327 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3328}
3329
Avi Kivity26d05cc2011-04-21 12:07:59 +03003330static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3331{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003332 struct desc_ptr desc_ptr;
3333 int rc;
3334
Avi Kivity510425f2012-06-07 17:04:36 +03003335 if (ctxt->mode == X86EMUL_MODE_PROT64)
3336 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003337 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Avi Kivity26d05cc2011-04-21 12:07:59 +03003338 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003339 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003340 if (rc != X86EMUL_CONTINUE)
3341 return rc;
3342 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3343 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003344 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003345 return X86EMUL_CONTINUE;
3346}
3347
Avi Kivity5ef39c72011-04-21 12:21:50 +03003348static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003349{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003350 int rc;
3351
Avi Kivity5ef39c72011-04-21 12:21:50 +03003352 rc = ctxt->ops->fix_hypercall(ctxt);
3353
Avi Kivity26d05cc2011-04-21 12:07:59 +03003354 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003355 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003356 return rc;
3357}
3358
3359static int em_lidt(struct x86_emulate_ctxt *ctxt)
3360{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003361 struct desc_ptr desc_ptr;
3362 int rc;
3363
Avi Kivity510425f2012-06-07 17:04:36 +03003364 if (ctxt->mode == X86EMUL_MODE_PROT64)
3365 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003366 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Takuya Yoshikawa509cf9f2011-05-02 02:25:07 +09003367 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003368 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003369 if (rc != X86EMUL_CONTINUE)
3370 return rc;
3371 ctxt->ops->set_idt(ctxt, &desc_ptr);
3372 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003373 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003374 return X86EMUL_CONTINUE;
3375}
3376
3377static int em_smsw(struct x86_emulate_ctxt *ctxt)
3378{
Nadav Amit32e94d02014-06-02 18:34:11 +03003379 if (ctxt->dst.type == OP_MEM)
3380 ctxt->dst.bytes = 2;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003381 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003382 return X86EMUL_CONTINUE;
3383}
3384
3385static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3386{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003387 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003388 | (ctxt->src.val & 0x0f));
3389 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003390 return X86EMUL_CONTINUE;
3391}
3392
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003393static int em_loop(struct x86_emulate_ctxt *ctxt)
3394{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003395 int rc = X86EMUL_CONTINUE;
3396
Avi Kivitydd856ef2012-08-27 23:46:17 +03003397 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3398 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
Avi Kivity9dac77f2011-06-01 15:34:25 +03003399 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
Nadav Amit234f3ce2014-09-18 22:39:38 +03003400 rc = jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003401
Nadav Amit234f3ce2014-09-18 22:39:38 +03003402 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003403}
3404
3405static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3406{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003407 int rc = X86EMUL_CONTINUE;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003408
Nadav Amit234f3ce2014-09-18 22:39:38 +03003409 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3410 rc = jmp_rel(ctxt, ctxt->src.val);
3411
3412 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003413}
3414
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003415static int em_in(struct x86_emulate_ctxt *ctxt)
3416{
3417 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3418 &ctxt->dst.val))
3419 return X86EMUL_IO_NEEDED;
3420
3421 return X86EMUL_CONTINUE;
3422}
3423
3424static int em_out(struct x86_emulate_ctxt *ctxt)
3425{
3426 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3427 &ctxt->src.val, 1);
3428 /* Disable writeback. */
3429 ctxt->dst.type = OP_NONE;
3430 return X86EMUL_CONTINUE;
3431}
3432
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003433static int em_cli(struct x86_emulate_ctxt *ctxt)
3434{
3435 if (emulator_bad_iopl(ctxt))
3436 return emulate_gp(ctxt, 0);
3437
3438 ctxt->eflags &= ~X86_EFLAGS_IF;
3439 return X86EMUL_CONTINUE;
3440}
3441
3442static int em_sti(struct x86_emulate_ctxt *ctxt)
3443{
3444 if (emulator_bad_iopl(ctxt))
3445 return emulate_gp(ctxt, 0);
3446
3447 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3448 ctxt->eflags |= X86_EFLAGS_IF;
3449 return X86EMUL_CONTINUE;
3450}
3451
Avi Kivity6d6eede2012-06-07 14:11:36 +03003452static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3453{
3454 u32 eax, ebx, ecx, edx;
3455
Avi Kivitydd856ef2012-08-27 23:46:17 +03003456 eax = reg_read(ctxt, VCPU_REGS_RAX);
3457 ecx = reg_read(ctxt, VCPU_REGS_RCX);
Avi Kivity6d6eede2012-06-07 14:11:36 +03003458 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003459 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3460 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3461 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3462 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
Avi Kivity6d6eede2012-06-07 14:11:36 +03003463 return X86EMUL_CONTINUE;
3464}
3465
Paolo Bonzini98f73632013-10-31 11:19:42 +01003466static int em_sahf(struct x86_emulate_ctxt *ctxt)
3467{
3468 u32 flags;
3469
3470 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3471 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3472
3473 ctxt->eflags &= ~0xffUL;
3474 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3475 return X86EMUL_CONTINUE;
3476}
3477
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003478static int em_lahf(struct x86_emulate_ctxt *ctxt)
3479{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003480 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3481 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003482 return X86EMUL_CONTINUE;
3483}
3484
Avi Kivity92998362012-06-13 12:25:06 +03003485static int em_bswap(struct x86_emulate_ctxt *ctxt)
3486{
3487 switch (ctxt->op_bytes) {
3488#ifdef CONFIG_X86_64
3489 case 8:
3490 asm("bswap %0" : "+r"(ctxt->dst.val));
3491 break;
3492#endif
3493 default:
3494 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3495 break;
3496 }
3497 return X86EMUL_CONTINUE;
3498}
3499
Nadav Amit13e457e2014-10-13 13:04:13 +03003500static int em_clflush(struct x86_emulate_ctxt *ctxt)
3501{
3502 /* emulating clflush regardless of cpuid */
3503 return X86EMUL_CONTINUE;
3504}
3505
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003506static bool valid_cr(int nr)
3507{
3508 switch (nr) {
3509 case 0:
3510 case 2 ... 4:
3511 case 8:
3512 return true;
3513 default:
3514 return false;
3515 }
3516}
3517
3518static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3519{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003520 if (!valid_cr(ctxt->modrm_reg))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003521 return emulate_ud(ctxt);
3522
3523 return X86EMUL_CONTINUE;
3524}
3525
3526static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3527{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003528 u64 new_val = ctxt->src.val64;
3529 int cr = ctxt->modrm_reg;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003530 u64 efer = 0;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003531
3532 static u64 cr_reserved_bits[] = {
3533 0xffffffff00000000ULL,
3534 0, 0, 0, /* CR3 checked later */
3535 CR4_RESERVED_BITS,
3536 0, 0, 0,
3537 CR8_RESERVED_BITS,
3538 };
3539
3540 if (!valid_cr(cr))
3541 return emulate_ud(ctxt);
3542
3543 if (new_val & cr_reserved_bits[cr])
3544 return emulate_gp(ctxt, 0);
3545
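	/*
	 * Architectural consistency checks beyond the reserved-bit
	 * mask: CR0.PG requires CR0.PE, CR0.NW requires CR0.CD, and
	 * enabling paging while EFER.LME is set (activating long mode)
	 * requires CR4.PAE.
	 */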
3546 switch (cr) {
3547 case 0: {
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003548 u64 cr4;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003549 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3550 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3551 return emulate_gp(ctxt, 0);
3552
Avi Kivity717746e2011-04-20 13:37:53 +03003553 cr4 = ctxt->ops->get_cr(ctxt, 4);
3554 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003555
3556 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3557 !(cr4 & X86_CR4_PAE))
3558 return emulate_gp(ctxt, 0);
3559
3560 break;
3561 }
3562 case 3: {
3563 u64 rsvd = 0;
3564
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003565 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3566 if (efer & EFER_LMA)
Nadav Amit9d88fca2014-11-02 11:54:52 +02003567 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003568
3569 if (new_val & rsvd)
3570 return emulate_gp(ctxt, 0);
3571
3572 break;
3573 }
3574 case 4: {
Avi Kivity717746e2011-04-20 13:37:53 +03003575 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003576
3577 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3578 return emulate_gp(ctxt, 0);
3579
3580 break;
3581 }
3582 }
3583
3584 return X86EMUL_CONTINUE;
3585}
3586
Joerg Roedel3b88e412011-04-04 12:39:29 +02003587static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3588{
3589 unsigned long dr7;
3590
Avi Kivity717746e2011-04-20 13:37:53 +03003591 ctxt->ops->get_dr(ctxt, 7, &dr7);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003592
3593	 /* Check if DR7.GD (general detect enable, bit 13) is set */
3594 return dr7 & (1 << 13);
3595}
3596
3597static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3598{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003599 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003600 u64 cr4;
3601
3602 if (dr > 7)
3603 return emulate_ud(ctxt);
3604
Avi Kivity717746e2011-04-20 13:37:53 +03003605 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003606 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3607 return emulate_ud(ctxt);
3608
Nadav Amit6d2a0522014-11-02 11:54:43 +02003609 if (check_dr7_gd(ctxt)) {
3610 ulong dr6;
3611
3612 ctxt->ops->get_dr(ctxt, 6, &dr6);
3613 dr6 &= ~15;
3614 dr6 |= DR6_BD | DR6_RTM;
3615 ctxt->ops->set_dr(ctxt, 6, dr6);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003616 return emulate_db(ctxt);
Nadav Amit6d2a0522014-11-02 11:54:43 +02003617 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02003618
3619 return X86EMUL_CONTINUE;
3620}
3621
3622static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3623{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003624 u64 new_val = ctxt->src.val64;
3625 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003626
3627 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3628 return emulate_gp(ctxt, 0);
3629
3630 return check_dr_read(ctxt);
3631}
3632
Joerg Roedel01de8b02011-04-04 12:39:31 +02003633static int check_svme(struct x86_emulate_ctxt *ctxt)
3634{
3635 u64 efer;
3636
Avi Kivity717746e2011-04-20 13:37:53 +03003637 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003638
3639 if (!(efer & EFER_SVME))
3640 return emulate_ud(ctxt);
3641
3642 return X86EMUL_CONTINUE;
3643}
3644
3645static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3646{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003647 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003648
3649 /* Valid physical address? */
Randy Dunlapd4224442011-04-21 09:09:22 -07003650 if (rax & 0xffff000000000000ULL)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003651 return emulate_gp(ctxt, 0);
3652
3653 return check_svme(ctxt);
3654}
3655
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003656static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3657{
Avi Kivity717746e2011-04-20 13:37:53 +03003658 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003659
Avi Kivity717746e2011-04-20 13:37:53 +03003660 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003661 return emulate_ud(ctxt);
3662
3663 return X86EMUL_CONTINUE;
3664}
3665
Joerg Roedel80612522011-04-04 12:39:33 +02003666static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3667{
Avi Kivity717746e2011-04-20 13:37:53 +03003668 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003669 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
Joerg Roedel80612522011-04-04 12:39:33 +02003670
Avi Kivity717746e2011-04-20 13:37:53 +03003671 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
Nadav Amit67f4d422014-06-02 18:34:09 +03003672 ctxt->ops->check_pmc(ctxt, rcx))
Joerg Roedel80612522011-04-04 12:39:33 +02003673 return emulate_gp(ctxt, 0);
3674
3675 return X86EMUL_CONTINUE;
3676}
3677
Joerg Roedelf6511932011-04-04 12:39:35 +02003678static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3679{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003680 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3681	 if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003682 return emulate_gp(ctxt, 0);
3683
3684 return X86EMUL_CONTINUE;
3685}
3686
3687static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3688{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003689 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3690 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003691 return emulate_gp(ctxt, 0);
3692
3693 return X86EMUL_CONTINUE;
3694}
3695
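/*
 * Shorthand used to build the opcode tables below: D() is a bare
 * decode-flags entry, N an unimplemented slot, I()/F() an entry bound
 * to an emulation callback (F() via the fastop path), G()/GD()/E()
 * indirect through a group, group-dual or x87-escape table, EXT()
 * through a ModRM r/m extension table, GP() selects by mandatory SIMD
 * prefix, and the *I*/*IP* variants additionally attach an SVM
 * intercept id and, for *IP*, a permission-check hook.
 */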
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
                      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
        { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
        { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
          .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
        IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
                F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
                F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
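/*
 * F6ALU() thus emits the six classic ALU encodings in table order:
 * r/m8,r8; r/m,r; r8,r/m8; r,r/m; AL,imm8; rAX,imm - i.e. the
 * 0x00-0x05 style opcode layout repeated for each of add, or, adc,
 * sbb, and, sub, xor and cmp below.
 */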

static const struct opcode group7_rm0[] = {
        N,
        I(SrcNone | Priv | EmulateOnUD, em_vmcall),
        N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
        DI(SrcNone | Priv, monitor),
        DI(SrcNone | Priv, mwait),
        N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
        DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
        II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
        DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
        DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
        DIP(SrcNone | Prot | Priv, stgi, check_svme),
        DIP(SrcNone | Prot | Priv, clgi, check_svme),
        DIP(SrcNone | Prot | Priv, skinit, check_svme),
        DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};

static const struct opcode group7_rm7[] = {
        N,
        DIP(SrcNone, rdtscp, check_rdtsc),
        N, N, N, N, N, N,
};

static const struct opcode group1[] = {
        F(Lock, em_add),
        F(Lock | PageTable, em_or),
        F(Lock, em_adc),
        F(Lock, em_sbb),
        F(Lock | PageTable, em_and),
        F(Lock, em_sub),
        F(Lock, em_xor),
        F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
        I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
        F(DstMem | ModRM, em_rol),
        F(DstMem | ModRM, em_ror),
        F(DstMem | ModRM, em_rcl),
        F(DstMem | ModRM, em_rcr),
        F(DstMem | ModRM, em_shl),
        F(DstMem | ModRM, em_shr),
        F(DstMem | ModRM, em_shl),
        F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
        F(DstMem | SrcImm | NoWrite, em_test),
        F(DstMem | SrcImm | NoWrite, em_test),
        F(DstMem | SrcNone | Lock, em_not),
        F(DstMem | SrcNone | Lock, em_neg),
        F(DstXacc | Src2Mem, em_mul_ex),
        F(DstXacc | Src2Mem, em_imul_ex),
        F(DstXacc | Src2Mem, em_div_ex),
        F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
        F(ByteOp | DstMem | SrcNone | Lock, em_inc),
        F(ByteOp | DstMem | SrcNone | Lock, em_dec),
        N, N, N, N, N, N,
};

static const struct opcode group5[] = {
        F(DstMem | SrcNone | Lock, em_inc),
        F(DstMem | SrcNone | Lock, em_dec),
        I(SrcMem | NearBranch, em_call_near_abs),
        I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
        I(SrcMem | NearBranch, em_jmp_abs),
        I(SrcMemFAddr | ImplicitOps, em_jmp_far),
        I(SrcMem | Stack, em_push), D(Undefined),
};

static const struct opcode group6[] = {
        DI(Prot, sldt),
        DI(Prot, str),
        II(Prot | Priv | SrcMem16, em_lldt, lldt),
        II(Prot | Priv | SrcMem16, em_ltr, ltr),
        N, N, N, N,
};

static const struct group_dual group7 = { {
        II(Mov | DstMem, em_sgdt, sgdt),
        II(Mov | DstMem, em_sidt, sidt),
        II(SrcMem | Priv, em_lgdt, lgdt),
        II(SrcMem | Priv, em_lidt, lidt),
        II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
        II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
        II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
        EXT(0, group7_rm0),
        EXT(0, group7_rm1),
        N, EXT(0, group7_rm3),
        II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
        II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
        EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
        N, N, N, N,
        F(DstMem | SrcImmByte | NoWrite, em_bt),
        F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
        F(DstMem | SrcImmByte | Lock, em_btr),
        F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};

static const struct group_dual group9 = { {
        N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
        N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
        I(DstMem | SrcImm | Mov | PageTable, em_mov),
        X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
        I(SrcMem | ByteOp, em_clflush), N, N, N,
};

static const struct group_dual group15 = { {
        N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
        N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
        I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct gprefix pfx_0f_2b = {
        I(0, em_mov), I(0, em_mov), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
        I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
        N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
        N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
}, {
        /* 0xC0 - 0xC7 */
        N, N, N, N, N, N, N, N,
        /* 0xC8 - 0xCF */
        N, N, N, N, N, N, N, N,
        /* 0xD0 - 0xD7 */
        N, N, N, N, N, N, N, N,
        /* 0xD8 - 0xDF */
        N, N, N, N, N, N, N, N,
        /* 0xE0 - 0xE7 */
        N, N, N, N, N, N, N, N,
        /* 0xE8 - 0xEF */
        N, N, N, N, N, N, N, N,
        /* 0xF0 - 0xF7 */
        N, N, N, N, N, N, N, N,
        /* 0xF8 - 0xFF */
        N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
        N, N, N, N, N, N, N, N,
}, {
        /* 0xC0 - 0xC7 */
        N, N, N, N, N, N, N, N,
        /* 0xC8 - 0xCF */
        N, N, N, N, N, N, N, N,
        /* 0xD0 - 0xD7 */
        N, N, N, N, N, N, N, N,
        /* 0xD8 - 0xDF */
        N, N, N, N, N, N, N, N,
        /* 0xE0 - 0xE7 */
        N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
        /* 0xE8 - 0xEF */
        N, N, N, N, N, N, N, N,
        /* 0xF0 - 0xF7 */
        N, N, N, N, N, N, N, N,
        /* 0xF8 - 0xFF */
        N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
        N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
}, {
        /* 0xC0 - 0xC7 */
        N, N, N, N, N, N, N, N,
        /* 0xC8 - 0xCF */
        N, N, N, N, N, N, N, N,
        /* 0xD0 - 0xD7 */
        N, N, N, N, N, N, N, N,
        /* 0xD8 - 0xDF */
        N, N, N, N, N, N, N, N,
        /* 0xE0 - 0xE7 */
        N, N, N, N, N, N, N, N,
        /* 0xE8 - 0xEF */
        N, N, N, N, N, N, N, N,
        /* 0xF0 - 0xF7 */
        N, N, N, N, N, N, N, N,
        /* 0xF8 - 0xFF */
        N, N, N, N, N, N, N, N,
} };

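/*
 * Main one-byte opcode table, indexed directly by the primary opcode
 * byte.  For example, opcode 0x50 (push rAX) lands on the X8(...)
 * push entry below, while 0xff routes through group5, where the ModRM
 * reg field selects inc/dec/call/jmp/push.
 */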
static const struct opcode opcode_table[256] = {
        /* 0x00 - 0x07 */
        F6ALU(Lock, em_add),
        I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
        I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
        /* 0x08 - 0x0F */
        F6ALU(Lock | PageTable, em_or),
        I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
        N,
        /* 0x10 - 0x17 */
        F6ALU(Lock, em_adc),
        I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
        I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
        /* 0x18 - 0x1F */
        F6ALU(Lock, em_sbb),
        I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
        I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
        /* 0x20 - 0x27 */
        F6ALU(Lock | PageTable, em_and), N, N,
        /* 0x28 - 0x2F */
        F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
        /* 0x30 - 0x37 */
        F6ALU(Lock, em_xor), N, N,
        /* 0x38 - 0x3F */
        F6ALU(NoWrite, em_cmp), N, N,
        /* 0x40 - 0x4F */
        X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
        /* 0x50 - 0x57 */
        X8(I(SrcReg | Stack, em_push)),
        /* 0x58 - 0x5F */
        X8(I(DstReg | Stack, em_pop)),
        /* 0x60 - 0x67 */
        I(ImplicitOps | Stack | No64, em_pusha),
        I(ImplicitOps | Stack | No64, em_popa),
        N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */,
        N, N, N, N,
        /* 0x68 - 0x6F */
        I(SrcImm | Mov | Stack, em_push),
        I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
        I(SrcImmByte | Mov | Stack, em_push),
        I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
        I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
        I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
        /* 0x70 - 0x7F */
        X16(D(SrcImmByte | NearBranch)),
        /* 0x80 - 0x87 */
        G(ByteOp | DstMem | SrcImm, group1),
        G(DstMem | SrcImm, group1),
        G(ByteOp | DstMem | SrcImm | No64, group1),
        G(DstMem | SrcImmByte, group1),
        F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
        I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
        /* 0x88 - 0x8F */
        I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
        I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
        I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
        D(ModRM | SrcMem | NoAccess | DstReg),
        I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
        G(0, group1A),
        /* 0x90 - 0x97 */
        DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
        /* 0x98 - 0x9F */
        D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
        I(SrcImmFAddr | No64, em_call_far), N,
        II(ImplicitOps | Stack, em_pushf, pushf),
        II(ImplicitOps | Stack, em_popf, popf),
        I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
        /* 0xA0 - 0xA7 */
        I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
        I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
        I2bv(SrcSI | DstDI | Mov | String, em_mov),
        F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
        /* 0xA8 - 0xAF */
        F2bv(DstAcc | SrcImm | NoWrite, em_test),
        I2bv(SrcAcc | DstDI | Mov | String, em_mov),
        I2bv(SrcSI | DstAcc | Mov | String, em_mov),
        F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
        /* 0xB0 - 0xB7 */
        X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
        /* 0xB8 - 0xBF */
        X8(I(DstReg | SrcImm64 | Mov, em_mov)),
        /* 0xC0 - 0xC7 */
        G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
        I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
        I(ImplicitOps | NearBranch, em_ret),
        I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
        I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
        G(ByteOp, group11), G(0, group11),
        /* 0xC8 - 0xCF */
        I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
        I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
        I(ImplicitOps | Stack, em_ret_far),
        D(ImplicitOps), DI(SrcImmByte, intn),
        D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
        /* 0xD0 - 0xD7 */
        G(Src2One | ByteOp, group2), G(Src2One, group2),
        G(Src2CL | ByteOp, group2), G(Src2CL, group2),
        I(DstAcc | SrcImmUByte | No64, em_aam),
        I(DstAcc | SrcImmUByte | No64, em_aad),
        F(DstAcc | ByteOp | No64, em_salc),
        I(DstAcc | SrcXLat | ByteOp, em_mov),
        /* 0xD8 - 0xDF */
        N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
        /* 0xE0 - 0xE7 */
        X3(I(SrcImmByte | NearBranch, em_loop)),
        I(SrcImmByte | NearBranch, em_jcxz),
        I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
        I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
        /* 0xE8 - 0xEF */
        I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
        I(SrcImmFAddr | No64, em_jmp_far),
        D(SrcImmByte | ImplicitOps | NearBranch),
        I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
        I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
        /* 0xF0 - 0xF7 */
        N, DI(ImplicitOps, icebp), N, N,
        DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
        G(ByteOp, group3), G(0, group3),
        /* 0xF8 - 0xFF */
        D(ImplicitOps), D(ImplicitOps),
        I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
        D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static const struct opcode twobyte_table[256] = {
        /* 0x00 - 0x0F */
        G(0, group6), GD(0, &group7), N, N,
        N, I(ImplicitOps | EmulateOnUD, em_syscall),
        II(ImplicitOps | Priv, em_clts, clts), N,
        DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
        N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
        /* 0x10 - 0x1F */
        N, N, N, N, N, N, N, N,
        D(ImplicitOps | ModRM | SrcMem | NoAccess),
        N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
        /* 0x20 - 0x2F */
        DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
        DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
        IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
                                                check_cr_write),
        IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
                                                check_dr_write),
        N, N, N, N,
        GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
        GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
        N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
        N, N, N, N,
        /* 0x30 - 0x3F */
        II(ImplicitOps | Priv, em_wrmsr, wrmsr),
        IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
        II(ImplicitOps | Priv, em_rdmsr, rdmsr),
        IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
        I(ImplicitOps | EmulateOnUD, em_sysenter),
        I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
        N, N,
        N, N, N, N, N, N, N, N,
        /* 0x40 - 0x4F */
        X16(D(DstReg | SrcMem | ModRM)),
        /* 0x50 - 0x5F */
        N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
        /* 0x60 - 0x6F */
        N, N, N, N,
        N, N, N, N,
        N, N, N, N,
        N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
        /* 0x70 - 0x7F */
        N, N, N, N,
        N, N, N, N,
        N, N, N, N,
        N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
        /* 0x80 - 0x8F */
        X16(D(SrcImm | NearBranch)),
        /* 0x90 - 0x9F */
        X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
        /* 0xA0 - 0xA7 */
        I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
        II(ImplicitOps, em_cpuid, cpuid),
        F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
        F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
        F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
        /* 0xA8 - 0xAF */
        I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
        DI(ImplicitOps, rsm),
        F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
        F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
        F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
        GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
        /* 0xB0 - 0xB7 */
        I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
        I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
        F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
        I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
        I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
        D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
        /* 0xB8 - 0xBF */
        N, N,
        G(BitOp, group8),
        F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
        F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
        D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
        /* 0xC0 - 0xC7 */
        F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
        N, D(DstMem | SrcReg | ModRM | Mov),
        N, N, N, GD(0, &group9),
        /* 0xC8 - 0xCF */
        X8(I(DstReg, em_bswap)),
        /* 0xD0 - 0xDF */
        N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
        /* 0xE0 - 0xEF */
        N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
        N, N, N, N, N, N, N, N,
        /* 0xF0 - 0xFF */
        N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct gprefix three_byte_0f_38_f0 = {
        I(DstReg | SrcMem | Mov, em_movbe), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
        I(DstMem | SrcReg | Mov, em_movbe), N, N, N
};

/*
 * Insns below are selected by the prefix, which is indexed by the
 * third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
        /* 0x00 - 0x7f */
        X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
        /* 0x80 - 0xef */
        X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
        /* 0xf0 - 0xf1 */
        GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
        GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
        /* 0xf2 - 0xff */
        N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU

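/*
 * Immediate operands never exceed four bytes except for the
 * MOV r64,imm64 forms (0xb8-0xbf, handled via OpImm64): a "full-size"
 * immediate in 64-bit mode is capped at four bytes here and then
 * sign-extended by decode_imm() below.
 */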
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
        unsigned size;

        size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        if (size == 8)
                size = 4;
        return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
                      unsigned size, bool sign_extension)
{
        int rc = X86EMUL_CONTINUE;

        op->type = OP_IMM;
        op->bytes = size;
        op->addr.mem.ea = ctxt->_eip;
        /* NB. Immediates are sign-extended as necessary. */
        switch (op->bytes) {
        case 1:
                op->val = insn_fetch(s8, ctxt);
                break;
        case 2:
                op->val = insn_fetch(s16, ctxt);
                break;
        case 4:
                op->val = insn_fetch(s32, ctxt);
                break;
        case 8:
                op->val = insn_fetch(s64, ctxt);
                break;
        }
        if (!sign_extension) {
                switch (op->bytes) {
                case 1:
                        op->val &= 0xff;
                        break;
                case 2:
                        op->val &= 0xffff;
                        break;
                case 4:
                        op->val &= 0xffffffff;
                        break;
                }
        }
done:
        return rc;
}

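/*
 * Translate one Op* operand descriptor from the decode flags into a
 * concrete struct operand: a register, a memory reference, a fetched
 * immediate, or an implied constant such as OpOne.  OpImplicit leaves
 * the operand alone for instructions that decode it themselves.
 */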
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                          unsigned d)
{
        int rc = X86EMUL_CONTINUE;

        switch (d) {
        case OpReg:
                decode_register_operand(ctxt, op);
                break;
        case OpImmUByte:
                rc = decode_imm(ctxt, op, 1, false);
                break;
        case OpMem:
                ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        mem_common:
                *op = ctxt->memop;
                ctxt->memopp = op;
                if (ctxt->d & BitOp)
                        fetch_bit_operand(ctxt);
                op->orig_val = op->val;
                break;
        case OpMem64:
                ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
                goto mem_common;
        case OpAcc:
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
                fetch_register_operand(op);
                op->orig_val = op->val;
                break;
        case OpAccLo:
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
                fetch_register_operand(op);
                op->orig_val = op->val;
                break;
        case OpAccHi:
                if (ctxt->d & ByteOp) {
                        op->type = OP_NONE;
                        break;
                }
                op->type = OP_REG;
                op->bytes = ctxt->op_bytes;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
                fetch_register_operand(op);
                op->orig_val = op->val;
                break;
        case OpDI:
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
                        register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
                op->addr.mem.seg = VCPU_SREG_ES;
                op->val = 0;
                op->count = 1;
                break;
        case OpDX:
                op->type = OP_REG;
                op->bytes = 2;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
                fetch_register_operand(op);
                break;
        case OpCL:
                op->bytes = 1;
                op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
                break;
        case OpImmByte:
                rc = decode_imm(ctxt, op, 1, true);
                break;
        case OpOne:
                op->bytes = 1;
                op->val = 1;
                break;
        case OpImm:
                rc = decode_imm(ctxt, op, imm_size(ctxt), true);
                break;
        case OpImm64:
                rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
                break;
        case OpMem8:
                ctxt->memop.bytes = 1;
                if (ctxt->memop.type == OP_REG) {
                        ctxt->memop.addr.reg = decode_register(ctxt,
                                        ctxt->modrm_rm, true);
                        fetch_register_operand(&ctxt->memop);
                }
                goto mem_common;
        case OpMem16:
                ctxt->memop.bytes = 2;
                goto mem_common;
        case OpMem32:
                ctxt->memop.bytes = 4;
                goto mem_common;
        case OpImmU16:
                rc = decode_imm(ctxt, op, 2, false);
                break;
        case OpImmU:
                rc = decode_imm(ctxt, op, imm_size(ctxt), false);
                break;
        case OpSI:
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
                        register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
                op->addr.mem.seg = ctxt->seg_override;
                op->val = 0;
                op->count = 1;
                break;
        case OpXLat:
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
                        register_address(ctxt,
                                reg_read(ctxt, VCPU_REGS_RBX) +
                                (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
                op->addr.mem.seg = ctxt->seg_override;
                op->val = 0;
                break;
        case OpImmFAddr:
                op->type = OP_IMM;
                op->addr.mem.ea = ctxt->_eip;
                op->bytes = ctxt->op_bytes + 2;
                insn_fetch_arr(op->valptr, op->bytes, ctxt);
                break;
        case OpMemFAddr:
                ctxt->memop.bytes = ctxt->op_bytes + 2;
                goto mem_common;
        case OpES:
                op->val = VCPU_SREG_ES;
                break;
        case OpCS:
                op->val = VCPU_SREG_CS;
                break;
        case OpSS:
                op->val = VCPU_SREG_SS;
                break;
        case OpDS:
                op->val = VCPU_SREG_DS;
                break;
        case OpFS:
                op->val = VCPU_SREG_FS;
                break;
        case OpGS:
                op->val = VCPU_SREG_GS;
                break;
        case OpImplicit:
                /* Special instructions do their own operand decoding. */
        default:
                op->type = OP_NONE; /* Disable writeback. */
                break;
        }

done:
        return rc;
}

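/*
 * Full decode pass: fetch and apply legacy/REX prefixes, look the
 * opcode up in the one-, two- or three-byte tables, resolve any
 * Group/GroupDual/RMExt/Prefix/Escape indirection, then decode the
 * ModRM/SIB bytes and up to three operands.  On EMULATION_OK the ctxt
 * holds a fully decoded instruction ready for x86_emulate_insn().
 */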
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
        int rc = X86EMUL_CONTINUE;
        int mode = ctxt->mode;
        int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
        bool op_prefix = false;
        bool has_seg_override = false;
        struct opcode opcode;

        ctxt->memop.type = OP_NONE;
        ctxt->memopp = NULL;
        ctxt->_eip = ctxt->eip;
        ctxt->fetch.ptr = ctxt->fetch.data;
        ctxt->fetch.end = ctxt->fetch.data + insn_len;
        ctxt->opcode_len = 1;
        if (insn_len > 0)
                memcpy(ctxt->fetch.data, insn, insn_len);
        else {
                rc = __do_insn_fetch_bytes(ctxt, 1);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
        }

        switch (mode) {
        case X86EMUL_MODE_REAL:
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
                def_op_bytes = def_ad_bytes = 2;
                break;
        case X86EMUL_MODE_PROT32:
                def_op_bytes = def_ad_bytes = 4;
                break;
#ifdef CONFIG_X86_64
        case X86EMUL_MODE_PROT64:
                def_op_bytes = 4;
                def_ad_bytes = 8;
                break;
#endif
        default:
                return EMULATION_FAILED;
        }

        ctxt->op_bytes = def_op_bytes;
        ctxt->ad_bytes = def_ad_bytes;

        /* Legacy prefixes. */
        for (;;) {
                switch (ctxt->b = insn_fetch(u8, ctxt)) {
                case 0x66:      /* operand-size override */
                        op_prefix = true;
                        /* switch between 2/4 bytes */
                        ctxt->op_bytes = def_op_bytes ^ 6;
                        break;
                case 0x67:      /* address-size override */
                        if (mode == X86EMUL_MODE_PROT64)
                                /* switch between 4/8 bytes */
                                ctxt->ad_bytes = def_ad_bytes ^ 12;
                        else
                                /* switch between 2/4 bytes */
                                ctxt->ad_bytes = def_ad_bytes ^ 6;
                        break;
                case 0x26:      /* ES override */
                case 0x2e:      /* CS override */
                case 0x36:      /* SS override */
                case 0x3e:      /* DS override */
                        has_seg_override = true;
                        ctxt->seg_override = (ctxt->b >> 3) & 3;
                        break;
                case 0x64:      /* FS override */
                case 0x65:      /* GS override */
                        has_seg_override = true;
                        ctxt->seg_override = ctxt->b & 7;
                        break;
                case 0x40 ... 0x4f: /* REX */
                        if (mode != X86EMUL_MODE_PROT64)
                                goto done_prefixes;
                        ctxt->rex_prefix = ctxt->b;
                        continue;
                case 0xf0:      /* LOCK */
                        ctxt->lock_prefix = 1;
                        break;
                case 0xf2:      /* REPNE/REPNZ */
                case 0xf3:      /* REP/REPE/REPZ */
                        ctxt->rep_prefix = ctxt->b;
                        break;
                default:
                        goto done_prefixes;
                }

                /* Any legacy prefix after a REX prefix nullifies its effect. */

                ctxt->rex_prefix = 0;
        }

done_prefixes:

        /* REX prefix. */
        if (ctxt->rex_prefix & 8)
                ctxt->op_bytes = 8;     /* REX.W */

        /* Opcode byte(s). */
        opcode = opcode_table[ctxt->b];
        /* Two-byte opcode? */
        if (ctxt->b == 0x0f) {
                ctxt->opcode_len = 2;
                ctxt->b = insn_fetch(u8, ctxt);
                opcode = twobyte_table[ctxt->b];

                /* 0F_38 opcode map */
                if (ctxt->b == 0x38) {
                        ctxt->opcode_len = 3;
                        ctxt->b = insn_fetch(u8, ctxt);
                        opcode = opcode_map_0f_38[ctxt->b];
                }
        }
        ctxt->d = opcode.flags;

        if (ctxt->d & ModRM)
                ctxt->modrm = insn_fetch(u8, ctxt);

        /* vex-prefix instructions are not implemented */
        if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
            (mode == X86EMUL_MODE_PROT64 ||
             (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
                ctxt->d = NotImpl;
        }

        while (ctxt->d & GroupMask) {
                switch (ctxt->d & GroupMask) {
                case Group:
                        goffset = (ctxt->modrm >> 3) & 7;
                        opcode = opcode.u.group[goffset];
                        break;
                case GroupDual:
                        goffset = (ctxt->modrm >> 3) & 7;
                        if ((ctxt->modrm >> 6) == 3)
                                opcode = opcode.u.gdual->mod3[goffset];
                        else
                                opcode = opcode.u.gdual->mod012[goffset];
                        break;
                case RMExt:
                        goffset = ctxt->modrm & 7;
                        opcode = opcode.u.group[goffset];
                        break;
                case Prefix:
                        if (ctxt->rep_prefix && op_prefix)
                                return EMULATION_FAILED;
                        simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
                        switch (simd_prefix) {
                        case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
                        case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
                        case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
                        case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
                        }
                        break;
                case Escape:
                        if (ctxt->modrm > 0xbf)
                                opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
                        else
                                opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
                        break;
                default:
                        return EMULATION_FAILED;
                }

                ctxt->d &= ~(u64)GroupMask;
                ctxt->d |= opcode.flags;
        }

        /* Unrecognised? */
        if (ctxt->d == 0)
                return EMULATION_FAILED;

        ctxt->execute = opcode.u.execute;

        if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
                return EMULATION_FAILED;

        if (unlikely(ctxt->d &
            (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch))) {
                /*
                 * These are copied unconditionally here, and checked
                 * unconditionally in x86_emulate_insn.
                 */
                ctxt->check_perm = opcode.check_perm;
                ctxt->intercept = opcode.intercept;

                if (ctxt->d & NotImpl)
                        return EMULATION_FAILED;

                if (mode == X86EMUL_MODE_PROT64) {
                        if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
                                ctxt->op_bytes = 8;
                        else if (ctxt->d & NearBranch)
                                ctxt->op_bytes = 8;
                }

                if (ctxt->d & Op3264) {
                        if (mode == X86EMUL_MODE_PROT64)
                                ctxt->op_bytes = 8;
                        else
                                ctxt->op_bytes = 4;
                }

                if (ctxt->d & Sse)
                        ctxt->op_bytes = 16;
                else if (ctxt->d & Mmx)
                        ctxt->op_bytes = 8;
        }

        /* ModRM and SIB bytes. */
        if (ctxt->d & ModRM) {
                rc = decode_modrm(ctxt, &ctxt->memop);
                if (!has_seg_override) {
                        has_seg_override = true;
                        ctxt->seg_override = ctxt->modrm_seg;
                }
        } else if (ctxt->d & MemAbs)
                rc = decode_abs(ctxt, &ctxt->memop);
        if (rc != X86EMUL_CONTINUE)
                goto done;

        if (!has_seg_override)
                ctxt->seg_override = VCPU_SREG_DS;

        ctxt->memop.addr.mem.seg = ctxt->seg_override;

        /*
         * Decode and fetch the source operand: register, memory
         * or immediate.
         */
        rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
        if (rc != X86EMUL_CONTINUE)
                goto done;

        /*
         * Decode and fetch the second source operand: register, memory
         * or immediate.
         */
        rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
        if (rc != X86EMUL_CONTINUE)
                goto done;

        /* Decode and fetch the destination operand: register or memory. */
        rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

        if (ctxt->rip_relative)
                ctxt->memopp->addr.mem.ea += ctxt->_eip;

done:
        return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
        return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
        /*
         * The second termination condition only applies for REPE and
         * REPNE.  Test whether the repeat string operation prefix is
         * REPE/REPZ or REPNE/REPNZ and, if so, check the corresponding
         * termination condition:
         *   - if REPE/REPZ and ZF = 0 then done
         *   - if REPNE/REPNZ and ZF = 1 then done
         */
        if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
             (ctxt->b == 0xae) || (ctxt->b == 0xaf))
            && (((ctxt->rep_prefix == REPE_PREFIX) &&
                 ((ctxt->eflags & EFLG_ZF) == 0))
                || ((ctxt->rep_prefix == REPNE_PREFIX) &&
                    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
                return true;

        return false;
}

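/*
 * Execute a bare fwait so that any pending x87 exception is delivered
 * now, where the exception-table fixup below turns it into a guest
 * #MF, rather than firing asynchronously in the middle of emulation.
 */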
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
        bool fault = false;

        ctxt->ops->get_fpu(ctxt);
        asm volatile("1: fwait \n\t"
                     "2: \n\t"
                     ".pushsection .fixup,\"ax\" \n\t"
                     "3: \n\t"
                     "movb $1, %[fault] \n\t"
                     "jmp 2b \n\t"
                     ".popsection \n\t"
                     _ASM_EXTABLE(1b, 3b)
                     : [fault]"+qm"(fault));
        ctxt->ops->put_fpu(ctxt);

        if (unlikely(fault))
                return emulate_exception(ctxt, MF_VECTOR, 0, false);

        return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
                                       struct operand *op)
{
        if (op->type == OP_MM)
                read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

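/*
 * Fastop calling convention (see the FASTOP_SIZE stubs earlier in
 * this file): dst is passed in rax, src in rdx, src2 in rcx, and the
 * flags travel in and out through push/popf around the call.  Each
 * fastop implementation provides byte/word/long/quad variants laid
 * out FASTOP_SIZE bytes apart, so the right variant is selected by
 * offsetting the function pointer by log2(dst.bytes) * FASTOP_SIZE.
 */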
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
        ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
        if (!(ctxt->d & ByteOp))
                fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
        asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
            : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
              [fastop]"+S"(fop)
            : "c"(ctxt->src2.val));
        ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
        if (!fop) /* exception is returned in fop variable */
                return emulate_de(ctxt);
        return X86EMUL_CONTINUE;
}

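/*
 * Reset the per-instruction decode state.  This relies on the decoded
 * fields being laid out contiguously in struct x86_emulate_ctxt, from
 * rip_relative up to (but not including) modrm, so a single memset
 * clears them all.
 */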
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
        memset(&ctxt->rip_relative, 0,
               (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

        ctxt->io_read.pos = 0;
        ctxt->io_read.end = 0;
        ctxt->mem_read.end = 0;
}

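/*
 * Execute one previously decoded instruction: run the LOCK/CPL/mode
 * and intercept checks, read the memory operands, dispatch to the
 * execute/fastop callback (or the special-case switches further
 * down), and finally write back the result and advance RIP.
 */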
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09004733int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004734{
Mathias Krause0225fb52012-08-30 01:30:16 +02004735 const struct x86_emulate_ops *ops = ctxt->ops;
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09004736 int rc = X86EMUL_CONTINUE;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004737 int saved_dst_type = ctxt->dst.type;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004738
Avi Kivity9dac77f2011-06-01 15:34:25 +03004739 ctxt->mem_read.pos = 0;
Glauber Costa310b5d32009-05-12 16:21:06 -04004740
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004741 /* LOCK prefix is allowed only with some instructions */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004742 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004743 rc = emulate_ud(ctxt);
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004744 goto done;
4745 }
4746
Avi Kivity9dac77f2011-06-01 15:34:25 +03004747 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004748 rc = emulate_ud(ctxt);
Avi Kivity081bca02010-08-26 11:06:15 +03004749 goto done;
4750 }
4751
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004752 if (unlikely(ctxt->d &
4753 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4754 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4755 (ctxt->d & Undefined)) {
4756 rc = emulate_ud(ctxt);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004757 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004758 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004759
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004760 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4761 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4762 rc = emulate_ud(ctxt);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004763 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004764 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004765
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004766 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4767 rc = emulate_nm(ctxt);
Joerg Roedeld09beab2011-04-04 12:39:25 +02004768 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004769 }
Joerg Roedeld09beab2011-04-04 12:39:25 +02004770
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004771 if (ctxt->d & Mmx) {
4772 rc = flush_pending_x87_faults(ctxt);
4773 if (rc != X86EMUL_CONTINUE)
4774 goto done;
4775 /*
4776 * Now that we know the fpu is exception safe, we can fetch
4777 * operands from it.
4778 */
4779 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4780 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4781 if (!(ctxt->d & Mov))
4782 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4783 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004784
Bandan Das685bbf42014-04-16 12:46:10 -04004785 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004786 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4787 X86_ICPT_PRE_EXCEPT);
4788 if (rc != X86EMUL_CONTINUE)
4789 goto done;
4790 }
4791
4792 /* Privileged instruction can be executed only in CPL=0 */
4793 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
Nadav Amit68efa762014-06-18 17:19:35 +03004794 if (ctxt->d & PrivUD)
4795 rc = emulate_ud(ctxt);
4796 else
4797 rc = emulate_gp(ctxt, 0);
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004798 goto done;
4799 }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004800
4801 /* Instruction can only be executed in protected mode */
4802 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4803 rc = emulate_ud(ctxt);
4804 goto done;
4805 }
4806
4807 /* Do instruction specific permission checks */
Bandan Das685bbf42014-04-16 12:46:10 -04004808 if (ctxt->d & CheckPerm) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004809 rc = ctxt->check_perm(ctxt);
4810 if (rc != X86EMUL_CONTINUE)
4811 goto done;
4812 }
4813
Bandan Das685bbf42014-04-16 12:46:10 -04004814 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004815 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4816 X86_ICPT_POST_EXCEPT);
4817 if (rc != X86EMUL_CONTINUE)
4818 goto done;
4819 }
4820
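		/*
		 * A REP-prefixed string instruction whose (address-size
		 * masked) RCX is already zero performs no iterations:
		 * just move RIP past it.
		 */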
4821 if (ctxt->rep_prefix && (ctxt->d & String)) {
4822 /* All REP prefixes have the same first termination condition */
4823 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4824 ctxt->eip = ctxt->_eip;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004825 ctxt->eflags &= ~EFLG_RF;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004826 goto done;
4827 }
4828 }
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004829 }
4830
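	/*
	 * Fetch the source operand from memory, unless the instruction
	 * needs only its effective address (NoAccess, e.g. lea).
	 */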
Avi Kivity9dac77f2011-06-01 15:34:25 +03004831 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4832 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4833 ctxt->src.valptr, ctxt->src.bytes);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09004834 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004835 goto done;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004836 ctxt->src.orig_val64 = ctxt->src.val64;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004837 }
4838
Avi Kivity9dac77f2011-06-01 15:34:25 +03004839 if (ctxt->src2.type == OP_MEM) {
4840 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4841 &ctxt->src2.val, ctxt->src2.bytes);
Gleb Natapove35b7b92010-02-25 16:36:42 +02004842 if (rc != X86EMUL_CONTINUE)
4843 goto done;
4844 }
4845
Avi Kivity9dac77f2011-06-01 15:34:25 +03004846 if ((ctxt->d & DstMask) == ImplicitOps)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004847 goto special_insn;
4848
Avi Kivity9dac77f2011-06-01 15:34:25 +03004850 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004851		/* optimisation - avoid the slow emulated read when Mov overwrites dst */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004852 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4853 &ctxt->dst.val, ctxt->dst.bytes);
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004854 if (rc != X86EMUL_CONTINUE)
4855 goto done;
Avi Kivity038e51d2007-01-22 20:40:40 -08004856 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004857 ctxt->dst.orig_val = ctxt->dst.val;
Avi Kivity038e51d2007-01-22 20:40:40 -08004858
Avi Kivity018a98d2007-11-27 19:30:56 +02004859special_insn:
4860
Bandan Das685bbf42014-04-16 12:46:10 -04004861 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03004862 rc = emulator_check_intercept(ctxt, ctxt->intercept,
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004863 X86_ICPT_POST_MEMACCESS);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004864 if (rc != X86EMUL_CONTINUE)
4865 goto done;
4866 }
4867
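	/*
	 * Keep RF set while a REP string instruction is making progress
	 * so an instruction breakpoint does not re-trigger on every
	 * restarted iteration; RF is cleared again once the instruction
	 * finally completes.
	 */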
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03004868 if (ctxt->rep_prefix && (ctxt->d & String))
4869 ctxt->eflags |= EFLG_RF;
4870 else
4871 ctxt->eflags &= ~EFLG_RF;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004872
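	/*
	 * Fastop handlers are small asm stubs, one per operand size,
	 * that take their operands in fixed registers; fastop() picks
	 * the stub matching ctxt->dst.bytes, primes EFLAGS, calls it,
	 * and folds the resulting arithmetic flags back into
	 * ctxt->eflags.
	 */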
Avi Kivity9dac77f2011-06-01 15:34:25 +03004873 if (ctxt->execute) {
Avi Kivitye28bbd42013-01-04 16:18:48 +02004874 if (ctxt->d & Fastop) {
4875 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4876 rc = fastop(ctxt, fop);
4877 if (rc != X86EMUL_CONTINUE)
4878 goto done;
4879 goto writeback;
4880 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004881 rc = ctxt->execute(ctxt);
Avi Kivityef65c882010-07-29 15:11:51 +03004882 if (rc != X86EMUL_CONTINUE)
4883 goto done;
4884 goto writeback;
4885 }
4886
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004887 if (ctxt->opcode_len == 2)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004888 goto twobyte_insn;
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004889 else if (ctxt->opcode_len == 3)
4890 goto threebyte_insn;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004891
Avi Kivity9dac77f2011-06-01 15:34:25 +03004892 switch (ctxt->b) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08004893 case 0x63: /* movsxd */
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004894 if (ctxt->mode != X86EMUL_MODE_PROT64)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004895 goto cannot_emulate;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004896 ctxt->dst.val = (s32) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004897 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03004898 case 0x70 ... 0x7f: /* jcc (short) */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004899 if (test_cc(ctxt->b, ctxt->eflags))
Nadav Amit234f3ce2014-09-18 22:39:38 +03004900 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02004901 break;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004902 case 0x8d: /* lea r16/r32, m */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004903 ctxt->dst.val = ctxt->src.addr.mem.ea;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004904 break;
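	/*
	 * Plain 0x90 is NOP, but with REX.B it encodes xchg %r8,%rax
	 * and must be performed as a real exchange; only the true
	 * reg == RAX case can have its writeback dropped.
	 */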
Avi Kivity3d9e77d2010-08-01 12:41:59 +03004905 case 0x90 ... 0x97: /* nop / xchg reg, rax */
Avi Kivitydd856ef2012-08-27 23:46:17 +03004906 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
Nadav Amita825f5c2014-06-15 16:13:01 +03004907 ctxt->dst.type = OP_NONE;
4908 else
4909 rc = em_xchg(ctxt);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09004910 break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004911 case 0x98: /* cbw/cwde/cdqe */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004912 switch (ctxt->op_bytes) {
4913 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4914 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4915 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004916 }
4917 break;
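	/*
	 * emulate_int() delivers software interrupts through the real
	 * mode IVT; protected-mode delivery is not handled by the
	 * emulator here.
	 */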
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004918 case 0xcc: /* int3 */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004919 rc = emulate_int(ctxt, 3);
4920 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004921 case 0xcd: /* int n */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004922 rc = emulate_int(ctxt, ctxt->src.val);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004923 break;
4924 case 0xce: /* into */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004925 if (ctxt->eflags & EFLG_OF)
4926 rc = emulate_int(ctxt, 4);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004927 break;
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004928 case 0xe9: /* jmp rel */
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004929 case 0xeb: /* jmp rel short */
Nadav Amit234f3ce2014-09-18 22:39:38 +03004930 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004931 ctxt->dst.type = OP_NONE; /* Disable writeback. */
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004932 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02004933 case 0xf4: /* hlt */
Avi Kivity6c3287f2011-04-20 15:43:05 +03004934 ctxt->ops->halt(ctxt);
Mohammed Gamal19fdfa02008-07-06 16:51:26 +03004935 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02004936 case 0xf5: /* cmc */
4937		/* complement the carry flag in eflags */
4938 ctxt->eflags ^= EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02004939 break;
4940 case 0xf8: /* clc */
4941 ctxt->eflags &= ~EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02004942 break;
Mohammed Gamal8744aa92010-08-05 15:42:49 +03004943 case 0xf9: /* stc */
4944 ctxt->eflags |= EFLG_CF;
4945 break;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004946 case 0xfc: /* cld */
4947 ctxt->eflags &= ~EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004948 break;
4949 case 0xfd: /* std */
4950 ctxt->eflags |= EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004951 break;
Avi Kivity91269b82010-07-25 14:51:16 +03004952 default:
4953 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004954 }
Avi Kivity018a98d2007-11-27 19:30:56 +02004955
Avi Kivity7d9ddae2010-08-30 17:12:28 +03004956 if (rc != X86EMUL_CONTINUE)
4957 goto done;
4958
Avi Kivity018a98d2007-11-27 19:30:56 +02004959writeback:
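	/*
	 * Writeback order: a register source modified by the
	 * instruction (SrcWrite) is flushed first, then the destination
	 * unless the instruction only sets flags (NoWrite, e.g.
	 * cmp/test).
	 */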
Avi Kivityfb32b1e2013-02-09 11:31:44 +02004960 if (ctxt->d & SrcWrite) {
4961 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
4962 rc = writeback(ctxt, &ctxt->src);
4963 if (rc != X86EMUL_CONTINUE)
4964 goto done;
4965 }
Nadav Amitee212292014-06-15 16:12:58 +03004966 if (!(ctxt->d & NoWrite)) {
4967 rc = writeback(ctxt, &ctxt->dst);
4968 if (rc != X86EMUL_CONTINUE)
4969 goto done;
4970 }
Avi Kivity018a98d2007-11-27 19:30:56 +02004971
Gleb Natapov5cd21912010-03-18 15:20:26 +02004972 /*
4973	 * restore dst type in case the decoding will be reused
4974	 * (happens for string instructions)
4975 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004976 ctxt->dst.type = saved_dst_type;
Gleb Natapov5cd21912010-03-18 15:20:26 +02004977
Avi Kivity9dac77f2011-06-01 15:34:25 +03004978 if ((ctxt->d & SrcMask) == SrcSI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03004979 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
Gleb Natapova682e352010-03-18 15:20:21 +02004980
Avi Kivity9dac77f2011-06-01 15:34:25 +03004981 if ((ctxt->d & DstMask) == DstDI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03004982 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
Gleb Natapovd9271122010-03-18 15:20:22 +02004983
Avi Kivity9dac77f2011-06-01 15:34:25 +03004984 if (ctxt->rep_prefix && (ctxt->d & String)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004985 unsigned int count;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004986 struct read_cache *r = &ctxt->io_read;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004987 if ((ctxt->d & SrcMask) == SrcSI)
4988 count = ctxt->src.count;
4989 else
4990 count = ctxt->dst.count;
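		/*
		 * One emulation pass may complete several iterations out
		 * of the read-ahead cache, so RCX is decremented by the
		 * per-operand iteration count rather than by one.
		 */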
4991 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
4992 -count);
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004993
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03004994 if (!string_insn_completed(ctxt)) {
4995 /*
4996			 * Re-enter the guest when the PIO read-ahead buffer is empty
4997			 * or, if it is not used, after every 1024 iterations.
4998 */
Avi Kivitydd856ef2012-08-27 23:46:17 +03004999 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005000 (r->end == 0 || r->end != r->pos)) {
5001 /*
5002 * Reset read cache. Usually happens before
5003				 * decode, but since the instruction is restarted
5004 * we have to do it here.
5005 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005006 ctxt->mem_read.end = 0;
Avi Kivitydd856ef2012-08-27 23:46:17 +03005007 writeback_registers(ctxt);
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005008 return EMULATION_RESTART;
5009 }
5010 goto done; /* skip rip writeback */
Avi Kivity0fa6ccb2010-08-17 11:22:17 +03005011 }
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03005012 ctxt->eflags &= ~EFLG_RF;
Gleb Natapov5cd21912010-03-18 15:20:26 +02005013 }
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005014
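	/*
	 * Commit the tentative RIP that decode advanced in _eip; it
	 * becomes architecturally visible only now that the instruction
	 * has completed.
	 */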
Avi Kivity9dac77f2011-06-01 15:34:25 +03005015 ctxt->eip = ctxt->_eip;
Avi Kivity018a98d2007-11-27 19:30:56 +02005016
5017done:
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02005018 if (rc == X86EMUL_PROPAGATE_FAULT) {
5019 WARN_ON(ctxt->exception.vector > 0x1f);
Avi Kivityda9cb572010-11-22 17:53:21 +02005020 ctxt->have_exception = true;
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02005021 }
Joerg Roedel775fde82011-04-04 12:39:24 +02005022 if (rc == X86EMUL_INTERCEPTED)
5023 return EMULATION_INTERCEPTED;
5024
Avi Kivitydd856ef2012-08-27 23:46:17 +03005025 if (rc == X86EMUL_CONTINUE)
5026 writeback_registers(ctxt);
5027
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005028 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005029
5030twobyte_insn:
Avi Kivity9dac77f2011-06-01 15:34:25 +03005031 switch (ctxt->b) {
Avi Kivity018a98d2007-11-27 19:30:56 +02005032 case 0x09: /* wbinvd */
Clemens Nosscfb22372011-04-21 21:16:05 +02005033 (ctxt->ops->wbinvd)(ctxt);
Sheng Yangf5f48ee2010-06-30 12:25:15 +08005034 break;
5035 case 0x08: /* invd */
Avi Kivity018a98d2007-11-27 19:30:56 +02005036 case 0x0d: /* GrpP (prefetch) */
5037 case 0x18: /* Grp16 (prefetch/nop) */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02005038 case 0x1f: /* nop */
Avi Kivity018a98d2007-11-27 19:30:56 +02005039 break;
5040 case 0x20: /* mov cr, reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005041 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
Avi Kivity018a98d2007-11-27 19:30:56 +02005042 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005043 case 0x21: /* mov from dr to reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005044 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005045 break;
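	/*
	 * In 64-bit mode a 32-bit cmov zero-extends the destination
	 * register even when the condition is false, so writeback must
	 * not be suppressed in that one case.
	 */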
Avi Kivity6aa8b732006-12-10 02:21:36 -08005046 case 0x40 ... 0x4f: /* cmov */
Nadav Amit140bad82014-06-15 16:13:00 +03005047 if (test_cc(ctxt->b, ctxt->eflags))
5048 ctxt->dst.val = ctxt->src.val;
5049 else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
5050 ctxt->op_bytes != 4)
Avi Kivity9dac77f2011-06-01 15:34:25 +03005051 ctxt->dst.type = OP_NONE; /* no writeback */
Avi Kivity6aa8b732006-12-10 02:21:36 -08005052 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03005053 case 0x80 ... 0x8f: /* jnz rel, etc*/
Avi Kivity9dac77f2011-06-01 15:34:25 +03005054 if (test_cc(ctxt->b, ctxt->eflags))
Nadav Amit234f3ce2014-09-18 22:39:38 +03005055 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02005056 break;
Wei Yongjunee45b582010-08-06 17:10:07 +08005057 case 0x90 ... 0x9f: /* setcc r/m8 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005058 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
Wei Yongjunee45b582010-08-06 17:10:07 +08005059 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005060 case 0xb6 ... 0xb7: /* movzx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005061 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03005062 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
Avi Kivity9dac77f2011-06-01 15:34:25 +03005063 : (u16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005064 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005065 case 0xbe ... 0xbf: /* movsx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005066 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03005067 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
Avi Kivity9dac77f2011-06-01 15:34:25 +03005068 (s16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005069 break;
Sheng Yanga012e652007-10-15 14:24:20 +08005070 case 0xc3: /* movnti */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005071 ctxt->dst.bytes = ctxt->op_bytes;
Nadav Amit3b320042014-06-02 18:34:08 +03005072 ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
5073 (u32) ctxt->src.val;
Sheng Yanga012e652007-10-15 14:24:20 +08005074 break;
Avi Kivity91269b82010-07-25 14:51:16 +03005075 default:
5076 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005077 }
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005078
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01005079threebyte_insn:
5080
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005081 if (rc != X86EMUL_CONTINUE)
5082 goto done;
5083
Avi Kivity6aa8b732006-12-10 02:21:36 -08005084 goto writeback;
5085
5086cannot_emulate:
Gleb Natapova0c0ab22011-03-28 16:57:49 +02005087 return EMULATION_FAILED;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005088}
Avi Kivitydd856ef2012-08-27 23:46:17 +03005089
5090void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5091{
5092 invalidate_registers(ctxt);
5093}
5094
5095void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5096{
5097 writeback_registers(ctxt);
5098}