/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>

#define KVM_MAX_VCPUS 255
#define KVM_SOFT_MAX_VCPUS 160
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR3_PCID_INVD            BIT_64(63)
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
                          | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES       3
#define KVM_HPAGE_GFN_SHIFT(x)  (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)      (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)       (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)       (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)  (KVM_HPAGE_SIZE(x) / PAGE_SIZE)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

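/*
 * Worked example (illustrative, not part of the upstream header): for
 * level 2, i.e. 2MB pages built from 4KB base pages,
 *
 *      KVM_HPAGE_GFN_SHIFT(2) == 9
 *      KVM_HPAGE_SHIFT(2)     == 12 + 9 == 21
 *      KVM_HPAGE_SIZE(2)      == 1UL << 21 == 2MB
 *      KVM_PAGES_PER_HPAGE(2) == 512
 *
 * and gfn_to_index(gfn, base_gfn, 2) yields the 2MB-slot number of gfn
 * within a memslot starting at base_gfn, since both gfns are first
 * rounded down to a 512-page boundary by the shifts.
 */
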
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        VCPU_REGS_RIP,
        NR_VCPU_REGS
};

enum kvm_reg_ex {
        VCPU_EXREG_PDPTR = NR_VCPU_REGS,
        VCPU_EXREG_CR3,
        VCPU_EXREG_RFLAGS,
        VCPU_EXREG_SEGMENTS,
};

enum {
        VCPU_SREG_ES,
        VCPU_SREG_CS,
        VCPU_SREG_SS,
        VCPU_SREG_DS,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS  4

#define DR6_BD          (1 << 13)
#define DR6_BS          (1 << 14)
#define DR6_RTM         (1 << 16)
#define DR6_FIXED_1     0xfffe0ff0
#define DR6_INIT        0xffff0ff0
#define DR6_VOLATILE    0x0001e00f

#define DR7_BP_EN_MASK  0x000000ff
#define DR7_GE          (1 << 9)
#define DR7_GD          (1 << 13)
#define DR7_FIXED_1     0x00000400
#define DR7_VOLATILE    0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)

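/*
 * Example (illustrative): a guest write to a present user-mode page
 * that faults for permission reasons produces the error code
 *
 *      PFERR_PRESENT_MASK | PFERR_WRITE_MASK | PFERR_USER_MASK == 0x7
 *
 * which matches the layout of the error code the hardware pushes on a
 * #PF exception.
 */
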
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC    0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with the PV-EOI bit in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING 1

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};

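/*
 * Minimal sketch of how such a preallocated cache is used; the helper
 * calls below are illustrative only, the real topup/alloc routines
 * live in arch/x86/kvm/mmu.c:
 *
 *      // before taking mmu_lock, while allocation may still sleep:
 *      while (cache->nobjs < ARRAY_SIZE(cache->objects))
 *              cache->objects[cache->nobjs++] = kmem_cache_zalloc(...);
 *
 *      // later, under mmu_lock, consumption cannot fail:
 *      obj = cache->objects[--cache->nobjs];
 */
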
/*
 * kvm_mmu_page_role, below, is defined as:
 *
 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 * bits 4:7 - page table level for this shadow (1-4)
 * bits 8:9 - page table quadrant for 2-level guests
 * bit   16 - direct mapping of virtual to physical at gfn,
 *            used for real mode and two-dimensional paging
 * bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned level:4;
                unsigned cr4_pae:1;
                unsigned quadrant:2;
                unsigned pad_for_nice_hex_output:6;
                unsigned direct:1;
                unsigned access:3;
                unsigned invalid:1;
                unsigned nxe:1;
                unsigned cr0_wp:1;
                unsigned smep_andnot_wp:1;
        };
};

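/*
 * The role is used together with the gfn as the hash key for shadow
 * pages, so (for example) a shadow page built for a non-PAE guest and
 * one built for a PAE guest at the same gfn are distinct pages.
 * Because the bitfields overlay 'word', comparing the whole key is a
 * plain integer compare (illustrative):
 *
 *      if (sp->gfn == gfn && sp->role.word == role.word)
 *              // same shadow page
 */
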
struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        u64 *spt;
        /* hold the gfn of each spte inside spt */
        gfn_t *gfns;
        bool unsync;
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
        unsigned long parent_ptes;      /* Reverse mapping for parent_pte */

        /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
        unsigned long mmu_valid_gen;

        DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
        /*
         * Used out of the mmu-lock to avoid reading spte values while an
         * update is in progress; see the comments in __get_spte_lockless().
         */
        int clear_spte_count;
#endif

        /* Number of writes since the last time traversal visited this page. */
        int write_flooding_count;
};

struct kvm_pio_request {
        unsigned long count;
        int in;
        int port;
        int size;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 32-bit PAE, and
 * 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
        unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
        u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
                          bool prefault);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  struct x86_exception *fault);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
                            struct x86_exception *exception);
        gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                               struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
        void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           u64 *spte, const void *pte);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;
        union kvm_mmu_page_role base_role;
        bool direct_map;

        /*
         * Bitmap; bit set = permission fault
         * Byte index: page fault error code [4:1]
         * Bit index: pte permissions in ACC_* format
         */
        u8 permissions[16];

        u64 *pae_root;
        u64 *lm_root;
        u64 rsvd_bits_mask[2][4];
        u64 bad_mt_xwr;

        /*
         * Bitmap: bit set = last pte in walk
         * index[0:1]: level (zero-based)
         * index[2]: pte.ps
         */
        u8 last_pte_bitmap;

        bool nx;

        u64 pdptrs[4]; /* pae */
};

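/*
 * Illustrative sketch of the permissions[] lookup (the real check is
 * permission_fault() in arch/x86/kvm/mmu.h): bits [4:1] of the page
 * fault error code select a byte, the pte's ACC_* permission bits
 * select a bit within it, and a set bit means "fault":
 *
 *      fault = (mmu->permissions[(pfec >> 1) & 15] >> pte_access) & 1;
 */
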
enum pmc_type {
        KVM_PMC_GP = 0,
        KVM_PMC_FIXED,
};

struct kvm_pmc {
        enum pmc_type type;
        u8 idx;
        u64 counter;
        u64 eventsel;
        struct perf_event *perf_event;
        struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
        unsigned nr_arch_gp_counters;
        unsigned nr_arch_fixed_counters;
        unsigned available_event_types;
        u64 fixed_ctr_ctrl;
        u64 global_ctrl;
        u64 global_status;
        u64 global_ovf_ctrl;
        u64 counter_bitmask[2];
        u64 global_ctrl_mask;
        u64 reserved_bits;
        u8 version;
        struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
        struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
        struct irq_work irq_work;
        u64 reprogram_pmi;
};

enum {
        KVM_DEBUGREG_BP_ENABLED = 1,
        KVM_DEBUGREG_WONT_EXIT = 2,
};

struct kvm_vcpu_arch {
        /*
         * rip and regs accesses must go through
         * kvm_{register,rip}_{read,write} functions.
         */
        unsigned long regs[NR_VCPU_REGS];
        u32 regs_avail;
        u32 regs_dirty;

        unsigned long cr0;
        unsigned long cr0_guest_owned_bits;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr4_guest_owned_bits;
        unsigned long cr8;
        u32 hflags;
        u64 efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
        unsigned long apic_attention;
        int32_t apic_arb_prio;
        int mp_state;
        u64 ia32_misc_enable_msr;
        bool tpr_access_reporting;
        u64 ia32_xss;

        /*
         * Paging state of the vcpu
         *
         * If the vcpu runs in guest mode with two level paging this still saves
         * the paging mode of the l1 guest. This context is always used to
         * handle faults.
         */
        struct kvm_mmu mmu;

        /*
         * Paging state of an L2 guest (used for nested npt)
         *
         * This context will save all necessary information to walk page tables
         * of an L2 guest. This context is only initialized for page table
         * walking and not for faulting since we never handle l2 page faults on
         * the host.
         */
        struct kvm_mmu nested_mmu;

        /*
         * Pointer to the mmu context currently used for
         * gva_to_gpa translations.
         */
        struct kvm_mmu *walk_mmu;

        struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        struct fpu guest_fpu;
        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;

        struct kvm_pio_request pio;
        void *pio_data;

        u8 event_exit_inst_len;

        struct kvm_queued_exception {
                bool pending;
                bool has_error_code;
                bool reinject;
                u8 nr;
                u32 error_code;
        } exception;

        struct kvm_queued_interrupt {
                bool pending;
                bool soft;
                u8 nr;
        } interrupt;

        int halt_request; /* real mode on Intel only */

        int cpuid_nent;
        struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
        /* emulate context */

        struct x86_emulate_ctxt emulate_ctxt;
        bool emulate_regs_need_sync_to_vcpu;
        bool emulate_regs_need_sync_from_vcpu;
        int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

        gpa_t time;
        struct pvclock_vcpu_time_info hv_clock;
        unsigned int hw_tsc_khz;
        struct gfn_to_hva_cache pv_time;
        bool pv_time_enabled;
        /* set guest stopped flag in pvclock flags field */
        bool pvclock_set_guest_stopped_request;

        struct {
                u64 msr_val;
                u64 last_steal;
                u64 accum_steal;
                struct gfn_to_hva_cache stime;
                struct kvm_steal_time steal;
        } st;

        u64 last_guest_tsc;
        u64 last_host_tsc;
        u64 tsc_offset_adjustment;
        u64 this_tsc_nsec;
        u64 this_tsc_write;
        u64 this_tsc_generation;
        bool tsc_catchup;
        bool tsc_always_catchup;
        s8 virtual_tsc_shift;
        u32 virtual_tsc_mult;
        u32 virtual_tsc_khz;
        s64 ia32_tsc_adjust_msr;

        atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
        unsigned nmi_pending; /* NMI queued after currently running handler */
        bool nmi_injected;    /* Trying to inject an NMI this entry */

        struct mtrr_state_type mtrr_state;
        u64 pat;

        unsigned switch_db_regs;
        unsigned long db[KVM_NR_DB_REGS];
        unsigned long dr6;
        unsigned long dr7;
        unsigned long eff_db[KVM_NR_DB_REGS];
        unsigned long guest_debug_dr7;

        u64 mcg_cap;
        u64 mcg_status;
        u64 mcg_ctl;
        u64 *mce_banks;

        /* Cache MMIO info */
        u64 mmio_gva;
        unsigned access;
        gfn_t mmio_gfn;
        u64 mmio_gen;

        struct kvm_pmu pmu;

        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;

        /* fields used by HYPER-V emulation */
        u64 hv_vapic;

        cpumask_var_t wbinvd_dirty_mask;

        unsigned long last_retry_eip;
        unsigned long last_retry_addr;

        struct {
                bool halted;
                gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
                struct gfn_to_hva_cache data;
                u64 msr_val;
                u32 id;
                bool send_user_only;
        } apf;

        /* OSVW MSRs (AMD only) */
        struct {
                u64 length;
                u64 status;
        } osvw;

        struct {
                u64 msr_val;
                struct gfn_to_hva_cache data;
        } pv_eoi;

        /*
         * Indicates whether the access faulted on its page table in the
         * guest; set when fixing a page fault and used to detect
         * unhandleable instructions.
         */
        bool write_fault_to_shadow_pgtable;

        /* set at EPT violation at this point */
        unsigned long exit_qualification;

        /* pv related host specific info */
        struct {
                bool pv_unhalted;
        } pv;
};

struct kvm_lpage_info {
        int write_count;
};

struct kvm_arch_memory_slot {
        unsigned long *rmap[KVM_NR_PAGE_SIZES];
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};

struct kvm_apic_map {
        struct rcu_head rcu;
        u8 ldr_bits;
        /* fields below are used to decode ldr values in different modes */
        u32 cid_shift, cid_mask, lid_mask, broadcast;
        struct kvm_lapic *phys_map[256];
        /* first index is cluster id, second is cpu id in a cluster */
        struct kvm_lapic *logical_map[16][16];
};

struct kvm_arch {
        unsigned int n_used_mmu_pages;
        unsigned int n_requested_mmu_pages;
        unsigned int n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        unsigned long mmu_valid_gen;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct list_head active_mmu_pages;
        struct list_head zapped_obsolete_pages;

        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
        bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
        atomic_t noncoherent_dma_count;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
        int vapics_in_nmi_mode;
        struct mutex apic_map_lock;
        struct kvm_apic_map *apic_map;

        unsigned int tss_addr;
        bool apic_access_page_done;

        gpa_t wall_clock;

        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;

        unsigned long irq_sources_bitmap;
        s64 kvmclock_offset;
        raw_spinlock_t tsc_write_lock;
        u64 last_tsc_nsec;
        u64 last_tsc_write;
        u32 last_tsc_khz;
        u64 cur_tsc_nsec;
        u64 cur_tsc_write;
        u64 cur_tsc_offset;
        u64 cur_tsc_generation;
        int nr_vcpus_matched_tsc;

        spinlock_t pvclock_gtod_sync_lock;
        bool use_master_clock;
        u64 master_kernel_ns;
        cycle_t master_cycle_now;
        struct delayed_work kvmclock_update_work;
        struct delayed_work kvmclock_sync_work;

        struct kvm_xen_hvm_config xen_hvm_config;

        /* reads protected by irq_srcu, writes by irq_lock */
        struct hlist_head mask_notifier_list;

        /* fields used by HYPER-V emulation */
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;

        #ifdef CONFIG_KVM_MMU_AUDIT
        int audit_point;
        #endif

        bool boot_vcpu_runs_old_kvmclock;
};

struct kvm_vm_stat {
        u32 mmu_shadow_zapped;
        u32 mmu_pte_write;
        u32 mmu_pte_updated;
        u32 mmu_pde_zapped;
        u32 mmu_flooded;
        u32 mmu_recycled;
        u32 mmu_cache_miss;
        u32 mmu_unsync;
        u32 remote_tlb_flush;
        u32 lpages;
};

struct kvm_vcpu_stat {
        u32 pf_fixed;
        u32 pf_guest;
        u32 tlb_flush;
        u32 invlpg;

        u32 exits;
        u32 io_exits;
        u32 mmio_exits;
        u32 signal_exits;
        u32 irq_window_exits;
        u32 nmi_window_exits;
        u32 halt_exits;
        u32 halt_successful_poll;
        u32 halt_wakeup;
        u32 request_irq_exits;
        u32 irq_exits;
        u32 host_state_reload;
        u32 efer_reload;
        u32 fpu_reload;
        u32 insn_emulation;
        u32 insn_emulation_fail;
        u32 hypercalls;
        u32 irq_injections;
        u32 nmi_injections;
};

struct x86_instruction_info;

struct msr_data {
        bool host_initiated;
        u32 index;
        u64 data;
};

struct kvm_lapic_irq {
        u32 vector;
        u32 delivery_mode;
        u32 dest_mode;
        u32 level;
        u32 trig_mode;
        u32 shorthand;
        u32 dest_id;
};

struct kvm_x86_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        int (*hardware_enable)(void);
        void (*hardware_disable)(void);
        void (*check_processor_compatibility)(void *rtn);
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);

        /* Create, but do not attach this VCPU */
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        void (*vcpu_reset)(struct kvm_vcpu *vcpu);

        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);

        void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        int (*get_cpl)(struct kvm_vcpu *vcpu);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
        void (*decache_cr3)(struct kvm_vcpu *vcpu);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        u64 (*get_dr6)(struct kvm_vcpu *vcpu);
        void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
        void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
        void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

        void (*tlb_flush)(struct kvm_vcpu *vcpu);

        void (*run)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
        u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
        void (*set_irq)(struct kvm_vcpu *vcpu);
        void (*set_nmi)(struct kvm_vcpu *vcpu);
        void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code,
                                bool reinject);
        void (*cancel_injection)(struct kvm_vcpu *vcpu);
        int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
        int (*nmi_allowed)(struct kvm_vcpu *vcpu);
        bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
        void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
        int (*vm_has_apicv)(struct kvm *kvm);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm *kvm, int isr);
        void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
        void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
        void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
        void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
        void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*get_tdp_level)(void);
        u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
        int (*get_lpage_level)(void);
        bool (*rdtscp_supported)(void);
        bool (*invpcid_supported)(void);
        void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);

        void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

        void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

        bool (*has_wbinvd_exit)(void);

        void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
        u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

        u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
        u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);

        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

        int (*check_intercept)(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage);
        void (*handle_external_intr)(struct kvm_vcpu *vcpu);
        bool (*mpx_supported)(void);
        bool (*xsaves_supported)(void);

        int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);

        void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

        /*
         * Arch-specific dirty logging hooks. These hooks are only supposed to
         * be valid if the specific arch has hardware-accelerated dirty logging
         * mechanism. Currently only for PML on VMX.
         *
         *  - slot_enable_log_dirty:
         *      called when enabling log dirty mode for the slot.
         *  - slot_disable_log_dirty:
         *      called when disabling log dirty mode for the slot.
         *      also called when slot is created with log dirty disabled.
         *  - flush_log_dirty:
         *      called before reporting dirty_bitmap to userspace.
         *  - enable_log_dirty_pt_masked:
         *      called when reenabling log dirty for the GFNs in the mask after
         *      corresponding bits are cleared in slot->dirty_bitmap.
         */
        void (*slot_enable_log_dirty)(struct kvm *kvm,
                                      struct kvm_memory_slot *slot);
        void (*slot_disable_log_dirty)(struct kvm *kvm,
                                       struct kvm_memory_slot *slot);
        void (*flush_log_dirty)(struct kvm *kvm);
        void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t offset, unsigned long mask);
};

struct kvm_arch_async_pf {
        u32 token;
        gfn_t gfn;
        unsigned long cr3;
        bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
                                           s64 adjustment)
{
        kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
        kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
                            struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   struct kvm_memory_slot *slot,
                                   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                        const void *val, int bytes);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* minimum supported tsc_khz for guests */
extern u32 kvm_min_guest_tsc_khz;
/* maximum supported tsc_khz for guests */
extern u32 kvm_max_guest_tsc_khz;

enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
        EMULATE_FAIL,         /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE          (1 << 0)
#define EMULTYPE_TRAP_UD            (1 << 1)
#define EMULTYPE_SKIP               (1 << 2)
#define EMULTYPE_RETRY              (1 << 3)
#define EMULTYPE_NO_REEXECUTE       (1 << 4)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
                            int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
                        int emulation_type)
{
        return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}

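/*
 * Typical usage (illustrative): callers that already hold the decoded
 * instruction pass EMULTYPE_NO_DECODE, #UD interception passes
 * EMULTYPE_TRAP_UD, and the page-fault path may ask for retries, e.g.:
 *
 *      er = x86_emulate_instruction(vcpu, cr2, EMULTYPE_RETRY,
 *                                   insn, insn_len);
 *      if (er == EMULATE_FAIL)
 *              // give up and report the failure to userspace
 */
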
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
                    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gfn_t gfn, void *data, int offset, int len,
                            u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
                                       int irq_source_id, int level)
{
        /* Logical OR for level trig interrupt */
        if (level)
                __set_bit(irq_source_id, irq_state);
        else
                __clear_bit(irq_source_id, irq_state);

        return !!(*irq_state);
}

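/*
 * Usage sketch (illustrative): each interrupt source ORs its level into
 * the line's state word, so a shared level-triggered line stays
 * asserted until every source has cleared its bit:
 *
 *      level = __kvm_irq_line_state(&ioapic->irq_states[irq],
 *                                   irq_source_id, level);
 *      // 'level' is now the logical OR across all sources
 */
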
int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int fx_init(struct kvm_vcpu *vcpu);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                           struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
                              struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
                                struct x86_exception *exception);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
                       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                                  struct x86_exception *exception)
{
        return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
        u16 ldt;
        asm("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
        asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
        kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

static inline u64 get_canonical(u64 la)
{
        return ((int64_t)la << 16) >> 16;
}

static inline bool is_noncanonical_address(u64 la)
{
#ifdef CONFIG_X86_64
        return get_canonical(la) != la;
#else
        return false;
#endif
}

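/*
 * Worked example (illustrative): with 48-bit linear addresses, an
 * address is canonical when bits 63:47 are all copies of bit 47.
 * get_canonical() makes that explicit by sign-extending bits 47:0:
 *
 *      get_canonical(0x0000800000000000ULL) == 0xffff800000000000ULL
 *
 * so 0x0000800000000000 is non-canonical, while 0xffff800000000000 and
 * every address below 0x0000800000000000 are canonical.
 */
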
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE                                                  \
        (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

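/*
 * Worked sizes (illustrative): TSS_IOPB_SIZE is 8192 bytes (one bit per
 * I/O port), so RMODE_TSS_SIZE is 0x68 + 0x20 + 0x2000 + 1 == 8329
 * bytes; the trailing byte is the mandatory all-ones terminator that
 * must follow the I/O permission bitmap.
 */
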
enum {
        TASK_SWITCH_CALL = 0,
        TASK_SWITCH_IRET = 1,
        TASK_SWITCH_JMP = 2,
        TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK             (1 << 0)
#define HF_HIF_MASK             (1 << 1)
#define HF_VINTR_MASK           (1 << 2)
#define HF_NMI_MASK             (1 << 3)
#define HF_IRET_MASK            (1 << 4)
#define HF_GUEST_MASK           (1 << 5) /* VCPU is in guest-mode */

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)      \
        "666: " insn "\n\t" \
        "668: \n\t" \
        ".pushsection .fixup, \"ax\" \n" \
        "667: \n\t" \
        cleanup_insn "\n\t" \
        "cmpb $0, kvm_rebooting \n\t" \
        "jne 668b \n\t" \
        __ASM_SIZE(push) " $666b \n\t" \
        "call kvm_spurious_fault \n\t" \
        ".popsection \n\t" \
        _ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)              \
        ____kvm_handle_fault_on_reboot(insn, "")

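/*
 * Usage sketch (illustrative): vendor code wraps raw virtualization
 * instructions with this macro so that a fault caused by a concurrent
 * reboot is swallowed instead of oopsing, roughly:
 *
 *      #define __ex(x) __kvm_handle_fault_on_reboot(x)
 *      ...
 *      asm volatile(__ex("vmxoff") : : : "cc");
 */
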
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                           unsigned long address);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc);
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
void kvm_deliver_pmi(struct kvm_vcpu *vcpu);

#endif /* _ASM_X86_KVM_HOST_H */