/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>

#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/kvm_para.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS 16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;

	unsigned int3_injected;
	unsigned long int3_rip;
	u32 apf_reason;

	u64 tsc_ratio;
};

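/*
 * As used by __scale_tsc() below, the TSC ratio is a fixed-point value with
 * a 32-bit fractional part in its low half, so TSC_RATIO_DEFAULT
 * (1ULL << 32) encodes a ratio of 1.0, i.e. the guest TSC advances at the
 * host TSC frequency.
 */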
static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID		0xffffffffU

static struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,			.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,	.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,			.always = true  },
	{ .index = MSR_FS_BASE,			.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,		.always = true  },
	{ .index = MSR_LSTAR,			.always = true  },
	{ .index = MSR_CSTAR,			.always = true  },
	{ .index = MSR_SYSCALL_MASK,		.always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,	.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,	.always = false },
	{ .index = MSR_INVALID,			.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static int nested = 1;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

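/*
 * VMCB clean bits: each enumerator below names a group of VMCB fields. A set
 * bit in vmcb->control.clean tells a CPU with the VMCB_CLEAN feature that
 * the corresponding group is unchanged since the last VMRUN and may be
 * served from its VMCB state cache; mark_dirty() clears a bit whenever the
 * fields it covers are written.
 */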
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

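/*
 * While the vcpu runs a nested guest, the effective intercept masks in the
 * active VMCB are the union of the host masks (saved in nested.hsave) and
 * the masks requested by the L1 hypervisor (cached in svm->nested), so
 * neither level loses exits it asked for.
 */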
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

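/*
 * The MSR permission map covers the three MSR ranges in msrpm_ranges above,
 * each holding MSRS_IN_RANGE MSRs at two bits per MSR (one read-intercept
 * bit, one write-intercept bit). svm_msrpm_offset() translates an MSR number
 * into the index of the u32 word holding its bit pair. As a worked example,
 * MSR_STAR (0xc0000081) lands in the second range: byte offset
 * (0x81 / 4) + 1 * MSRS_RANGE_SIZE = 32 + 2048 = 2080, i.e. u32 offset 520.
 */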
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

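/*
 * Thin wrappers around the SVM instructions: CLGI and STGI clear and set the
 * global interrupt flag around the VMRUN critical section, and INVLPGA
 * invalidates the TLB entry for a guest virtual address within a given ASID.
 */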
static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret & mask;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

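/*
 * Prefer the next_rip value provided by hardware (decode assist / NRIPS);
 * when it is unavailable, fall back to the instruction emulator to skip over
 * the current instruction.
 */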
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0)
		svm->next_rip = svm->vmcb->control.next_rip;

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

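/*
 * Work around AMD erratum 383, under which an affected CPU can reportedly
 * raise a fatal machine check (see AMD's errata documentation for details).
 * Setting bit 47 in MSR_AMD64_DC_CFG mitigates the erratum; the _safe MSR
 * accessors keep this from faulting when we ourselves run virtualized.
 */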
static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!cpu_has_amd_erratum(amd_erratum_383))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();
}

static int svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
		       me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);

	if (!sd) {
		printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
		       me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	native_store_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.address;
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
	}

	svm_init_erratum_383();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

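/*
 * Each MSR owns two adjacent bits in the permission map: bit 2n intercepts
 * reads and bit 2n+1 intercepts writes. A cleared bit lets the guest access
 * the MSR directly, without a #VMEXIT.
 */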
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

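/*
 * Multiply a 64-bit TSC value by a fixed-point ratio (32-bit integer part in
 * the high half of 'ratio', 32-bit fraction in the low half). The fractional
 * product is computed on the two halves of 'tsc' separately so that no
 * intermediate result overflows 64 bits.
 */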
static u64 __scale_tsc(u64 ratio, u64 tsc)
{
	u64 mult, frac, _tsc;

	mult = ratio >> 32;
	frac = ratio & ((1ULL << 32) - 1);

	_tsc  = tsc;
	_tsc *= mult;
	_tsc += (tsc >> 32) * frac;
	_tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;

	return _tsc;
}

static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 _tsc = tsc;

	if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
		_tsc = __scale_tsc(svm->tsc_ratio, tsc);

	return _tsc;
}

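/*
 * While the vcpu is in guest mode, the active VMCB carries the combined
 * L1+L2 TSC offset; preserve the delta between the two levels so that
 * updating the L1 offset does not perturb what the nested guest observes.
 */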
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	}

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.tsc_offset += adjustment;
	if (is_guest_mode(vcpu))
		svm->nested.hsave->control.tsc_offset += adjustment;
	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

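/*
 * Bring the VMCB to its initial state: intercept everything the hypervisor
 * must see, set up real-mode segment state at the architectural reset
 * vector, and relax the CR3/paging intercepts again where nested paging
 * makes them unnecessary.
 */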
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercept(svm, INTERCEPT_DR0_READ);
	set_dr_intercept(svm, INTERCEPT_DR1_READ);
	set_dr_intercept(svm, INTERCEPT_DR2_READ);
	set_dr_intercept(svm, INTERCEPT_DR3_READ);
	set_dr_intercept(svm, INTERCEPT_DR4_READ);
	set_dr_intercept(svm, INTERCEPT_DR5_READ);
	set_dr_intercept(svm, INTERCEPT_DR6_READ);
	set_dr_intercept(svm, INTERCEPT_DR7_READ);

	set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR7_WRITE);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_MONITOR);
	set_intercept(svm, INTERCEPT_MWAIT);
	set_intercept(svm, INTERCEPT_XSETBV);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * This is the guest-visible cr0 value.
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 */
	svm->vcpu.arch.cr0 = 0;
	(void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		clr_intercept(svm, INTERCEPT_TASK_SWITCH);
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = 0x0007040600070406ULL;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		control->pause_filter_count = 3000;
		set_intercept(svm, INTERCEPT_PAUSE);
	}

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (!kvm_vcpu_is_bsp(vcpu)) {
		kvm_rip_write(vcpu, 0);
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	svm->tsc_ratio = TSC_RATIO_DEFAULT;

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto uninit;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto free_page1;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto free_page2;

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto free_page3;

	svm->nested.hsave = page_address(hsave_page);

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->nested.msrpm = page_address(nested_msrpm_pages);
	svm_vcpu_init_msrpm(svm->nested.msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);
	kvm_write_tsc(&svm->vcpu, 0);

	err = fx_init(&svm->vcpu);
	if (err)
		goto free_page4;

	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (kvm_vcpu_is_bsp(&svm->vcpu))
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

free_page4:
	__free_page(hsave_page);
free_page3:
	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
	__free_page(page);
uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

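/*
 * Called when the vcpu is (re)scheduled onto a physical CPU: save the host
 * segment selectors, LDT and user-visible MSRs that running the guest can
 * clobber, and reprogram the per-CPU TSC ratio MSR when this vcpu's ratio
 * differs from the one currently loaded.
 */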
Avi Kivity15ad7142007-07-11 18:17:21 +03001158static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001159{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001160 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03001161 int i;
Avi Kivity0cc50642007-03-25 12:07:27 +02001162
Avi Kivity0cc50642007-03-25 12:07:27 +02001163 if (unlikely(cpu != vcpu->cpu)) {
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03001164 svm->asid_generation = 0;
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001165 mark_all_dirty(svm->vmcb);
Avi Kivity0cc50642007-03-25 12:07:27 +02001166 }
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001167
Avi Kivity82ca2d12010-10-21 12:20:34 +02001168#ifdef CONFIG_X86_64
1169 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
1170#endif
Avi Kivitydacccfd2010-10-21 12:20:33 +02001171 savesegment(fs, svm->host.fs);
1172 savesegment(gs, svm->host.gs);
1173 svm->host.ldt = kvm_read_ldt();
1174
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001175 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001176 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001177
1178 if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
1179 svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
1180 __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
1181 wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
1182 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001183}
1184
1185static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1186{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001187 struct vcpu_svm *svm = to_svm(vcpu);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001188 int i;
1189
Avi Kivitye1beb1d2007-11-18 13:50:24 +02001190 ++vcpu->stat.host_state_reload;
Avi Kivitydacccfd2010-10-21 12:20:33 +02001191 kvm_load_ldt(svm->host.ldt);
1192#ifdef CONFIG_X86_64
1193 loadsegment(fs, svm->host.fs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02001194 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
Joerg Roedel893a5ab2011-01-14 16:45:01 +01001195 load_gs_index(svm->host.gs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02001196#else
Avi Kivity831ca602011-03-08 16:09:51 +02001197#ifdef CONFIG_X86_32_LAZY_GS
Avi Kivitydacccfd2010-10-21 12:20:33 +02001198 loadsegment(gs, svm->host.gs);
1199#endif
Avi Kivity831ca602011-03-08 16:09:51 +02001200#endif
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001201 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001202 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001203}
1204
Avi Kivity6aa8b732006-12-10 02:21:36 -08001205static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1206{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001207 return to_svm(vcpu)->vmcb->save.rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001208}
1209
1210static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1211{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001212 to_svm(vcpu)->vmcb->save.rflags = rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001213}
1214
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001215static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1216{
1217 switch (reg) {
1218 case VCPU_EXREG_PDPTR:
1219 BUG_ON(!npt_enabled);
Avi Kivity9f8fe502010-12-05 17:30:00 +02001220 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001221 break;
1222 default:
1223 BUG();
1224 }
1225}
1226
Alexander Graff0b85052008-11-25 20:17:01 +01001227static void svm_set_vintr(struct vcpu_svm *svm)
1228{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01001229 set_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01001230}
1231
1232static void svm_clear_vintr(struct vcpu_svm *svm)
1233{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01001234 clr_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01001235}
1236
Avi Kivity6aa8b732006-12-10 02:21:36 -08001237static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1238{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001239 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001240
1241 switch (seg) {
1242 case VCPU_SREG_CS: return &save->cs;
1243 case VCPU_SREG_DS: return &save->ds;
1244 case VCPU_SREG_ES: return &save->es;
1245 case VCPU_SREG_FS: return &save->fs;
1246 case VCPU_SREG_GS: return &save->gs;
1247 case VCPU_SREG_SS: return &save->ss;
1248 case VCPU_SREG_TR: return &save->tr;
1249 case VCPU_SREG_LDTR: return &save->ldtr;
1250 }
1251 BUG();
Al Viro8b6d44c2007-02-09 16:38:40 +00001252 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001253}
1254
1255static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1256{
1257 struct vmcb_seg *s = svm_seg(vcpu, seg);
1258
1259 return s->base;
1260}
1261
1262static void svm_get_segment(struct kvm_vcpu *vcpu,
1263 struct kvm_segment *var, int seg)
1264{
1265 struct vmcb_seg *s = svm_seg(vcpu, seg);
1266
1267 var->base = s->base;
1268 var->limit = s->limit;
1269 var->selector = s->selector;
1270 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1271 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1272 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1273 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1274 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1275 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1276 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1277 var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
Amit Shah25022ac2008-10-27 09:04:17 +00001278
Joerg Roedele0231712010-02-24 18:59:10 +01001279 /*
1280 * AMD's VMCB does not have an explicit unusable field, so emulate it
Andre Przywara19bca6a2009-04-28 12:45:30 +02001281 * for cross vendor migration purposes by "not present"
1282 */
1283 var->unusable = !var->present || (var->type == 0);
1284
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001285 switch (seg) {
1286 case VCPU_SREG_CS:
1287 /*
1288 * SVM always stores 0 for the 'G' bit in the CS selector in
1289 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
1290 * Intel's VMENTRY has a check on the 'G' bit.
1291 */
Amit Shah25022ac2008-10-27 09:04:17 +00001292 var->g = s->limit > 0xfffff;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001293 break;
1294 case VCPU_SREG_TR:
1295 /*
1296 * Work around a bug where the busy flag in the tr selector
1297 * isn't exposed
1298 */
Amit Shahc0d09822008-10-27 09:04:18 +00001299 var->type |= 0x2;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001300 break;
1301 case VCPU_SREG_DS:
1302 case VCPU_SREG_ES:
1303 case VCPU_SREG_FS:
1304 case VCPU_SREG_GS:
1305 /*
1306 * The accessed bit must always be set in the segment
1307 * descriptor cache, although it can be cleared in the
1308 * descriptor, the cached bit always remains at 1. Since
1309 * Intel has a check on this, set it here to support
1310 * cross-vendor migration.
1311 */
1312 if (!var->unusable)
1313 var->type |= 0x1;
1314 break;
Andre Przywarab586eb02009-04-28 12:45:43 +02001315 case VCPU_SREG_SS:
Joerg Roedele0231712010-02-24 18:59:10 +01001316 /*
1317 * On AMD CPUs sometimes the DB bit in the segment
Andre Przywarab586eb02009-04-28 12:45:43 +02001318 * descriptor is left as 1, although the whole segment has
1319 * been made unusable. Clear it here to pass an Intel VMX
1320 * entry check when cross vendor migrating.
1321 */
1322 if (var->unusable)
1323 var->db = 0;
1324 break;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001325 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001326}
1327
Izik Eidus2e4d2652008-03-24 19:38:34 +02001328static int svm_get_cpl(struct kvm_vcpu *vcpu)
1329{
1330 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1331
1332 return save->cpl;
1333}
1334
Gleb Natapov89a27f42010-02-16 10:51:48 +02001335static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001336{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001337 struct vcpu_svm *svm = to_svm(vcpu);
1338
Gleb Natapov89a27f42010-02-16 10:51:48 +02001339 dt->size = svm->vmcb->save.idtr.limit;
1340 dt->address = svm->vmcb->save.idtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001341}
1342
Gleb Natapov89a27f42010-02-16 10:51:48 +02001343static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001344{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001345 struct vcpu_svm *svm = to_svm(vcpu);
1346
Gleb Natapov89a27f42010-02-16 10:51:48 +02001347 svm->vmcb->save.idtr.limit = dt->size;
1348 svm->vmcb->save.idtr.base = dt->address ;
Joerg Roedel17a703c2010-12-03 11:45:56 +01001349 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001350}
1351
Gleb Natapov89a27f42010-02-16 10:51:48 +02001352static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001353{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001354 struct vcpu_svm *svm = to_svm(vcpu);
1355
Gleb Natapov89a27f42010-02-16 10:51:48 +02001356 dt->size = svm->vmcb->save.gdtr.limit;
1357 dt->address = svm->vmcb->save.gdtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001358}
1359
Gleb Natapov89a27f42010-02-16 10:51:48 +02001360static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001361{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001362 struct vcpu_svm *svm = to_svm(vcpu);
1363
Gleb Natapov89a27f42010-02-16 10:51:48 +02001364 svm->vmcb->save.gdtr.limit = dt->size;
1365	svm->vmcb->save.gdtr.base = dt->address;
Joerg Roedel17a703c2010-12-03 11:45:56 +01001366 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001367}
1368
Avi Kivitye8467fd2009-12-29 18:43:06 +02001369static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
1370{
1371}
1372
Avi Kivityaff48ba2010-12-05 18:56:11 +02001373static void svm_decache_cr3(struct kvm_vcpu *vcpu)
1374{
1375}
1376
Anthony Liguori25c4c272007-04-27 09:29:21 +03001377static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -08001378{
1379}
1380
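/*
 * A note on the helper below: CR0 read/write intercepts are dropped
 * only while the guest's view of CR0 (gcr0) and the value the hardware
 * sees (hcr0) agree exactly and the FPU is active; otherwise every CR0
 * access must trap so the two views can be kept consistent.
 */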
Avi Kivityd2251572010-01-06 10:55:27 +02001381static void update_cr0_intercept(struct vcpu_svm *svm)
1382{
1383 ulong gcr0 = svm->vcpu.arch.cr0;
1384 u64 *hcr0 = &svm->vmcb->save.cr0;
1385
1386 if (!svm->vcpu.fpu_active)
1387 *hcr0 |= SVM_CR0_SELECTIVE_MASK;
1388 else
1389 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
1390 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
1391
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001392 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02001393
1394 if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001395 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
1396 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02001397 } else {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001398 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1399 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02001400 }
1401}
1402
Avi Kivity6aa8b732006-12-10 02:21:36 -08001403static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1404{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001405 struct vcpu_svm *svm = to_svm(vcpu);
1406
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001407#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +02001408 if (vcpu->arch.efer & EFER_LME) {
Rusty Russell707d92fa2007-07-17 23:19:08 +10001409 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001410 vcpu->arch.efer |= EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001411 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001412 }
1413
Mike Dayd77c26f2007-10-08 09:02:08 -04001414 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001415 vcpu->arch.efer &= ~EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001416 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001417 }
1418 }
1419#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001420 vcpu->arch.cr0 = cr0;
Avi Kivity888f9f32010-01-10 12:14:04 +02001421
1422 if (!npt_enabled)
1423 cr0 |= X86_CR0_PG | X86_CR0_WP;
Avi Kivity02daab22009-12-30 12:40:26 +02001424
1425 if (!vcpu->fpu_active)
Joerg Roedel334df502008-01-21 13:09:33 +01001426 cr0 |= X86_CR0_TS;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001427 /*
1428	 * Re-enable caching here because the QEMU BIOS
1429	 * does not do it - otherwise this results in some delay at
1430	 * reboot.
1431 */
1432 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001433 svm->vmcb->save.cr0 = cr0;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001434 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02001435 update_cr0_intercept(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001436}
1437
1438static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1439{
Joerg Roedel6394b642008-04-09 14:15:29 +02001440 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
Joerg Roedele5eab0c2008-09-09 19:11:51 +02001441 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
1442
1443 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
Joerg Roedelf40f6a42010-12-03 15:25:15 +01001444 svm_flush_tlb(vcpu);
Joerg Roedel6394b642008-04-09 14:15:29 +02001445
Joerg Roedelec077262008-04-09 14:15:28 +02001446 vcpu->arch.cr4 = cr4;
1447 if (!npt_enabled)
1448 cr4 |= X86_CR4_PAE;
Joerg Roedel6394b642008-04-09 14:15:29 +02001449 cr4 |= host_cr4_mce;
Joerg Roedelec077262008-04-09 14:15:28 +02001450 to_svm(vcpu)->vmcb->save.cr4 = cr4;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001451 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001452}
1453
1454static void svm_set_segment(struct kvm_vcpu *vcpu,
1455 struct kvm_segment *var, int seg)
1456{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001457 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001458 struct vmcb_seg *s = svm_seg(vcpu, seg);
1459
1460 s->base = var->base;
1461 s->limit = var->limit;
1462 s->selector = var->selector;
1463 if (var->unusable)
1464 s->attrib = 0;
1465 else {
1466 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1467 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1468 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1469 s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
1470 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1471 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1472 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1473 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1474 }
1475 if (seg == VCPU_SREG_CS)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001476 svm->vmcb->save.cpl
1477 = (svm->vmcb->save.cs.attrib
Avi Kivity6aa8b732006-12-10 02:21:36 -08001478 >> SVM_SELECTOR_DPL_SHIFT) & 3;
1479
Joerg Roedel060d0c92010-12-03 11:45:57 +01001480 mark_dirty(svm->vmcb, VMCB_SEG);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001481}
1482
Gleb Natapov44c11432009-05-11 13:35:52 +03001483static void update_db_intercept(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001484{
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001485 struct vcpu_svm *svm = to_svm(vcpu);
1486
Joerg Roedel18c918c2010-11-30 18:03:59 +01001487 clr_exception_intercept(svm, DB_VECTOR);
1488 clr_exception_intercept(svm, BP_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03001489
Jan Kiszka6be7d302009-10-18 13:24:54 +02001490 if (svm->nmi_singlestep)
Joerg Roedel18c918c2010-11-30 18:03:59 +01001491 set_exception_intercept(svm, DB_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03001492
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001493 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1494 if (vcpu->guest_debug &
1495 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
Joerg Roedel18c918c2010-11-30 18:03:59 +01001496 set_exception_intercept(svm, DB_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001497 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
Joerg Roedel18c918c2010-11-30 18:03:59 +01001498 set_exception_intercept(svm, BP_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001499 } else
1500 vcpu->guest_debug = 0;
Gleb Natapov44c11432009-05-11 13:35:52 +03001501}
1502
Jan Kiszka355be0b2009-10-03 00:31:21 +02001503static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
Gleb Natapov44c11432009-05-11 13:35:52 +03001504{
Gleb Natapov44c11432009-05-11 13:35:52 +03001505 struct vcpu_svm *svm = to_svm(vcpu);
1506
Jan Kiszkaae675ef2008-12-15 13:52:10 +01001507 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1508 svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
1509 else
1510 svm->vmcb->save.dr7 = vcpu->arch.dr7;
1511
Joerg Roedel72214b92010-12-03 11:45:55 +01001512 mark_dirty(svm->vmcb, VMCB_DR);
1513
Jan Kiszka355be0b2009-10-03 00:31:21 +02001514 update_db_intercept(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001515}
1516
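/*
 * ASIDs are allocated per physical CPU. Once the pool is exhausted the
 * generation counter is bumped, numbering restarts at 1 and a flush of
 * all ASIDs is requested, so stale translations cannot survive a wrap.
 */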
Tejun Heo0fe1e002009-10-29 22:34:14 +09001517static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001518{
Tejun Heo0fe1e002009-10-29 22:34:14 +09001519 if (sd->next_asid > sd->max_asid) {
1520 ++sd->asid_generation;
1521 sd->next_asid = 1;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001522 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001523 }
1524
Tejun Heo0fe1e002009-10-29 22:34:14 +09001525 svm->asid_generation = sd->asid_generation;
1526 svm->vmcb->control.asid = sd->next_asid++;
Joerg Roedeld48086d2010-12-03 11:45:51 +01001527
1528 mark_dirty(svm->vmcb, VMCB_ASID);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001529}
1530
Gleb Natapov020df072010-04-13 10:05:23 +03001531static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001532{
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001533 struct vcpu_svm *svm = to_svm(vcpu);
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001534
Gleb Natapov020df072010-04-13 10:05:23 +03001535 svm->vmcb->save.dr7 = value;
Joerg Roedel72214b92010-12-03 11:45:55 +01001536 mark_dirty(svm->vmcb, VMCB_DR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001537}
1538
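/*
 * The #PF intercept below distinguishes a genuine guest page fault
 * (handled through the MMU) from the two paravirtual async-PF
 * notifications, which only need to block or wake the affected task.
 */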
Avi Kivity851ba692009-08-24 11:10:17 +03001539static int pf_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001540{
Gleb Natapov631bc482010-10-14 11:22:52 +02001541 u64 fault_address = svm->vmcb->control.exit_info_2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001542 u32 error_code;
Gleb Natapov631bc482010-10-14 11:22:52 +02001543 int r = 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001544
Gleb Natapov631bc482010-10-14 11:22:52 +02001545 switch (svm->apf_reason) {
1546 default:
1547 error_code = svm->vmcb->control.exit_info_1;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001548
Gleb Natapov631bc482010-10-14 11:22:52 +02001549 trace_kvm_page_fault(fault_address, error_code);
1550 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
1551 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
Andre Przywaradc25e892010-12-21 11:12:07 +01001552 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
1553 svm->vmcb->control.insn_bytes,
1554 svm->vmcb->control.insn_len);
Gleb Natapov631bc482010-10-14 11:22:52 +02001555 break;
1556 case KVM_PV_REASON_PAGE_NOT_PRESENT:
1557 svm->apf_reason = 0;
1558 local_irq_disable();
1559 kvm_async_pf_task_wait(fault_address);
1560 local_irq_enable();
1561 break;
1562 case KVM_PV_REASON_PAGE_READY:
1563 svm->apf_reason = 0;
1564 local_irq_disable();
1565 kvm_async_pf_task_wake(fault_address);
1566 local_irq_enable();
1567 break;
1568 }
1569 return r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001570}
1571
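/*
 * A #DB exit may belong to the guest (re-queued as an exception), to
 * the NMI single-step workaround (cleared once the step completes), or
 * to userspace debugging (reported via KVM_EXIT_DEBUG).
 */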
Avi Kivity851ba692009-08-24 11:10:17 +03001572static int db_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001573{
Avi Kivity851ba692009-08-24 11:10:17 +03001574 struct kvm_run *kvm_run = svm->vcpu.run;
1575
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001576 if (!(svm->vcpu.guest_debug &
Gleb Natapov44c11432009-05-11 13:35:52 +03001577 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
Jan Kiszka6be7d302009-10-18 13:24:54 +02001578 !svm->nmi_singlestep) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001579 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1580 return 1;
1581 }
Gleb Natapov44c11432009-05-11 13:35:52 +03001582
Jan Kiszka6be7d302009-10-18 13:24:54 +02001583 if (svm->nmi_singlestep) {
1584 svm->nmi_singlestep = false;
Gleb Natapov44c11432009-05-11 13:35:52 +03001585 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
1586 svm->vmcb->save.rflags &=
1587 ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1588 update_db_intercept(&svm->vcpu);
1589 }
1590
1591 if (svm->vcpu.guest_debug &
Joerg Roedele0231712010-02-24 18:59:10 +01001592 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
Gleb Natapov44c11432009-05-11 13:35:52 +03001593 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1594 kvm_run->debug.arch.pc =
1595 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1596 kvm_run->debug.arch.exception = DB_VECTOR;
1597 return 0;
1598 }
1599
1600 return 1;
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001601}
1602
Avi Kivity851ba692009-08-24 11:10:17 +03001603static int bp_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001604{
Avi Kivity851ba692009-08-24 11:10:17 +03001605 struct kvm_run *kvm_run = svm->vcpu.run;
1606
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001607 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1608 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1609 kvm_run->debug.arch.exception = BP_VECTOR;
1610 return 0;
1611}
1612
Avi Kivity851ba692009-08-24 11:10:17 +03001613static int ud_interception(struct vcpu_svm *svm)
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001614{
1615 int er;
1616
Andre Przywara51d8b662010-12-21 11:12:02 +01001617 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001618 if (er != EMULATE_DONE)
Avi Kivity7ee5d9402007-11-25 15:22:50 +02001619 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001620 return 1;
1621}
1622
Avi Kivity6b52d182010-01-21 15:31:47 +02001623static void svm_fpu_activate(struct kvm_vcpu *vcpu)
Anthony Liguori7807fa62007-04-23 09:17:21 -05001624{
Avi Kivity6b52d182010-01-21 15:31:47 +02001625 struct vcpu_svm *svm = to_svm(vcpu);
Joerg Roedel66a562f2010-02-19 16:23:08 +01001626
Joerg Roedel18c918c2010-11-30 18:03:59 +01001627 clr_exception_intercept(svm, NM_VECTOR);
Joerg Roedel66a562f2010-02-19 16:23:08 +01001628
Rusty Russelle756fc62007-07-30 20:07:08 +10001629 svm->vcpu.fpu_active = 1;
Avi Kivityd2251572010-01-06 10:55:27 +02001630 update_cr0_intercept(svm);
Avi Kivity6b52d182010-01-21 15:31:47 +02001631}
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001632
Avi Kivity6b52d182010-01-21 15:31:47 +02001633static int nm_interception(struct vcpu_svm *svm)
1634{
1635 svm_fpu_activate(&svm->vcpu);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001636 return 1;
Anthony Liguori7807fa62007-04-23 09:17:21 -05001637}
1638
Joerg Roedel67ec6602010-05-17 14:43:35 +02001639static bool is_erratum_383(void)
1640{
1641 int err, i;
1642 u64 value;
1643
1644 if (!erratum_383_found)
1645 return false;
1646
1647 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
1648 if (err)
1649 return false;
1650
1651 /* Bit 62 may or may not be set for this mce */
1652 value &= ~(1ULL << 62);
1653
1654 if (value != 0xb600000000010015ULL)
1655 return false;
1656
1657 /* Clear MCi_STATUS registers */
1658 for (i = 0; i < 6; ++i)
1659 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
1660
1661 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
1662 if (!err) {
1663 u32 low, high;
1664
1665 value &= ~(1ULL << 2);
1666 low = lower_32_bits(value);
1667 high = upper_32_bits(value);
1668
1669 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
1670 }
1671
1672 /* Flush tlb to evict multi-match entries */
1673 __flush_tlb_all();
1674
1675 return true;
1676}
1677
Joerg Roedelfe5913e2010-05-17 14:43:34 +02001678static void svm_handle_mce(struct vcpu_svm *svm)
Joerg Roedel53371b52008-04-09 14:15:30 +02001679{
Joerg Roedel67ec6602010-05-17 14:43:35 +02001680 if (is_erratum_383()) {
1681 /*
1682 * Erratum 383 triggered. Guest state is corrupt so kill the
1683 * guest.
1684 */
1685 pr_err("KVM: Guest triggered AMD Erratum 383\n");
1686
Avi Kivitya8eeb042010-05-10 12:34:53 +03001687 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
Joerg Roedel67ec6602010-05-17 14:43:35 +02001688
1689 return;
1690 }
1691
Joerg Roedel53371b52008-04-09 14:15:30 +02001692 /*
1693 * On an #MC intercept the MCE handler is not called automatically in
1694 * the host. So do it by hand here.
1695 */
1696 asm volatile (
1697 "int $0x12\n");
1698 /* not sure if we ever come back to this point */
1699
Joerg Roedelfe5913e2010-05-17 14:43:34 +02001700 return;
1701}
1702
1703static int mc_interception(struct vcpu_svm *svm)
1704{
Joerg Roedel53371b52008-04-09 14:15:30 +02001705 return 1;
1706}
1707
Avi Kivity851ba692009-08-24 11:10:17 +03001708static int shutdown_interception(struct vcpu_svm *svm)
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001709{
Avi Kivity851ba692009-08-24 11:10:17 +03001710 struct kvm_run *kvm_run = svm->vcpu.run;
1711
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001712 /*
1713 * VMCB is undefined after a SHUTDOWN intercept
1714 * so reinitialize it.
1715 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001716 clear_page(svm->vmcb);
Joerg Roedele6101a92008-02-13 18:58:45 +01001717 init_vmcb(svm);
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001718
1719 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1720 return 0;
1721}
1722
Avi Kivity851ba692009-08-24 11:10:17 +03001723static int io_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001724{
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001725 struct kvm_vcpu *vcpu = &svm->vcpu;
Mike Dayd77c26f2007-10-08 09:02:08 -04001726 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
Jan Kiszka34c33d12009-02-08 13:28:15 +01001727 int size, in, string;
Avi Kivity039576c2007-03-20 12:46:50 +02001728 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001729
Rusty Russelle756fc62007-07-30 20:07:08 +10001730 ++svm->vcpu.stat.io_exits;
Laurent Viviere70669a2007-08-05 10:36:40 +03001731 string = (io_info & SVM_IOIO_STR_MASK) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02001732 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001733 if (string || in)
Andre Przywara51d8b662010-12-21 11:12:02 +01001734 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001735
Avi Kivity039576c2007-03-20 12:46:50 +02001736 port = io_info >> 16;
1737 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001738 svm->next_rip = svm->vmcb->control.exit_info_2;
Guillaume Thouvenine93f36b2008-10-28 10:51:30 +01001739 skip_emulated_instruction(&svm->vcpu);
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001740
1741 return kvm_fast_pio_out(vcpu, size, port);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001742}
1743
Avi Kivity851ba692009-08-24 11:10:17 +03001744static int nmi_interception(struct vcpu_svm *svm)
Joerg Roedelc47f0982008-04-30 17:56:00 +02001745{
1746 return 1;
1747}
1748
Avi Kivity851ba692009-08-24 11:10:17 +03001749static int intr_interception(struct vcpu_svm *svm)
Joerg Roedela0698052008-04-30 17:56:01 +02001750{
1751 ++svm->vcpu.stat.irq_exits;
1752 return 1;
1753}
1754
Avi Kivity851ba692009-08-24 11:10:17 +03001755static int nop_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001756{
1757 return 1;
1758}
1759
Avi Kivity851ba692009-08-24 11:10:17 +03001760static int halt_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001761{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001762 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
Rusty Russelle756fc62007-07-30 20:07:08 +10001763 skip_emulated_instruction(&svm->vcpu);
1764 return kvm_emulate_halt(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001765}
1766
Avi Kivity851ba692009-08-24 11:10:17 +03001767static int vmmcall_interception(struct vcpu_svm *svm)
Avi Kivity02e235b2007-02-19 14:37:47 +02001768{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001769 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Rusty Russelle756fc62007-07-30 20:07:08 +10001770 skip_emulated_instruction(&svm->vcpu);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001771 kvm_emulate_hypercall(&svm->vcpu);
1772 return 1;
Avi Kivity02e235b2007-02-19 14:37:47 +02001773}
1774
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001775static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
1776{
1777 struct vcpu_svm *svm = to_svm(vcpu);
1778
1779 return svm->nested.nested_cr3;
1780}
1781
1782static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
1783 unsigned long root)
1784{
1785 struct vcpu_svm *svm = to_svm(vcpu);
1786
1787 svm->vmcb->control.nested_cr3 = root;
Joerg Roedelb2747162010-12-03 11:45:53 +01001788 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedelf40f6a42010-12-03 15:25:15 +01001789 svm_flush_tlb(vcpu);
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001790}
1791
Avi Kivity6389ee92010-11-29 16:12:30 +02001792static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
1793 struct x86_exception *fault)
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001794{
1795 struct vcpu_svm *svm = to_svm(vcpu);
1796
1797 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
1798 svm->vmcb->control.exit_code_hi = 0;
Avi Kivity6389ee92010-11-29 16:12:30 +02001799 svm->vmcb->control.exit_info_1 = fault->error_code;
1800 svm->vmcb->control.exit_info_2 = fault->address;
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001801
1802 nested_svm_vmexit(svm);
1803}
1804
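/*
 * Rough picture of the setup below: vcpu->arch.mmu shadows the L1
 * hypervisor's nested page table (nested_cr3), while walk_mmu is
 * switched to the nested MMU so guest virtual addresses are translated
 * through both paging levels.
 */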
Joerg Roedel4b161842010-09-10 17:31:03 +02001805static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
1806{
1807 int r;
1808
1809 r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
1810
1811 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
1812 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
1813 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
1814 vcpu->arch.mmu.shadow_root_level = get_npt_level();
1815 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
1816
1817 return r;
1818}
1819
1820static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
1821{
1822 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
1823}
1824
Alexander Grafc0725422008-11-25 20:17:03 +01001825static int nested_svm_check_permissions(struct vcpu_svm *svm)
1826{
Avi Kivityf6801df2010-01-21 15:31:50 +02001827 if (!(svm->vcpu.arch.efer & EFER_SVME)
Alexander Grafc0725422008-11-25 20:17:03 +01001828 || !is_paging(&svm->vcpu)) {
1829 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
1830 return 1;
1831 }
1832
1833 if (svm->vmcb->save.cpl) {
1834 kvm_inject_gp(&svm->vcpu, 0);
1835 return 1;
1836 }
1837
1838 return 0;
1839}
1840
Alexander Grafcf74a782008-11-25 20:17:08 +01001841static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1842 bool has_error_code, u32 error_code)
1843{
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01001844 int vmexit;
1845
Joerg Roedel20307532010-11-29 17:51:48 +01001846 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel0295ad72009-08-07 11:49:37 +02001847 return 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01001848
Joerg Roedel0295ad72009-08-07 11:49:37 +02001849 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1850 svm->vmcb->control.exit_code_hi = 0;
1851 svm->vmcb->control.exit_info_1 = error_code;
1852 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1853
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01001854 vmexit = nested_svm_intercept(svm);
1855 if (vmexit == NESTED_EXIT_DONE)
1856 svm->nested.exit_required = true;
1857
1858 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01001859}
1860
Joerg Roedel8fe54652010-02-19 16:23:01 +01001861/* This function returns true if it is safe to enable the irq window */
1862static inline bool nested_svm_intr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01001863{
Joerg Roedel20307532010-11-29 17:51:48 +01001864 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel8fe54652010-02-19 16:23:01 +01001865 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01001866
Joerg Roedel26666952009-08-07 11:49:46 +02001867 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01001868 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01001869
Joerg Roedel26666952009-08-07 11:49:46 +02001870 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01001871 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01001872
Gleb Natapova0a07cd2010-09-20 10:15:32 +02001873 /*
1874	 * If a vmexit was already requested (by an intercepted exception
1875	 * for instance), do not overwrite it with the "external interrupt"
1876	 * vmexit.
1877 */
1878 if (svm->nested.exit_required)
1879 return false;
1880
Joerg Roedel197717d2010-02-24 18:59:19 +01001881 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
1882 svm->vmcb->control.exit_info_1 = 0;
1883 svm->vmcb->control.exit_info_2 = 0;
Joerg Roedel26666952009-08-07 11:49:46 +02001884
Joerg Roedelcd3ff652009-10-09 16:08:26 +02001885 if (svm->nested.intercept & 1ULL) {
1886 /*
1887 * The #vmexit can't be emulated here directly because this
1888	 * code path runs with irqs and preemption disabled. A
1889	 * #vmexit emulation might sleep. Only signal a request for
1890 * the #vmexit here.
1891 */
1892 svm->nested.exit_required = true;
Joerg Roedel236649d2009-10-09 16:08:30 +02001893 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
Joerg Roedel8fe54652010-02-19 16:23:01 +01001894 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01001895 }
1896
Joerg Roedel8fe54652010-02-19 16:23:01 +01001897 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01001898}
1899
Joerg Roedel887f5002010-02-24 18:59:12 +01001900/* This function returns true if it is safe to enable the nmi window */
1901static inline bool nested_svm_nmi(struct vcpu_svm *svm)
1902{
Joerg Roedel20307532010-11-29 17:51:48 +01001903 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel887f5002010-02-24 18:59:12 +01001904 return true;
1905
1906 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
1907 return true;
1908
1909 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
1910 svm->nested.exit_required = true;
1911
1912 return false;
1913}
1914
Joerg Roedel7597f122010-02-19 16:23:00 +01001915static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001916{
1917 struct page *page;
1918
Joerg Roedel6c3bd3d2010-02-19 16:23:04 +01001919 might_sleep();
1920
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001921 page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001922 if (is_error_page(page))
1923 goto error;
1924
Joerg Roedel7597f122010-02-19 16:23:00 +01001925 *_page = page;
1926
1927 return kmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001928
1929error:
1930 kvm_release_page_clean(page);
1931 kvm_inject_gp(&svm->vcpu, 0);
1932
1933 return NULL;
1934}
1935
Joerg Roedel7597f122010-02-19 16:23:00 +01001936static void nested_svm_unmap(struct page *page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001937{
Joerg Roedel7597f122010-02-19 16:23:00 +01001938 kunmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001939 kvm_release_page_dirty(page);
1940}
1941
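/*
 * IOPM lookup, illustrated with a hypothetical port: the port number
 * sits in bits 31:16 of exit_info_1 and each port is one bit in the
 * nested IOPM. Port 0x3f8 is therefore tested at byte offset
 * 0x3f8 / 8 = 0x7f, bit 0x3f8 % 8 = 0; a set bit hands the exit to the
 * nested guest.
 */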
Joerg Roedelce2ac082010-03-01 15:34:39 +01001942static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01001943{
Joerg Roedelce2ac082010-03-01 15:34:39 +01001944 unsigned port;
1945 u8 val, bit;
1946 u64 gpa;
1947
1948 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
1949 return NESTED_EXIT_HOST;
1950
1951 port = svm->vmcb->control.exit_info_1 >> 16;
1952 gpa = svm->nested.vmcb_iopm + (port / 8);
1953 bit = port % 8;
1954 val = 0;
1955
1956 if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
1957 val &= (1 << bit);
1958
1959 return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1960}
1961
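/*
 * MSRPM lookup, illustrated with an arbitrary example: every MSR owns
 * two bits (read, then write), 16 MSRs per 32-bit word. For a write to
 * MSR 0x176 the word comes from svm_msrpm_offset(0x176) and the tested
 * bit within it is 2 * (0x176 & 0xf) + 1 = 13.
 */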
Joerg Roedeld2477822010-03-01 15:34:34 +01001962static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01001963{
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001964 u32 offset, msr, value;
1965 int write, mask;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001966
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02001967 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
Joerg Roedeld2477822010-03-01 15:34:34 +01001968 return NESTED_EXIT_HOST;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02001969
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001970 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1971 offset = svm_msrpm_offset(msr);
1972 write = svm->vmcb->control.exit_info_1 & 1;
1973 mask = 1 << ((2 * (msr & 0xf)) + write);
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02001974
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001975 if (offset == MSR_INVALID)
1976 return NESTED_EXIT_DONE;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001977
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001978	/* Offset is in 32-bit units but we need it in 8-bit units */
1979 offset *= 4;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001980
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001981 if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
1982 return NESTED_EXIT_DONE;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02001983
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001984 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001985}
1986
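/*
 * Exits that must be handled on the host no matter what the nested
 * intercept bits say: physical INTR/NMI, machine checks, NPT faults
 * while NPT is in use, and ordinary #PF under shadow paging (async PF
 * notifications are still forwarded to the nested guest).
 */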
Joerg Roedel410e4d52009-08-07 11:49:44 +02001987static int nested_svm_exit_special(struct vcpu_svm *svm)
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001988{
Alexander Grafcf74a782008-11-25 20:17:08 +01001989 u32 exit_code = svm->vmcb->control.exit_code;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001990
Joerg Roedel410e4d52009-08-07 11:49:44 +02001991 switch (exit_code) {
1992 case SVM_EXIT_INTR:
1993 case SVM_EXIT_NMI:
Joerg Roedelff47a492010-04-22 12:33:14 +02001994 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
Joerg Roedel410e4d52009-08-07 11:49:44 +02001995 return NESTED_EXIT_HOST;
Joerg Roedel410e4d52009-08-07 11:49:44 +02001996 case SVM_EXIT_NPF:
Joerg Roedele0231712010-02-24 18:59:10 +01001997 /* For now we are always handling NPFs when using them */
Joerg Roedel410e4d52009-08-07 11:49:44 +02001998 if (npt_enabled)
1999 return NESTED_EXIT_HOST;
2000 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002001 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
Gleb Natapov631bc482010-10-14 11:22:52 +02002002 /* When we're shadowing, trap PFs, but not async PF */
2003 if (!npt_enabled && svm->apf_reason == 0)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002004 return NESTED_EXIT_HOST;
2005 break;
Joerg Roedel66a562f2010-02-19 16:23:08 +01002006 case SVM_EXIT_EXCP_BASE + NM_VECTOR:
2007 nm_interception(svm);
2008 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002009 default:
2010 break;
Alexander Grafcf74a782008-11-25 20:17:08 +01002011 }
2012
Joerg Roedel410e4d52009-08-07 11:49:44 +02002013 return NESTED_EXIT_CONTINUE;
2014}
2015
2016/*
2017 * If this function returns NESTED_EXIT_DONE, this #vmexit was already handled
2018 */
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002019static int nested_svm_intercept(struct vcpu_svm *svm)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002020{
2021 u32 exit_code = svm->vmcb->control.exit_code;
2022 int vmexit = NESTED_EXIT_HOST;
2023
Alexander Grafcf74a782008-11-25 20:17:08 +01002024 switch (exit_code) {
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002025 case SVM_EXIT_MSR:
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002026 vmexit = nested_svm_exit_handled_msr(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002027 break;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002028 case SVM_EXIT_IOIO:
2029 vmexit = nested_svm_intercept_ioio(svm);
2030 break;
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002031 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2032 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2033 if (svm->nested.intercept_cr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002034 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002035 break;
2036 }
Joerg Roedel3aed0412010-11-30 18:03:58 +01002037 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2038 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2039 if (svm->nested.intercept_dr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002040 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002041 break;
2042 }
2043 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2044 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
Joerg Roedelaad42c62009-08-07 11:49:34 +02002045 if (svm->nested.intercept_exceptions & excp_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002046 vmexit = NESTED_EXIT_DONE;
Gleb Natapov631bc482010-10-14 11:22:52 +02002047		/* an async page fault always causes a vmexit */
2048 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
2049 svm->apf_reason != 0)
2050 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002051 break;
2052 }
Joerg Roedel228070b2010-04-22 12:33:10 +02002053 case SVM_EXIT_ERR: {
2054 vmexit = NESTED_EXIT_DONE;
2055 break;
2056 }
Alexander Grafcf74a782008-11-25 20:17:08 +01002057 default: {
2058 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
Joerg Roedelaad42c62009-08-07 11:49:34 +02002059 if (svm->nested.intercept & exit_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002060 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002061 }
2062 }
2063
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002064 return vmexit;
2065}
2066
2067static int nested_svm_exit_handled(struct vcpu_svm *svm)
2068{
2069 int vmexit;
2070
2071 vmexit = nested_svm_intercept(svm);
2072
2073 if (vmexit == NESTED_EXIT_DONE)
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002074 nested_svm_vmexit(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002075
2076 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01002077}
2078
Joerg Roedel0460a972009-08-07 11:49:31 +02002079static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
2080{
2081 struct vmcb_control_area *dst = &dst_vmcb->control;
2082 struct vmcb_control_area *from = &from_vmcb->control;
2083
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002084 dst->intercept_cr = from->intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01002085 dst->intercept_dr = from->intercept_dr;
Joerg Roedel0460a972009-08-07 11:49:31 +02002086 dst->intercept_exceptions = from->intercept_exceptions;
2087 dst->intercept = from->intercept;
2088 dst->iopm_base_pa = from->iopm_base_pa;
2089 dst->msrpm_base_pa = from->msrpm_base_pa;
2090 dst->tsc_offset = from->tsc_offset;
2091 dst->asid = from->asid;
2092 dst->tlb_ctl = from->tlb_ctl;
2093 dst->int_ctl = from->int_ctl;
2094 dst->int_vector = from->int_vector;
2095 dst->int_state = from->int_state;
2096 dst->exit_code = from->exit_code;
2097 dst->exit_code_hi = from->exit_code_hi;
2098 dst->exit_info_1 = from->exit_info_1;
2099 dst->exit_info_2 = from->exit_info_2;
2100 dst->exit_int_info = from->exit_int_info;
2101 dst->exit_int_info_err = from->exit_int_info_err;
2102 dst->nested_ctl = from->nested_ctl;
2103 dst->event_inj = from->event_inj;
2104 dst->event_inj_err = from->event_inj_err;
2105 dst->nested_cr3 = from->nested_cr3;
2106 dst->lbr_ctl = from->lbr_ctl;
2107}
2108
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002109static int nested_svm_vmexit(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002110{
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002111 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002112 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedel33740e42009-08-07 11:49:29 +02002113 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002114 struct page *page;
Alexander Grafcf74a782008-11-25 20:17:08 +01002115
Joerg Roedel17897f32009-10-09 16:08:29 +02002116 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
2117 vmcb->control.exit_info_1,
2118 vmcb->control.exit_info_2,
2119 vmcb->control.exit_int_info,
2120 vmcb->control.exit_int_info_err);
2121
Joerg Roedel7597f122010-02-19 16:23:00 +01002122 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002123 if (!nested_vmcb)
2124 return 1;
2125
Joerg Roedel20307532010-11-29 17:51:48 +01002126 /* Exit Guest-Mode */
2127 leave_guest_mode(&svm->vcpu);
Joerg Roedel06fc77722010-02-19 16:23:07 +01002128 svm->nested.vmcb = 0;
2129
Alexander Grafcf74a782008-11-25 20:17:08 +01002130 /* Give the current vmcb to the guest */
Joerg Roedel33740e42009-08-07 11:49:29 +02002131 disable_gif(svm);
2132
2133 nested_vmcb->save.es = vmcb->save.es;
2134 nested_vmcb->save.cs = vmcb->save.cs;
2135 nested_vmcb->save.ss = vmcb->save.ss;
2136 nested_vmcb->save.ds = vmcb->save.ds;
2137 nested_vmcb->save.gdtr = vmcb->save.gdtr;
2138 nested_vmcb->save.idtr = vmcb->save.idtr;
Joerg Roedel3f6a9d12010-07-27 18:14:20 +02002139 nested_vmcb->save.efer = svm->vcpu.arch.efer;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01002140 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
Avi Kivity9f8fe502010-12-05 17:30:00 +02002141 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02002142 nested_vmcb->save.cr2 = vmcb->save.cr2;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01002143 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03002144 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02002145 nested_vmcb->save.rip = vmcb->save.rip;
2146 nested_vmcb->save.rsp = vmcb->save.rsp;
2147 nested_vmcb->save.rax = vmcb->save.rax;
2148 nested_vmcb->save.dr7 = vmcb->save.dr7;
2149 nested_vmcb->save.dr6 = vmcb->save.dr6;
2150 nested_vmcb->save.cpl = vmcb->save.cpl;
2151
2152 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
2153 nested_vmcb->control.int_vector = vmcb->control.int_vector;
2154 nested_vmcb->control.int_state = vmcb->control.int_state;
2155 nested_vmcb->control.exit_code = vmcb->control.exit_code;
2156 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
2157 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
2158 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
2159 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
2160 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
Joerg Roedel7a190662010-07-27 18:14:21 +02002161 nested_vmcb->control.next_rip = vmcb->control.next_rip;
Alexander Graf8d23c462009-10-09 16:08:25 +02002162
2163 /*
2164 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
2165 * to make sure that we do not lose injected events. So check event_inj
2166 * here and copy it to exit_int_info if it is valid.
2167 * Exit_int_info and event_inj can't be both valid because the case
2168 * below only happens on a VMRUN instruction intercept which has
2169 * no valid exit_int_info set.
2170 */
2171 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
2172 struct vmcb_control_area *nc = &nested_vmcb->control;
2173
2174 nc->exit_int_info = vmcb->control.event_inj;
2175 nc->exit_int_info_err = vmcb->control.event_inj_err;
2176 }
2177
Joerg Roedel33740e42009-08-07 11:49:29 +02002178 nested_vmcb->control.tlb_ctl = 0;
2179 nested_vmcb->control.event_inj = 0;
2180 nested_vmcb->control.event_inj_err = 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01002181
2182 /* We always set V_INTR_MASKING and remember the old value in hflags */
2183 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2184 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
2185
Alexander Grafcf74a782008-11-25 20:17:08 +01002186 /* Restore the original control entries */
Joerg Roedel0460a972009-08-07 11:49:31 +02002187 copy_vmcb_control_area(vmcb, hsave);
Alexander Grafcf74a782008-11-25 20:17:08 +01002188
Alexander Graf219b65d2009-06-15 15:21:25 +02002189 kvm_clear_exception_queue(&svm->vcpu);
2190 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01002191
Joerg Roedel4b161842010-09-10 17:31:03 +02002192 svm->nested.nested_cr3 = 0;
2193
Alexander Grafcf74a782008-11-25 20:17:08 +01002194 /* Restore selected save entries */
2195 svm->vmcb->save.es = hsave->save.es;
2196 svm->vmcb->save.cs = hsave->save.cs;
2197 svm->vmcb->save.ss = hsave->save.ss;
2198 svm->vmcb->save.ds = hsave->save.ds;
2199 svm->vmcb->save.gdtr = hsave->save.gdtr;
2200 svm->vmcb->save.idtr = hsave->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03002201 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
Alexander Grafcf74a782008-11-25 20:17:08 +01002202 svm_set_efer(&svm->vcpu, hsave->save.efer);
2203 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
2204 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
2205 if (npt_enabled) {
2206 svm->vmcb->save.cr3 = hsave->save.cr3;
2207 svm->vcpu.arch.cr3 = hsave->save.cr3;
2208 } else {
Avi Kivity23902182010-06-10 17:02:16 +03002209 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
Alexander Grafcf74a782008-11-25 20:17:08 +01002210 }
2211 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
2212 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
2213 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
2214 svm->vmcb->save.dr7 = 0;
2215 svm->vmcb->save.cpl = 0;
2216 svm->vmcb->control.exit_int_info = 0;
2217
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002218 mark_all_dirty(svm->vmcb);
2219
Joerg Roedel7597f122010-02-19 16:23:00 +01002220 nested_svm_unmap(page);
Alexander Grafcf74a782008-11-25 20:17:08 +01002221
Joerg Roedel4b161842010-09-10 17:31:03 +02002222 nested_svm_uninit_mmu_context(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01002223 kvm_mmu_reset_context(&svm->vcpu);
2224 kvm_mmu_load(&svm->vcpu);
2225
2226 return 0;
2227}
Alexander Graf3d6368e2008-11-25 20:17:07 +01002228
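/*
 * Sketch of the merge below, assuming msrpm_offsets lists every 32-bit
 * word in which KVM itself leaves an MSR unintercepted:
 *
 *	merged[p] = host_msrpm[p] | nested_msrpm[p];
 *
 * Because the bitmaps are OR'ed, the nested guest can never gain
 * access to an MSR that KVM wants to trap.
 */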
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002229static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002230{
Joerg Roedel323c3d82010-03-01 15:34:37 +01002231 /*
2232	 * This function merges the msr permission bitmaps of kvm and the
2233	 * nested vmcb. It is optimized in that it only merges the parts where
2234	 * the kvm msr permission bitmap may contain zero bits.
2235 */
Alexander Graf3d6368e2008-11-25 20:17:07 +01002236 int i;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002237
Joerg Roedel323c3d82010-03-01 15:34:37 +01002238 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2239 return true;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002240
Joerg Roedel323c3d82010-03-01 15:34:37 +01002241 for (i = 0; i < MSRPM_OFFSETS; i++) {
2242 u32 value, p;
2243 u64 offset;
2244
2245 if (msrpm_offsets[i] == 0xffffffff)
2246 break;
2247
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002248 p = msrpm_offsets[i];
2249 offset = svm->nested.vmcb_msrpm + (p * 4);
Joerg Roedel323c3d82010-03-01 15:34:37 +01002250
2251 if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
2252 return false;
2253
2254 svm->nested.msrpm[p] = svm->msrpm[p] | value;
2255 }
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002256
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002257 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002258
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002259 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002260}
2261
Joerg Roedel52c65a302010-08-02 16:46:44 +02002262static bool nested_vmcb_checks(struct vmcb *vmcb)
2263{
2264 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
2265 return false;
2266
Joerg Roedeldbe77582010-08-02 16:46:45 +02002267 if (vmcb->control.asid == 0)
2268 return false;
2269
Joerg Roedel4b161842010-09-10 17:31:03 +02002270 if (vmcb->control.nested_ctl && !npt_enabled)
2271 return false;
2272
Joerg Roedel52c65a302010-08-02 16:46:44 +02002273 return true;
2274}
2275
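/*
 * Emulated VMRUN in outline: map and sanity-check the nested vmcb,
 * stash the current state in hsave, load the nested guest's save and
 * control fields, merge the intercept masks and finally set GIF. A
 * false return means the nested vmcb was unmappable or inconsistent.
 */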
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002276static bool nested_svm_vmrun(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002277{
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002278 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002279 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedeldefbba52009-08-07 11:49:30 +02002280 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002281 struct page *page;
Joerg Roedel06fc77722010-02-19 16:23:07 +01002282 u64 vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002283
Joerg Roedel06fc77722010-02-19 16:23:07 +01002284 vmcb_gpa = svm->vmcb->save.rax;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002285
Joerg Roedel7597f122010-02-19 16:23:00 +01002286 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002287 if (!nested_vmcb)
2288 return false;
2289
Joerg Roedel52c65a302010-08-02 16:46:44 +02002290 if (!nested_vmcb_checks(nested_vmcb)) {
2291 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
2292 nested_vmcb->control.exit_code_hi = 0;
2293 nested_vmcb->control.exit_info_1 = 0;
2294 nested_vmcb->control.exit_info_2 = 0;
2295
2296 nested_svm_unmap(page);
2297
2298 return false;
2299 }
2300
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002301 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
Joerg Roedel0ac406d2009-10-09 16:08:27 +02002302 nested_vmcb->save.rip,
2303 nested_vmcb->control.int_ctl,
2304 nested_vmcb->control.event_inj,
2305 nested_vmcb->control.nested_ctl);
2306
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002307 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
2308 nested_vmcb->control.intercept_cr >> 16,
Joerg Roedel2e554e82010-02-24 18:59:14 +01002309 nested_vmcb->control.intercept_exceptions,
2310 nested_vmcb->control.intercept);
2311
Alexander Graf3d6368e2008-11-25 20:17:07 +01002312 /* Clear internal status */
Alexander Graf219b65d2009-06-15 15:21:25 +02002313 kvm_clear_exception_queue(&svm->vcpu);
2314 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002315
Joerg Roedele0231712010-02-24 18:59:10 +01002316 /*
2317 * Save the old vmcb, so we don't need to pick what we save, but can
2318 * restore everything when a VMEXIT occurs
2319 */
Joerg Roedeldefbba52009-08-07 11:49:30 +02002320 hsave->save.es = vmcb->save.es;
2321 hsave->save.cs = vmcb->save.cs;
2322 hsave->save.ss = vmcb->save.ss;
2323 hsave->save.ds = vmcb->save.ds;
2324 hsave->save.gdtr = vmcb->save.gdtr;
2325 hsave->save.idtr = vmcb->save.idtr;
Avi Kivityf6801df2010-01-21 15:31:50 +02002326 hsave->save.efer = svm->vcpu.arch.efer;
Avi Kivity4d4ec082009-12-29 18:07:30 +02002327 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002328 hsave->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03002329 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002330 hsave->save.rip = kvm_rip_read(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002331 hsave->save.rsp = vmcb->save.rsp;
2332 hsave->save.rax = vmcb->save.rax;
2333 if (npt_enabled)
2334 hsave->save.cr3 = vmcb->save.cr3;
2335 else
Avi Kivity9f8fe502010-12-05 17:30:00 +02002336 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002337
Joerg Roedel0460a972009-08-07 11:49:31 +02002338 copy_vmcb_control_area(hsave, vmcb);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002339
Avi Kivityf6e78472010-08-02 15:30:20 +03002340 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002341 svm->vcpu.arch.hflags |= HF_HIF_MASK;
2342 else
2343 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
2344
Joerg Roedel4b161842010-09-10 17:31:03 +02002345 if (nested_vmcb->control.nested_ctl) {
2346 kvm_mmu_unload(&svm->vcpu);
2347 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
2348 nested_svm_init_mmu_context(&svm->vcpu);
2349 }
2350
Alexander Graf3d6368e2008-11-25 20:17:07 +01002351 /* Load the nested guest state */
2352 svm->vmcb->save.es = nested_vmcb->save.es;
2353 svm->vmcb->save.cs = nested_vmcb->save.cs;
2354 svm->vmcb->save.ss = nested_vmcb->save.ss;
2355 svm->vmcb->save.ds = nested_vmcb->save.ds;
2356 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
2357 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03002358 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002359 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
2360 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
2361 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
2362 if (npt_enabled) {
2363 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
2364 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01002365 } else
Avi Kivity23902182010-06-10 17:02:16 +03002366 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01002367
2368 /* Guest paging mode is active - reset mmu */
2369 kvm_mmu_reset_context(&svm->vcpu);
2370
Joerg Roedeldefbba52009-08-07 11:49:30 +02002371 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002372 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
2373 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
2374 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
Joerg Roedele0231712010-02-24 18:59:10 +01002375
Alexander Graf3d6368e2008-11-25 20:17:07 +01002376 /* In case we don't even reach vcpu_run, the fields are not updated */
2377 svm->vmcb->save.rax = nested_vmcb->save.rax;
2378 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
2379 svm->vmcb->save.rip = nested_vmcb->save.rip;
2380 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
2381 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
2382 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
2383
Joerg Roedelf7138532010-03-01 15:34:40 +01002384 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002385 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002386
Joerg Roedelaad42c62009-08-07 11:49:34 +02002387 /* cache intercepts */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002388 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01002389 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
Joerg Roedelaad42c62009-08-07 11:49:34 +02002390 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
2391 svm->nested.intercept = nested_vmcb->control.intercept;
2392
Joerg Roedelf40f6a42010-12-03 15:25:15 +01002393 svm_flush_tlb(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002394 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002395 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
2396 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
2397 else
2398 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
2399
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002400 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
2401 /* We only want the cr8 intercept bits of the guest */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002402 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
2403 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002404 }
2405
Joerg Roedel0d945bd2010-05-05 16:04:45 +02002406 /* We don't want to see VMMCALLs from a nested guest */
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002407 clr_intercept(svm, INTERCEPT_VMMCALL);
Joerg Roedel0d945bd2010-05-05 16:04:45 +02002408
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002409 svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002410 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
2411 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
2412 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002413 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
2414 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
2415
Joerg Roedel7597f122010-02-19 16:23:00 +01002416 nested_svm_unmap(page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002417
Joerg Roedel20307532010-11-29 17:51:48 +01002418 /* Enter Guest-Mode */
2419 enter_guest_mode(&svm->vcpu);
2420
Joerg Roedel384c6362010-11-30 18:03:56 +01002421 /*
2422 * Merge guest and host intercepts - must be called with vcpu in
2423	 * guest-mode to take effect here
2424 */
2425 recalc_intercepts(svm);
2426
Joerg Roedel06fc77722010-02-19 16:23:07 +01002427 svm->nested.vmcb = vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002428
Joerg Roedel2af91942009-08-07 11:49:28 +02002429 enable_gif(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002430
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002431 mark_all_dirty(svm->vmcb);
2432
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002433 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002434}
2435
Joerg Roedel9966bf62009-08-07 11:49:40 +02002436static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
Alexander Graf55426752008-11-25 20:17:06 +01002437{
2438 to_vmcb->save.fs = from_vmcb->save.fs;
2439 to_vmcb->save.gs = from_vmcb->save.gs;
2440 to_vmcb->save.tr = from_vmcb->save.tr;
2441 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
2442 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
2443 to_vmcb->save.star = from_vmcb->save.star;
2444 to_vmcb->save.lstar = from_vmcb->save.lstar;
2445 to_vmcb->save.cstar = from_vmcb->save.cstar;
2446 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
2447 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
2448 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
2449 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
Alexander Graf55426752008-11-25 20:17:06 +01002450}
2451
Avi Kivity851ba692009-08-24 11:10:17 +03002452static int vmload_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01002453{
Joerg Roedel9966bf62009-08-07 11:49:40 +02002454 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002455 struct page *page;
Joerg Roedel9966bf62009-08-07 11:49:40 +02002456
Alexander Graf55426752008-11-25 20:17:06 +01002457 if (nested_svm_check_permissions(svm))
2458 return 1;
2459
2460 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2461 skip_emulated_instruction(&svm->vcpu);
2462
Joerg Roedel7597f122010-02-19 16:23:00 +01002463 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02002464 if (!nested_vmcb)
2465 return 1;
2466
2467 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01002468 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01002469
2470 return 1;
2471}
2472
Avi Kivity851ba692009-08-24 11:10:17 +03002473static int vmsave_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01002474{
Joerg Roedel9966bf62009-08-07 11:49:40 +02002475 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002476 struct page *page;
Joerg Roedel9966bf62009-08-07 11:49:40 +02002477
Alexander Graf55426752008-11-25 20:17:06 +01002478 if (nested_svm_check_permissions(svm))
2479 return 1;
2480
2481 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2482 skip_emulated_instruction(&svm->vcpu);
2483
Joerg Roedel7597f122010-02-19 16:23:00 +01002484 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02002485 if (!nested_vmcb)
2486 return 1;
2487
2488 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01002489 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01002490
2491 return 1;
2492}
2493
Avi Kivity851ba692009-08-24 11:10:17 +03002494static int vmrun_interception(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002495{
Alexander Graf3d6368e2008-11-25 20:17:07 +01002496 if (nested_svm_check_permissions(svm))
2497 return 1;
2498
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002499 /* Save rip after vmrun instruction */
2500 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002501
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002502 if (!nested_svm_vmrun(svm))
Alexander Graf3d6368e2008-11-25 20:17:07 +01002503 return 1;
2504
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002505 if (!nested_svm_vmrun_msrpm(svm))
Joerg Roedel1f8da472009-08-07 11:49:43 +02002506 goto failed;
2507
2508 return 1;
2509
2510failed:
2511
2512 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
2513 svm->vmcb->control.exit_code_hi = 0;
2514 svm->vmcb->control.exit_info_1 = 0;
2515 svm->vmcb->control.exit_info_2 = 0;
2516
2517 nested_svm_vmexit(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002518
2519 return 1;
2520}
2521
Avi Kivity851ba692009-08-24 11:10:17 +03002522static int stgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01002523{
2524 if (nested_svm_check_permissions(svm))
2525 return 1;
2526
2527 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2528 skip_emulated_instruction(&svm->vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +03002529 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01002530
Joerg Roedel2af91942009-08-07 11:49:28 +02002531 enable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01002532
2533 return 1;
2534}
2535
Avi Kivity851ba692009-08-24 11:10:17 +03002536static int clgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01002537{
2538 if (nested_svm_check_permissions(svm))
2539 return 1;
2540
2541 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2542 skip_emulated_instruction(&svm->vcpu);
2543
Joerg Roedel2af91942009-08-07 11:49:28 +02002544 disable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01002545
2546 	/* After a CLGI no interrupts should be delivered */
2547 svm_clear_vintr(svm);
2548 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2549
Joerg Roedeldecdbf62010-12-03 11:45:52 +01002550 mark_dirty(svm->vmcb, VMCB_INTR);
2551
Alexander Graf1371d902008-11-25 20:17:04 +01002552 return 1;
2553}
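/*
 * STGI and CLGI toggle the virtual Global Interrupt Flag tracked in
 * vcpu->arch.hflags (see enable_gif()/disable_gif()). While GIF is clear
 * no interrupt of any kind may be taken, which is why CLGI also drops
 * any pending virtual interrupt request from the VMCB above.
 */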
2554
Avi Kivity851ba692009-08-24 11:10:17 +03002555static int invlpga_interception(struct vcpu_svm *svm)
Alexander Grafff092382009-06-15 15:21:24 +02002556{
2557 struct kvm_vcpu *vcpu = &svm->vcpu;
Alexander Grafff092382009-06-15 15:21:24 +02002558
Joerg Roedelec1ff792009-10-09 16:08:31 +02002559 trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
2560 vcpu->arch.regs[VCPU_REGS_RAX]);
2561
Alexander Grafff092382009-06-15 15:21:24 +02002562 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2563 kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
2564
2565 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2566 skip_emulated_instruction(&svm->vcpu);
2567 return 1;
2568}
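/*
 * Per the SVM architecture, INVLPGA takes the linear address in rAX and
 * a target ASID in ECX. KVM manages ASIDs itself, so flushing the page
 * from the current shadow context is sufficient here; as the comment
 * above notes, a per-ASID flush would merely be an optimization.
 */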
2569
Joerg Roedel532a46b2009-10-09 16:08:32 +02002570static int skinit_interception(struct vcpu_svm *svm)
2571{
2572 trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
2573
2574 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2575 return 1;
2576}
2577
Joerg Roedel81dd35d2010-12-07 17:15:06 +01002578static int xsetbv_interception(struct vcpu_svm *svm)
2579{
2580 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
2581 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
2582
2583 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
2584 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2585 skip_emulated_instruction(&svm->vcpu);
2586 }
2587
2588 return 1;
2589}
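/*
 * XSETBV takes the XCR index in ECX and the new value in EDX:EAX. RIP is
 * advanced only when kvm_set_xcr() accepts the value; on failure it
 * raises the fault itself, so the instruction is re-delivered to the
 * guest. A guest reaches this handler with a sequence like the sketch
 * below (the value loaded is purely illustrative):
 *
 *	xor	%ecx, %ecx	# select XCR0
 *	mov	$0x7, %eax	# x87 | SSE | AVX
 *	xor	%edx, %edx
 *	xsetbv			# 0f 01 d1, hence next_rip = rip + 3
 */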
2590
Avi Kivity851ba692009-08-24 11:10:17 +03002591static int invalid_op_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002592{
Avi Kivity7ee5d9402007-11-25 15:22:50 +02002593 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002594 return 1;
2595}
2596
Avi Kivity851ba692009-08-24 11:10:17 +03002597static int task_switch_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002598{
Izik Eidus37817f22008-03-24 23:14:53 +02002599 u16 tss_selector;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002600 int reason;
2601 int int_type = svm->vmcb->control.exit_int_info &
2602 SVM_EXITINTINFO_TYPE_MASK;
Gleb Natapov8317c292009-04-12 13:37:02 +03002603 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002604 uint32_t type =
2605 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2606 uint32_t idt_v =
2607 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
Jan Kiszkae269fb22010-04-14 15:51:09 +02002608 bool has_error_code = false;
2609 u32 error_code = 0;
Izik Eidus37817f22008-03-24 23:14:53 +02002610
2611 tss_selector = (u16)svm->vmcb->control.exit_info_1;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002612
Izik Eidus37817f22008-03-24 23:14:53 +02002613 if (svm->vmcb->control.exit_info_2 &
2614 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002615 reason = TASK_SWITCH_IRET;
2616 else if (svm->vmcb->control.exit_info_2 &
2617 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2618 reason = TASK_SWITCH_JMP;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002619 else if (idt_v)
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002620 reason = TASK_SWITCH_GATE;
2621 else
2622 reason = TASK_SWITCH_CALL;
2623
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002624 if (reason == TASK_SWITCH_GATE) {
2625 switch (type) {
2626 case SVM_EXITINTINFO_TYPE_NMI:
2627 svm->vcpu.arch.nmi_injected = false;
2628 break;
2629 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszkae269fb22010-04-14 15:51:09 +02002630 if (svm->vmcb->control.exit_info_2 &
2631 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2632 has_error_code = true;
2633 error_code =
2634 (u32)svm->vmcb->control.exit_info_2;
2635 }
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002636 kvm_clear_exception_queue(&svm->vcpu);
2637 break;
2638 case SVM_EXITINTINFO_TYPE_INTR:
2639 kvm_clear_interrupt_queue(&svm->vcpu);
2640 break;
2641 default:
2642 break;
2643 }
2644 }
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002645
Gleb Natapov8317c292009-04-12 13:37:02 +03002646 if (reason != TASK_SWITCH_GATE ||
2647 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2648 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
Gleb Natapovf629cf82009-05-11 13:35:49 +03002649 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
2650 skip_emulated_instruction(&svm->vcpu);
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002651
Gleb Natapovacb54512010-04-15 21:03:50 +03002652 if (kvm_task_switch(&svm->vcpu, tss_selector, reason,
2653 has_error_code, error_code) == EMULATE_FAIL) {
2654 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2655 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2656 svm->vcpu.run->internal.ndata = 0;
2657 return 0;
2658 }
2659 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002660}
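/*
 * For task-switch exits, exit_info_1 carries the incoming TSS selector
 * in its low 16 bits, and exit_info_2 describes the cause: the IRET and
 * JMP bits tested above, the has-error-code bit, and the error code
 * itself in the low 32 bits. Anything that is not an IRET, a JMP or an
 * IDT-gate dispatch is treated as a far CALL.
 */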
2661
Avi Kivity851ba692009-08-24 11:10:17 +03002662static int cpuid_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002663{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002664 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10002665 kvm_emulate_cpuid(&svm->vcpu);
Avi Kivity06465c52007-02-28 20:46:53 +02002666 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002667}
2668
Avi Kivity851ba692009-08-24 11:10:17 +03002669static int iret_interception(struct vcpu_svm *svm)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002670{
2671 ++svm->vcpu.stat.nmi_window_exits;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002672 clr_intercept(svm, INTERCEPT_IRET);
Gleb Natapov44c11432009-05-11 13:35:52 +03002673 svm->vcpu.arch.hflags |= HF_IRET_MASK;
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02002674 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002675 return 1;
2676}
2677
Avi Kivity851ba692009-08-24 11:10:17 +03002678static int invlpg_interception(struct vcpu_svm *svm)
Marcelo Tosattia7052892008-09-23 13:18:35 -03002679{
Andre Przywaradf4f31082010-12-21 11:12:06 +01002680 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2681 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
2682
2683 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
2684 skip_emulated_instruction(&svm->vcpu);
2685 return 1;
Marcelo Tosattia7052892008-09-23 13:18:35 -03002686}
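/*
 * With decode assists the faulting linear address is delivered in
 * exit_info_1, so the page can be flushed without re-fetching the
 * instruction; without them INVLPG goes through the full emulator to
 * recover its operand.
 */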
2687
Avi Kivity851ba692009-08-24 11:10:17 +03002688static int emulate_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002689{
Andre Przywara51d8b662010-12-21 11:12:02 +01002690 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002691}
2692
Joerg Roedel628afd22011-04-04 12:39:36 +02002693bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
2694{
2695 unsigned long cr0 = svm->vcpu.arch.cr0;
2696 bool ret = false;
2697 u64 intercept;
2698
2699 intercept = svm->nested.intercept;
2700
2701 if (!is_guest_mode(&svm->vcpu) ||
2702 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
2703 return false;
2704
2705 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2706 val &= ~SVM_CR0_SELECTIVE_MASK;
2707
2708 if (cr0 ^ val) {
2709 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2710 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2711 }
2712
2713 return ret;
2714}
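/*
 * The selective CR0 intercept fires only for writes that change bits
 * outside SVM_CR0_SELECTIVE_MASK (CR0.TS and CR0.MP). When the L1
 * hypervisor asked for it, the helper above converts the write into an
 * SVM_EXIT_CR0_SEL_WRITE nested exit instead of handling it locally.
 */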
2715
Andre Przywara7ff76d52010-12-21 11:12:04 +01002716#define CR_VALID (1ULL << 63)
2717
2718static int cr_interception(struct vcpu_svm *svm)
2719{
2720 int reg, cr;
2721 unsigned long val;
2722 int err;
2723
2724 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2725 return emulate_on_interception(svm);
2726
2727 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2728 return emulate_on_interception(svm);
2729
2730 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2731 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2732
2733 err = 0;
2734 if (cr >= 16) { /* mov to cr */
2735 cr -= 16;
2736 val = kvm_register_read(&svm->vcpu, reg);
2737 switch (cr) {
2738 case 0:
Joerg Roedel628afd22011-04-04 12:39:36 +02002739 if (!check_selective_cr0_intercepted(svm, val))
2740 err = kvm_set_cr0(&svm->vcpu, val);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002741 break;
2742 case 3:
2743 err = kvm_set_cr3(&svm->vcpu, val);
2744 break;
2745 case 4:
2746 err = kvm_set_cr4(&svm->vcpu, val);
2747 break;
2748 case 8:
2749 err = kvm_set_cr8(&svm->vcpu, val);
2750 break;
2751 default:
2752 WARN(1, "unhandled write to CR%d", cr);
2753 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2754 return 1;
2755 }
2756 } else { /* mov from cr */
2757 switch (cr) {
2758 case 0:
2759 val = kvm_read_cr0(&svm->vcpu);
2760 break;
2761 case 2:
2762 val = svm->vcpu.arch.cr2;
2763 break;
2764 case 3:
Avi Kivity9f8fe502010-12-05 17:30:00 +02002765 val = kvm_read_cr3(&svm->vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002766 break;
2767 case 4:
2768 val = kvm_read_cr4(&svm->vcpu);
2769 break;
2770 case 8:
2771 val = kvm_get_cr8(&svm->vcpu);
2772 break;
2773 default:
2774 WARN(1, "unhandled read from CR%d", cr);
2775 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2776 return 1;
2777 }
2778 kvm_register_write(&svm->vcpu, reg, val);
2779 }
2780 kvm_complete_insn_gp(&svm->vcpu, err);
2781
2782 return 1;
2783}
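/*
 * With decode assists, exit_info_1 holds the GPR operand in its low bits
 * and sets bit 63 (CR_VALID) for a plain MOV to/from CRn; LMSW and CLTS
 * leave it clear and still take the emulator path. The CR number comes
 * from the exit code, with writes offset by 16 from reads, hence the
 * "cr -= 16" above.
 */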
2784
Andre Przywaracae37972010-12-21 11:12:05 +01002785static int dr_interception(struct vcpu_svm *svm)
2786{
2787 int reg, dr;
2788 unsigned long val;
2789 int err;
2790
2791 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2792 return emulate_on_interception(svm);
2793
2794 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2795 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2796
2797 if (dr >= 16) { /* mov to DRn */
2798 val = kvm_register_read(&svm->vcpu, reg);
2799 kvm_set_dr(&svm->vcpu, dr - 16, val);
2800 } else {
2801 err = kvm_get_dr(&svm->vcpu, dr, &val);
2802 if (!err)
2803 kvm_register_write(&svm->vcpu, reg, val);
2804 }
2805
Joerg Roedel2c46d2a2011-02-09 18:29:39 +01002806 skip_emulated_instruction(&svm->vcpu);
2807
Andre Przywaracae37972010-12-21 11:12:05 +01002808 return 1;
2809}
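/*
 * DR accesses follow the same encoding as CR accesses: the GPR comes
 * from exit_info_1 and the DR number from the exit code, writes again
 * offset by 16. Validity checking is left to the kvm_get_dr() and
 * kvm_set_dr() helpers, so this handler only moves the value and skips
 * the instruction.
 */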
2810
Avi Kivity851ba692009-08-24 11:10:17 +03002811static int cr8_write_interception(struct vcpu_svm *svm)
Joerg Roedel1d075432007-12-06 21:02:25 +01002812{
Avi Kivity851ba692009-08-24 11:10:17 +03002813 struct kvm_run *kvm_run = svm->vcpu.run;
Andre Przywaraeea1cff2010-12-21 11:12:00 +01002814 int r;
Avi Kivity851ba692009-08-24 11:10:17 +03002815
Gleb Natapov0a5fff192009-04-21 17:45:06 +03002816 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
2817 /* instruction emulation calls kvm_set_cr8() */
Andre Przywara7ff76d52010-12-21 11:12:04 +01002818 r = cr_interception(svm);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002819 if (irqchip_in_kernel(svm->vcpu.kvm)) {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002820 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002821 return r;
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002822 }
Gleb Natapov0a5fff192009-04-21 17:45:06 +03002823 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01002824 return r;
Joerg Roedel1d075432007-12-06 21:02:25 +01002825 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2826 return 0;
2827}
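/*
 * With an in-kernel APIC the CR8-write intercept is dropped here and
 * re-armed by update_cr8_intercept() only while a pending interrupt is
 * blocked by the TPR. With a userspace APIC, lowering the TPR may unmask
 * a pending interrupt, so such writes force a KVM_EXIT_SET_TPR exit for
 * userspace to re-evaluate.
 */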
2828
Avi Kivity6aa8b732006-12-10 02:21:36 -08002829static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
2830{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002831 struct vcpu_svm *svm = to_svm(vcpu);
2832
Avi Kivity6aa8b732006-12-10 02:21:36 -08002833 switch (ecx) {
Jaswinder Singh Rajputaf24a4e2009-05-15 18:42:05 +05302834 case MSR_IA32_TSC: {
Joerg Roedel4cc70312010-11-30 18:04:01 +01002835 struct vmcb *vmcb = get_host_vmcb(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002836
Joerg Roedelfbc0db72011-03-25 09:44:46 +01002837 *data = vmcb->control.tsc_offset +
2838 svm_scale_tsc(vcpu, native_read_tsc());
2839
Avi Kivity6aa8b732006-12-10 02:21:36 -08002840 break;
2841 }
Brian Gerst8c065852010-07-17 09:03:26 -04002842 case MSR_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002843 *data = svm->vmcb->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002844 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08002845#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002846 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002847 *data = svm->vmcb->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002848 break;
2849 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002850 *data = svm->vmcb->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002851 break;
2852 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002853 *data = svm->vmcb->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002854 break;
2855 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002856 *data = svm->vmcb->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002857 break;
2858#endif
2859 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002860 *data = svm->vmcb->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002861 break;
2862 case MSR_IA32_SYSENTER_EIP:
Andre Przywara017cb992009-05-28 11:56:31 +02002863 *data = svm->sysenter_eip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002864 break;
2865 case MSR_IA32_SYSENTER_ESP:
Andre Przywara017cb992009-05-28 11:56:31 +02002866 *data = svm->sysenter_esp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002867 break;
Joerg Roedele0231712010-02-24 18:59:10 +01002868 /*
2869 * Nobody will change the following 5 values in the VMCB so we can
2870 * safely return them on rdmsr. They will always be 0 until LBRV is
2871 * implemented.
2872 */
Joerg Roedela2938c82008-02-13 16:30:28 +01002873 case MSR_IA32_DEBUGCTLMSR:
2874 *data = svm->vmcb->save.dbgctl;
2875 break;
2876 case MSR_IA32_LASTBRANCHFROMIP:
2877 *data = svm->vmcb->save.br_from;
2878 break;
2879 case MSR_IA32_LASTBRANCHTOIP:
2880 *data = svm->vmcb->save.br_to;
2881 break;
2882 case MSR_IA32_LASTINTFROMIP:
2883 *data = svm->vmcb->save.last_excp_from;
2884 break;
2885 case MSR_IA32_LASTINTTOIP:
2886 *data = svm->vmcb->save.last_excp_to;
2887 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01002888 case MSR_VM_HSAVE_PA:
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002889 *data = svm->nested.hsave_msr;
Alexander Grafb286d5d2008-11-25 20:17:05 +01002890 break;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01002891 case MSR_VM_CR:
Joerg Roedel4a810182010-02-24 18:59:15 +01002892 *data = svm->nested.vm_cr_msr;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01002893 break;
Alexander Grafc8a73f12009-01-05 16:02:47 +01002894 case MSR_IA32_UCODE_REV:
2895 *data = 0x01000065;
2896 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002897 default:
Avi Kivity3bab1f52006-12-29 16:49:48 -08002898 return kvm_get_msr_common(vcpu, ecx, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002899 }
2900 return 0;
2901}
2902
Avi Kivity851ba692009-08-24 11:10:17 +03002903static int rdmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002904{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002905 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
Avi Kivity6aa8b732006-12-10 02:21:36 -08002906 u64 data;
2907
Avi Kivity59200272010-01-25 19:47:02 +02002908 if (svm_get_msr(&svm->vcpu, ecx, &data)) {
2909 trace_kvm_msr_read_ex(ecx);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02002910 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity59200272010-01-25 19:47:02 +02002911 } else {
Marcelo Tosatti229456f2009-06-17 09:22:14 -03002912 trace_kvm_msr_read(ecx, data);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02002913
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002914 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002915 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002916 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10002917 skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002918 }
2919 return 1;
2920}
2921
Joerg Roedel4a810182010-02-24 18:59:15 +01002922static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2923{
2924 struct vcpu_svm *svm = to_svm(vcpu);
2925 int svm_dis, chg_mask;
2926
2927 if (data & ~SVM_VM_CR_VALID_MASK)
2928 return 1;
2929
2930 chg_mask = SVM_VM_CR_VALID_MASK;
2931
2932 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2933 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2934
2935 svm->nested.vm_cr_msr &= ~chg_mask;
2936 svm->nested.vm_cr_msr |= (data & chg_mask);
2937
2938 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2939
2940 /* check for svm_disable while efer.svme is set */
2941 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2942 return 1;
2943
2944 return 0;
2945}
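/*
 * Emulated VM_CR semantics: once the guest has set SVMDIS, neither the
 * lock nor the disable bit can be changed again (chg_mask drops them),
 * and a write that would disable SVM while EFER.SVME is still set is
 * rejected, which the WRMSR path below turns into a #GP.
 */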
2946
Avi Kivity6aa8b732006-12-10 02:21:36 -08002947static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2948{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002949 struct vcpu_svm *svm = to_svm(vcpu);
2950
Avi Kivity6aa8b732006-12-10 02:21:36 -08002951 switch (ecx) {
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -10002952 case MSR_IA32_TSC:
Zachary Amsden99e3e302010-08-19 22:07:17 -10002953 kvm_write_tsc(vcpu, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002954 break;
Brian Gerst8c065852010-07-17 09:03:26 -04002955 case MSR_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002956 svm->vmcb->save.star = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002957 break;
Robert P. J. Day49b14f22007-01-29 13:19:50 -08002958#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002959 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002960 svm->vmcb->save.lstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002961 break;
2962 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002963 svm->vmcb->save.cstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002964 break;
2965 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002966 svm->vmcb->save.kernel_gs_base = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002967 break;
2968 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002969 svm->vmcb->save.sfmask = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002970 break;
2971#endif
2972 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002973 svm->vmcb->save.sysenter_cs = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002974 break;
2975 case MSR_IA32_SYSENTER_EIP:
Andre Przywara017cb992009-05-28 11:56:31 +02002976 svm->sysenter_eip = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002977 svm->vmcb->save.sysenter_eip = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002978 break;
2979 case MSR_IA32_SYSENTER_ESP:
Andre Przywara017cb992009-05-28 11:56:31 +02002980 svm->sysenter_esp = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002981 svm->vmcb->save.sysenter_esp = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002982 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01002983 case MSR_IA32_DEBUGCTLMSR:
Avi Kivity2a6b20b2010-11-09 16:15:42 +02002984 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
Joerg Roedel24e09cb2008-02-13 18:58:47 +01002985 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08002986 __func__, data);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01002987 break;
2988 }
2989 if (data & DEBUGCTL_RESERVED_BITS)
2990 return 1;
2991
2992 svm->vmcb->save.dbgctl = data;
Joerg Roedelb53ba3f2010-12-03 11:45:59 +01002993 mark_dirty(svm->vmcb, VMCB_LBR);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01002994 if (data & (1ULL<<0))
2995 svm_enable_lbrv(svm);
2996 else
2997 svm_disable_lbrv(svm);
Joerg Roedela2938c82008-02-13 16:30:28 +01002998 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01002999 case MSR_VM_HSAVE_PA:
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02003000 svm->nested.hsave_msr = data;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003001 break;
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003002 case MSR_VM_CR:
Joerg Roedel4a810182010-02-24 18:59:15 +01003003 return svm_set_vm_cr(vcpu, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003004 case MSR_VM_IGNNE:
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003005 pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3006 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003007 default:
Avi Kivity3bab1f52006-12-29 16:49:48 -08003008 return kvm_set_msr_common(vcpu, ecx, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003009 }
3010 return 0;
3011}
3012
Avi Kivity851ba692009-08-24 11:10:17 +03003013static int wrmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003014{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003015 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003016 u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003017 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003018
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003019
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003020 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Avi Kivity59200272010-01-25 19:47:02 +02003021 if (svm_set_msr(&svm->vcpu, ecx, data)) {
3022 trace_kvm_msr_write_ex(ecx, data);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02003023 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity59200272010-01-25 19:47:02 +02003024 } else {
3025 trace_kvm_msr_write(ecx, data);
Rusty Russelle756fc62007-07-30 20:07:08 +10003026 skip_emulated_instruction(&svm->vcpu);
Avi Kivity59200272010-01-25 19:47:02 +02003027 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08003028 return 1;
3029}
3030
Avi Kivity851ba692009-08-24 11:10:17 +03003031static int msr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003032{
Rusty Russelle756fc62007-07-30 20:07:08 +10003033 if (svm->vmcb->control.exit_info_1)
Avi Kivity851ba692009-08-24 11:10:17 +03003034 return wrmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003035 else
Avi Kivity851ba692009-08-24 11:10:17 +03003036 return rdmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003037}
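/*
 * A single SVM_EXIT_MSR exit code covers both directions: exit_info_1
 * is 1 for WRMSR and 0 for RDMSR.
 */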
3038
Avi Kivity851ba692009-08-24 11:10:17 +03003039static int interrupt_window_interception(struct vcpu_svm *svm)
Dor Laorc1150d82007-01-05 16:36:24 -08003040{
Avi Kivity851ba692009-08-24 11:10:17 +03003041 struct kvm_run *kvm_run = svm->vcpu.run;
3042
Avi Kivity3842d132010-07-27 12:30:24 +03003043 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graff0b85052008-11-25 20:17:01 +01003044 svm_clear_vintr(svm);
Eddie Dong85f455f2007-07-06 12:20:49 +03003045 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
Joerg Roedeldecdbf62010-12-03 11:45:52 +01003046 mark_dirty(svm->vmcb, VMCB_INTR);
Dor Laorc1150d82007-01-05 16:36:24 -08003047 /*
3048 	 * If userspace is waiting to inject interrupts, exit as soon as
3049 * possible
3050 */
Gleb Natapov80618232009-04-21 17:44:56 +03003051 if (!irqchip_in_kernel(svm->vcpu.kvm) &&
3052 kvm_run->request_interrupt_window &&
3053 !kvm_cpu_has_interrupt(&svm->vcpu)) {
Rusty Russelle756fc62007-07-30 20:07:08 +10003054 ++svm->vcpu.stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08003055 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
3056 return 0;
3057 }
3058
3059 return 1;
3060}
3061
Mark Langsdorf565d0992009-10-06 14:25:02 -05003062static int pause_interception(struct vcpu_svm *svm)
3063{
3064 kvm_vcpu_on_spin(&(svm->vcpu));
3065 return 1;
3066}
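/*
 * PAUSE exits come from the pause-filter feature: the CPU counts
 * executed PAUSEs and exits once pause_filter_count is reached, a strong
 * hint that the vcpu is spinning on a lock. kvm_vcpu_on_spin() then
 * yields the processor so the lock holder can make progress.
 */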
3067
Avi Kivity851ba692009-08-24 11:10:17 +03003068static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
Andre Przywara7ff76d52010-12-21 11:12:04 +01003069 [SVM_EXIT_READ_CR0] = cr_interception,
3070 [SVM_EXIT_READ_CR3] = cr_interception,
3071 [SVM_EXIT_READ_CR4] = cr_interception,
3072 [SVM_EXIT_READ_CR8] = cr_interception,
Avi Kivityd2251572010-01-06 10:55:27 +02003073 [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
Joerg Roedel628afd22011-04-04 12:39:36 +02003074 [SVM_EXIT_WRITE_CR0] = cr_interception,
Andre Przywara7ff76d52010-12-21 11:12:04 +01003075 [SVM_EXIT_WRITE_CR3] = cr_interception,
3076 [SVM_EXIT_WRITE_CR4] = cr_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003077 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
Andre Przywaracae37972010-12-21 11:12:05 +01003078 [SVM_EXIT_READ_DR0] = dr_interception,
3079 [SVM_EXIT_READ_DR1] = dr_interception,
3080 [SVM_EXIT_READ_DR2] = dr_interception,
3081 [SVM_EXIT_READ_DR3] = dr_interception,
3082 [SVM_EXIT_READ_DR4] = dr_interception,
3083 [SVM_EXIT_READ_DR5] = dr_interception,
3084 [SVM_EXIT_READ_DR6] = dr_interception,
3085 [SVM_EXIT_READ_DR7] = dr_interception,
3086 [SVM_EXIT_WRITE_DR0] = dr_interception,
3087 [SVM_EXIT_WRITE_DR1] = dr_interception,
3088 [SVM_EXIT_WRITE_DR2] = dr_interception,
3089 [SVM_EXIT_WRITE_DR3] = dr_interception,
3090 [SVM_EXIT_WRITE_DR4] = dr_interception,
3091 [SVM_EXIT_WRITE_DR5] = dr_interception,
3092 [SVM_EXIT_WRITE_DR6] = dr_interception,
3093 [SVM_EXIT_WRITE_DR7] = dr_interception,
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003094 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
3095 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05003096 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003097 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
3098 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
3099 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
3100 [SVM_EXIT_INTR] = intr_interception,
Joerg Roedelc47f0982008-04-30 17:56:00 +02003101 [SVM_EXIT_NMI] = nmi_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003102 [SVM_EXIT_SMI] = nop_on_interception,
3103 [SVM_EXIT_INIT] = nop_on_interception,
Dor Laorc1150d82007-01-05 16:36:24 -08003104 [SVM_EXIT_VINTR] = interrupt_window_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003105 [SVM_EXIT_CPUID] = cpuid_interception,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003106 [SVM_EXIT_IRET] = iret_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02003107 [SVM_EXIT_INVD] = emulate_on_interception,
Mark Langsdorf565d0992009-10-06 14:25:02 -05003108 [SVM_EXIT_PAUSE] = pause_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003109 [SVM_EXIT_HLT] = halt_interception,
Marcelo Tosattia7052892008-09-23 13:18:35 -03003110 [SVM_EXIT_INVLPG] = invlpg_interception,
Alexander Grafff092382009-06-15 15:21:24 +02003111 [SVM_EXIT_INVLPGA] = invlpga_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003112 [SVM_EXIT_IOIO] = io_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003113 [SVM_EXIT_MSR] = msr_interception,
3114 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08003115 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
Alexander Graf3d6368e2008-11-25 20:17:07 +01003116 [SVM_EXIT_VMRUN] = vmrun_interception,
Avi Kivity02e235b2007-02-19 14:37:47 +02003117 [SVM_EXIT_VMMCALL] = vmmcall_interception,
Alexander Graf55426752008-11-25 20:17:06 +01003118 [SVM_EXIT_VMLOAD] = vmload_interception,
3119 [SVM_EXIT_VMSAVE] = vmsave_interception,
Alexander Graf1371d902008-11-25 20:17:04 +01003120 [SVM_EXIT_STGI] = stgi_interception,
3121 [SVM_EXIT_CLGI] = clgi_interception,
Joerg Roedel532a46b2009-10-09 16:08:32 +02003122 [SVM_EXIT_SKINIT] = skinit_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02003123 [SVM_EXIT_WBINVD] = emulate_on_interception,
Joerg Roedel916ce232007-03-21 19:47:00 +01003124 [SVM_EXIT_MONITOR] = invalid_op_interception,
3125 [SVM_EXIT_MWAIT] = invalid_op_interception,
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003126 [SVM_EXIT_XSETBV] = xsetbv_interception,
Joerg Roedel709ddeb2008-02-07 13:47:45 +01003127 [SVM_EXIT_NPF] = pf_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003128};
3129
Joerg Roedel3f10c842010-05-05 16:04:42 +02003130void dump_vmcb(struct kvm_vcpu *vcpu)
3131{
3132 struct vcpu_svm *svm = to_svm(vcpu);
3133 struct vmcb_control_area *control = &svm->vmcb->control;
3134 struct vmcb_save_area *save = &svm->vmcb->save;
3135
3136 pr_err("VMCB Control Area:\n");
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003137 pr_err("cr_read: %04x\n", control->intercept_cr & 0xffff);
3138 pr_err("cr_write: %04x\n", control->intercept_cr >> 16);
Joerg Roedel3aed0412010-11-30 18:03:58 +01003139 pr_err("dr_read: %04x\n", control->intercept_dr & 0xffff);
3140 pr_err("dr_write: %04x\n", control->intercept_dr >> 16);
Joerg Roedel3f10c842010-05-05 16:04:42 +02003141 pr_err("exceptions: %08x\n", control->intercept_exceptions);
3142 pr_err("intercepts: %016llx\n", control->intercept);
3143 pr_err("pause filter count: %d\n", control->pause_filter_count);
3144 pr_err("iopm_base_pa: %016llx\n", control->iopm_base_pa);
3145 pr_err("msrpm_base_pa: %016llx\n", control->msrpm_base_pa);
3146 pr_err("tsc_offset: %016llx\n", control->tsc_offset);
3147 pr_err("asid: %d\n", control->asid);
3148 pr_err("tlb_ctl: %d\n", control->tlb_ctl);
3149 pr_err("int_ctl: %08x\n", control->int_ctl);
3150 pr_err("int_vector: %08x\n", control->int_vector);
3151 pr_err("int_state: %08x\n", control->int_state);
3152 pr_err("exit_code: %08x\n", control->exit_code);
3153 pr_err("exit_info1: %016llx\n", control->exit_info_1);
3154 pr_err("exit_info2: %016llx\n", control->exit_info_2);
3155 pr_err("exit_int_info: %08x\n", control->exit_int_info);
3156 pr_err("exit_int_info_err: %08x\n", control->exit_int_info_err);
3157 pr_err("nested_ctl: %lld\n", control->nested_ctl);
3158 pr_err("nested_cr3: %016llx\n", control->nested_cr3);
3159 pr_err("event_inj: %08x\n", control->event_inj);
3160 pr_err("event_inj_err: %08x\n", control->event_inj_err);
3161 pr_err("lbr_ctl: %lld\n", control->lbr_ctl);
3162 pr_err("next_rip: %016llx\n", control->next_rip);
3163 pr_err("VMCB State Save Area:\n");
3164 pr_err("es: s: %04x a: %04x l: %08x b: %016llx\n",
3165 save->es.selector, save->es.attrib,
3166 save->es.limit, save->es.base);
3167 pr_err("cs: s: %04x a: %04x l: %08x b: %016llx\n",
3168 save->cs.selector, save->cs.attrib,
3169 save->cs.limit, save->cs.base);
3170 pr_err("ss: s: %04x a: %04x l: %08x b: %016llx\n",
3171 save->ss.selector, save->ss.attrib,
3172 save->ss.limit, save->ss.base);
3173 pr_err("ds: s: %04x a: %04x l: %08x b: %016llx\n",
3174 save->ds.selector, save->ds.attrib,
3175 save->ds.limit, save->ds.base);
3176 pr_err("fs: s: %04x a: %04x l: %08x b: %016llx\n",
3177 save->fs.selector, save->fs.attrib,
3178 save->fs.limit, save->fs.base);
3179 pr_err("gs: s: %04x a: %04x l: %08x b: %016llx\n",
3180 save->gs.selector, save->gs.attrib,
3181 save->gs.limit, save->gs.base);
3182 pr_err("gdtr: s: %04x a: %04x l: %08x b: %016llx\n",
3183 save->gdtr.selector, save->gdtr.attrib,
3184 save->gdtr.limit, save->gdtr.base);
3185 pr_err("ldtr: s: %04x a: %04x l: %08x b: %016llx\n",
3186 save->ldtr.selector, save->ldtr.attrib,
3187 save->ldtr.limit, save->ldtr.base);
3188 pr_err("idtr: s: %04x a: %04x l: %08x b: %016llx\n",
3189 save->idtr.selector, save->idtr.attrib,
3190 save->idtr.limit, save->idtr.base);
3191 pr_err("tr: s: %04x a: %04x l: %08x b: %016llx\n",
3192 save->tr.selector, save->tr.attrib,
3193 save->tr.limit, save->tr.base);
3194 pr_err("cpl: %d efer: %016llx\n",
3195 save->cpl, save->efer);
3196 pr_err("cr0: %016llx cr2: %016llx\n",
3197 save->cr0, save->cr2);
3198 pr_err("cr3: %016llx cr4: %016llx\n",
3199 save->cr3, save->cr4);
3200 pr_err("dr6: %016llx dr7: %016llx\n",
3201 save->dr6, save->dr7);
3202 pr_err("rip: %016llx rflags: %016llx\n",
3203 save->rip, save->rflags);
3204 pr_err("rsp: %016llx rax: %016llx\n",
3205 save->rsp, save->rax);
3206 pr_err("star: %016llx lstar: %016llx\n",
3207 save->star, save->lstar);
3208 pr_err("cstar: %016llx sfmask: %016llx\n",
3209 save->cstar, save->sfmask);
3210 pr_err("kernel_gs_base: %016llx sysenter_cs: %016llx\n",
3211 save->kernel_gs_base, save->sysenter_cs);
3212 pr_err("sysenter_esp: %016llx sysenter_eip: %016llx\n",
3213 save->sysenter_esp, save->sysenter_eip);
3214 pr_err("gpat: %016llx dbgctl: %016llx\n",
3215 save->g_pat, save->dbgctl);
3216 pr_err("br_from: %016llx br_to: %016llx\n",
3217 save->br_from, save->br_to);
3218 pr_err("excp_from: %016llx excp_to: %016llx\n",
3219 save->last_excp_from, save->last_excp_to);
3220
3221}
3222
Avi Kivity586f9602010-11-18 13:09:54 +02003223static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
3224{
3225 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3226
3227 *info1 = control->exit_info_1;
3228 *info2 = control->exit_info_2;
3229}
3230
Avi Kivity851ba692009-08-24 11:10:17 +03003231static int handle_exit(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003232{
Avi Kivity04d2cc72007-09-10 18:10:54 +03003233 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity851ba692009-08-24 11:10:17 +03003234 struct kvm_run *kvm_run = vcpu->run;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003235 u32 exit_code = svm->vmcb->control.exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003236
Avi Kivityaa179112010-11-17 18:44:19 +02003237 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003238
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003239 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
Joerg Roedel2be4fc72010-04-22 12:33:09 +02003240 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3241 if (npt_enabled)
3242 vcpu->arch.cr3 = svm->vmcb->save.cr3;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003243
Joerg Roedelcd3ff652009-10-09 16:08:26 +02003244 if (unlikely(svm->nested.exit_required)) {
3245 nested_svm_vmexit(svm);
3246 svm->nested.exit_required = false;
3247
3248 return 1;
3249 }
3250
Joerg Roedel20307532010-11-29 17:51:48 +01003251 if (is_guest_mode(vcpu)) {
Joerg Roedel410e4d52009-08-07 11:49:44 +02003252 int vmexit;
3253
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02003254 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
3255 svm->vmcb->control.exit_info_1,
3256 svm->vmcb->control.exit_info_2,
3257 svm->vmcb->control.exit_int_info,
3258 svm->vmcb->control.exit_int_info_err);
3259
Joerg Roedel410e4d52009-08-07 11:49:44 +02003260 vmexit = nested_svm_exit_special(svm);
3261
3262 if (vmexit == NESTED_EXIT_CONTINUE)
3263 vmexit = nested_svm_exit_handled(svm);
3264
3265 if (vmexit == NESTED_EXIT_DONE)
Alexander Grafcf74a782008-11-25 20:17:08 +01003266 return 1;
Alexander Grafcf74a782008-11-25 20:17:08 +01003267 }
3268
Joerg Roedela5c38322009-08-07 11:49:32 +02003269 svm_complete_interrupts(svm);
3270
Avi Kivity04d2cc72007-09-10 18:10:54 +03003271 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3272 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3273 kvm_run->fail_entry.hardware_entry_failure_reason
3274 = svm->vmcb->control.exit_code;
Joerg Roedel3f10c842010-05-05 16:04:42 +02003275 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
3276 dump_vmcb(vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +03003277 return 0;
3278 }
3279
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003280 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
Joerg Roedel709ddeb2008-02-07 13:47:45 +01003281 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
Joerg Roedel55c5e462010-09-10 17:31:04 +02003282 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
3283 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003284 		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
3285 "exit_code 0x%x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08003286 __func__, svm->vmcb->control.exit_int_info,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003287 exit_code);
3288
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +02003289 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
Joe Perches56919c52007-11-12 20:06:51 -08003290 || !svm_exit_handlers[exit_code]) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08003291 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
Avi Kivity364b6252007-04-16 14:28:40 +03003292 kvm_run->hw.hardware_exit_reason = exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003293 return 0;
3294 }
3295
Avi Kivity851ba692009-08-24 11:10:17 +03003296 return svm_exit_handlers[exit_code](svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003297}
3298
3299static void reload_tss(struct kvm_vcpu *vcpu)
3300{
3301 int cpu = raw_smp_processor_id();
3302
Tejun Heo0fe1e002009-10-29 22:34:14 +09003303 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
3304 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
Avi Kivity6aa8b732006-12-10 02:21:36 -08003305 load_TR_desc();
3306}
3307
Rusty Russelle756fc62007-07-30 20:07:08 +10003308static void pre_svm_run(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003309{
3310 int cpu = raw_smp_processor_id();
3311
Tejun Heo0fe1e002009-10-29 22:34:14 +09003312 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003313
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03003314 /* FIXME: handle wraparound of asid_generation */
Tejun Heo0fe1e002009-10-29 22:34:14 +09003315 if (svm->asid_generation != sd->asid_generation)
3316 new_asid(svm, sd);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003317}
3318
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003319static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3320{
3321 struct vcpu_svm *svm = to_svm(vcpu);
3322
3323 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3324 vcpu->arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003325 set_intercept(svm, INTERCEPT_IRET);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003326 ++vcpu->stat.nmi_injections;
3327}
Avi Kivity6aa8b732006-12-10 02:21:36 -08003328
Eddie Dong85f455f2007-07-06 12:20:49 +03003329static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003330{
3331 struct vmcb_control_area *control;
3332
Rusty Russelle756fc62007-07-30 20:07:08 +10003333 control = &svm->vmcb->control;
Eddie Dong85f455f2007-07-06 12:20:49 +03003334 control->int_vector = irq;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003335 control->int_ctl &= ~V_INTR_PRIO_MASK;
3336 control->int_ctl |= V_IRQ_MASK |
3337 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
Joerg Roedeldecdbf62010-12-03 11:45:52 +01003338 mark_dirty(svm->vmcb, VMCB_INTR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003339}
3340
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003341static void svm_set_irq(struct kvm_vcpu *vcpu)
Eddie Dong2a8067f2007-08-06 16:29:07 +03003342{
3343 struct vcpu_svm *svm = to_svm(vcpu);
3344
Joerg Roedel2af91942009-08-07 11:49:28 +02003345 BUG_ON(!(gif_set(svm)));
Alexander Grafcf74a782008-11-25 20:17:08 +01003346
Gleb Natapov9fb2d2b2010-05-23 14:28:26 +03003347 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
3348 ++vcpu->stat.irq_injections;
3349
Alexander Graf219b65d2009-06-15 15:21:25 +02003350 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3351 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
Eddie Dong2a8067f2007-08-06 16:29:07 +03003352}
3353
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003354static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3355{
3356 struct vcpu_svm *svm = to_svm(vcpu);
3357
Joerg Roedel20307532010-11-29 17:51:48 +01003358 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003359 return;
3360
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003361 if (irr == -1)
3362 return;
3363
3364 if (tpr >= irr)
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003365 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003366}
3367
3368static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02003369{
3370 struct vcpu_svm *svm = to_svm(vcpu);
3371 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel924584c2010-04-22 12:33:07 +02003372 int ret;
3373 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
3374 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
3375 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
3376
3377 return ret;
Joerg Roedelaaacfc92008-04-16 16:51:18 +02003378}
3379
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003380static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3381{
3382 struct vcpu_svm *svm = to_svm(vcpu);
3383
3384 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
3385}
3386
3387static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3388{
3389 struct vcpu_svm *svm = to_svm(vcpu);
3390
3391 if (masked) {
3392 svm->vcpu.arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003393 set_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003394 } else {
3395 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003396 clr_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003397 }
3398}
3399
Gleb Natapov78646122009-03-23 12:12:11 +02003400static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
3401{
3402 struct vcpu_svm *svm = to_svm(vcpu);
3403 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7fcdb512009-09-16 15:24:15 +02003404 int ret;
3405
3406 if (!gif_set(svm) ||
3407 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
3408 return 0;
3409
Avi Kivityf6e78472010-08-02 15:30:20 +03003410 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
Joerg Roedel7fcdb512009-09-16 15:24:15 +02003411
Joerg Roedel20307532010-11-29 17:51:48 +01003412 if (is_guest_mode(vcpu))
Joerg Roedel7fcdb512009-09-16 15:24:15 +02003413 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
3414
3415 return ret;
Gleb Natapov78646122009-03-23 12:12:11 +02003416}
3417
Gleb Natapov9222be12009-04-23 17:14:37 +03003418static void enable_irq_window(struct kvm_vcpu *vcpu)
3419{
Alexander Graf219b65d2009-06-15 15:21:25 +02003420 struct vcpu_svm *svm = to_svm(vcpu);
Alexander Graf219b65d2009-06-15 15:21:25 +02003421
Joerg Roedele0231712010-02-24 18:59:10 +01003422 /*
3423 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
3424 * 1, because that's a separate STGI/VMRUN intercept. The next time we
3425 	 * get that intercept, this function will be called again and we will
3426 	 * then get the VINTR intercept.
3427 */
Joerg Roedel8fe54652010-02-19 16:23:01 +01003428 if (gif_set(svm) && nested_svm_intr(svm)) {
Alexander Graf219b65d2009-06-15 15:21:25 +02003429 svm_set_vintr(svm);
3430 svm_inject_irq(svm, 0x0);
3431 }
Gleb Natapov9222be12009-04-23 17:14:37 +03003432}
3433
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003434static void enable_nmi_window(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003435{
Avi Kivity04d2cc72007-09-10 18:10:54 +03003436 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03003437
Gleb Natapov44c11432009-05-11 13:35:52 +03003438 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
3439 == HF_NMI_MASK)
3440 return; /* IRET will cause a vm exit */
3441
Joerg Roedele0231712010-02-24 18:59:10 +01003442 /*
3443 	 * Something prevents NMI from being injected. Single step over possible
3444 * problem (IRET or exception injection or interrupt shadow)
3445 */
Jan Kiszka6be7d302009-10-18 13:24:54 +02003446 svm->nmi_singlestep = true;
Gleb Natapov44c11432009-05-11 13:35:52 +03003447 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3448 update_db_intercept(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03003449}
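/*
 * The TF|RF single-step armed above trips a #DB as soon as the blocking
 * condition (IRET, interrupt shadow or exception injection) has passed;
 * the #DB handler (db_interception(), earlier in this file) notices
 * nmi_singlestep and reopens the NMI window.
 */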
3450
Izik Eiduscbc94022007-10-25 00:29:55 +02003451static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3452{
3453 return 0;
3454}
3455
Avi Kivityd9e368d2007-06-07 19:18:30 +03003456static void svm_flush_tlb(struct kvm_vcpu *vcpu)
3457{
Joerg Roedel38e5e922010-12-03 15:25:16 +01003458 struct vcpu_svm *svm = to_svm(vcpu);
3459
3460 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3461 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3462 else
3463 svm->asid_generation--;
Avi Kivityd9e368d2007-06-07 19:18:30 +03003464}
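/*
 * On FLUSHBYASID-capable hardware the flush is requested through tlb_ctl
 * and performed by the next VMRUN; otherwise decrementing
 * asid_generation forces pre_svm_run() to hand out a fresh ASID, which
 * flushes implicitly.
 */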
3465
Avi Kivity04d2cc72007-09-10 18:10:54 +03003466static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
3467{
3468}
3469
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003470static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3471{
3472 struct vcpu_svm *svm = to_svm(vcpu);
3473
Joerg Roedel20307532010-11-29 17:51:48 +01003474 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003475 return;
3476
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003477 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003478 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
Gleb Natapov615d5192009-04-21 17:45:05 +03003479 kvm_set_cr8(vcpu, cr8);
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003480 }
3481}
3482
Joerg Roedel649d6862008-04-16 16:51:15 +02003483static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3484{
3485 struct vcpu_svm *svm = to_svm(vcpu);
3486 u64 cr8;
3487
Joerg Roedel20307532010-11-29 17:51:48 +01003488 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003489 return;
3490
Joerg Roedel649d6862008-04-16 16:51:15 +02003491 cr8 = kvm_get_cr8(vcpu);
3492 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3493 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3494}
3495
Gleb Natapov9222be12009-04-23 17:14:37 +03003496static void svm_complete_interrupts(struct vcpu_svm *svm)
3497{
3498 u8 vector;
3499 int type;
3500 u32 exitintinfo = svm->vmcb->control.exit_int_info;
Jan Kiszka66b71382010-02-23 17:47:56 +01003501 unsigned int3_injected = svm->int3_injected;
3502
3503 svm->int3_injected = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03003504
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02003505 /*
3506 * If we've made progress since setting HF_IRET_MASK, we've
3507 * executed an IRET and can allow NMI injection.
3508 */
3509 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
3510 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
Gleb Natapov44c11432009-05-11 13:35:52 +03003511 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
Avi Kivity3842d132010-07-27 12:30:24 +03003512 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3513 }
Gleb Natapov44c11432009-05-11 13:35:52 +03003514
Gleb Natapov9222be12009-04-23 17:14:37 +03003515 svm->vcpu.arch.nmi_injected = false;
3516 kvm_clear_exception_queue(&svm->vcpu);
3517 kvm_clear_interrupt_queue(&svm->vcpu);
3518
3519 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3520 return;
3521
Avi Kivity3842d132010-07-27 12:30:24 +03003522 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3523
Gleb Natapov9222be12009-04-23 17:14:37 +03003524 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3525 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3526
3527 switch (type) {
3528 case SVM_EXITINTINFO_TYPE_NMI:
3529 svm->vcpu.arch.nmi_injected = true;
3530 break;
3531 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszka66b71382010-02-23 17:47:56 +01003532 /*
3533 * In case of software exceptions, do not reinject the vector,
3534 * but re-execute the instruction instead. Rewind RIP first
3535 * if we emulated INT3 before.
3536 */
3537 if (kvm_exception_is_soft(vector)) {
3538 if (vector == BP_VECTOR && int3_injected &&
3539 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
3540 kvm_rip_write(&svm->vcpu,
3541 kvm_rip_read(&svm->vcpu) -
3542 int3_injected);
Alexander Graf219b65d2009-06-15 15:21:25 +02003543 break;
Jan Kiszka66b71382010-02-23 17:47:56 +01003544 }
Gleb Natapov9222be12009-04-23 17:14:37 +03003545 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3546 u32 err = svm->vmcb->control.exit_int_info_err;
Joerg Roedelce7ddec2010-04-22 12:33:13 +02003547 kvm_requeue_exception_e(&svm->vcpu, vector, err);
Gleb Natapov9222be12009-04-23 17:14:37 +03003548
3549 } else
Joerg Roedelce7ddec2010-04-22 12:33:13 +02003550 kvm_requeue_exception(&svm->vcpu, vector);
Gleb Natapov9222be12009-04-23 17:14:37 +03003551 break;
3552 case SVM_EXITINTINFO_TYPE_INTR:
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003553 kvm_queue_interrupt(&svm->vcpu, vector, false);
Gleb Natapov9222be12009-04-23 17:14:37 +03003554 break;
3555 default:
3556 break;
3557 }
3558}
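/*
 * exit_int_info snapshots whatever event was being delivered when the
 * #VMEXIT occurred. NMIs and hardware interrupts are simply requeued;
 * software exceptions such as INT3 are re-executed instead, since their
 * RIP has already advanced past the instruction (rewound above if we
 * injected the INT3 ourselves).
 */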
3559
Avi Kivityb463a6f2010-07-20 15:06:17 +03003560static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3561{
3562 struct vcpu_svm *svm = to_svm(vcpu);
3563 struct vmcb_control_area *control = &svm->vmcb->control;
3564
3565 control->exit_int_info = control->event_inj;
3566 control->exit_int_info_err = control->event_inj_err;
3567 control->event_inj = 0;
3568 svm_complete_interrupts(svm);
3569}
3570
Avi Kivity80e31d42008-07-14 14:44:59 +03003571#ifdef CONFIG_X86_64
3572#define R "r"
3573#else
3574#define R "e"
3575#endif
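/*
 * The R macro lets one asm body serve both targets: the string pasting
 * in "%%"R"bx" yields %%rbx when built for x86-64 and %%ebx on 32-bit.
 */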
3576
Avi Kivity851ba692009-08-24 11:10:17 +03003577static void svm_vcpu_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003578{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003579 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivityd9e368d2007-06-07 19:18:30 +03003580
Joerg Roedel2041a062010-04-22 12:33:08 +02003581 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3582 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3583 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3584
Joerg Roedelcd3ff652009-10-09 16:08:26 +02003585 /*
3586 * A vmexit emulation is required before the vcpu can be executed
3587 * again.
3588 */
3589 if (unlikely(svm->nested.exit_required))
3590 return;
3591
Rusty Russelle756fc62007-07-30 20:07:08 +10003592 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003593
Joerg Roedel649d6862008-04-16 16:51:15 +02003594 sync_lapic_to_cr8(vcpu);
3595
Joerg Roedelcda0ffd2009-08-07 11:49:45 +02003596 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003597
Avi Kivity04d2cc72007-09-10 18:10:54 +03003598 clgi();
3599
3600 local_irq_enable();
Avi Kivity36241b82006-12-22 01:05:20 -08003601
Avi Kivity6aa8b732006-12-10 02:21:36 -08003602 asm volatile (
Avi Kivity80e31d42008-07-14 14:44:59 +03003603 "push %%"R"bp; \n\t"
3604 "mov %c[rbx](%[svm]), %%"R"bx \n\t"
3605 "mov %c[rcx](%[svm]), %%"R"cx \n\t"
3606 "mov %c[rdx](%[svm]), %%"R"dx \n\t"
3607 "mov %c[rsi](%[svm]), %%"R"si \n\t"
3608 "mov %c[rdi](%[svm]), %%"R"di \n\t"
3609 "mov %c[rbp](%[svm]), %%"R"bp \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08003610#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10003611 "mov %c[r8](%[svm]), %%r8 \n\t"
3612 "mov %c[r9](%[svm]), %%r9 \n\t"
3613 "mov %c[r10](%[svm]), %%r10 \n\t"
3614 "mov %c[r11](%[svm]), %%r11 \n\t"
3615 "mov %c[r12](%[svm]), %%r12 \n\t"
3616 "mov %c[r13](%[svm]), %%r13 \n\t"
3617 "mov %c[r14](%[svm]), %%r14 \n\t"
3618 "mov %c[r15](%[svm]), %%r15 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08003619#endif
3620
Avi Kivity6aa8b732006-12-10 02:21:36 -08003621 /* Enter guest mode */
Avi Kivity80e31d42008-07-14 14:44:59 +03003622 "push %%"R"ax \n\t"
3623 "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
Avi Kivity4ecac3f2008-05-13 13:23:38 +03003624 __ex(SVM_VMLOAD) "\n\t"
3625 __ex(SVM_VMRUN) "\n\t"
3626 __ex(SVM_VMSAVE) "\n\t"
Avi Kivity80e31d42008-07-14 14:44:59 +03003627 "pop %%"R"ax \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08003628
3629 /* Save guest registers, load host registers */
Avi Kivity80e31d42008-07-14 14:44:59 +03003630 "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
3631 "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
3632 "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
3633 "mov %%"R"si, %c[rsi](%[svm]) \n\t"
3634 "mov %%"R"di, %c[rdi](%[svm]) \n\t"
3635 "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08003636#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10003637 "mov %%r8, %c[r8](%[svm]) \n\t"
3638 "mov %%r9, %c[r9](%[svm]) \n\t"
3639 "mov %%r10, %c[r10](%[svm]) \n\t"
3640 "mov %%r11, %c[r11](%[svm]) \n\t"
3641 "mov %%r12, %c[r12](%[svm]) \n\t"
3642 "mov %%r13, %c[r13](%[svm]) \n\t"
3643 "mov %%r14, %c[r14](%[svm]) \n\t"
3644 "mov %%r15, %c[r15](%[svm]) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08003645#endif
Avi Kivity80e31d42008-07-14 14:44:59 +03003646 "pop %%"R"bp"
Avi Kivity6aa8b732006-12-10 02:21:36 -08003647 :
Rusty Russellfb3f0f52007-07-27 17:16:56 +10003648 : [svm]"a"(svm),
Avi Kivity6aa8b732006-12-10 02:21:36 -08003649 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003650 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
3651 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
3652 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
3653 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
3654 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
3655 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
Avi Kivity05b3e0c2006-12-13 00:33:45 -08003656#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003657 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
3658 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
3659 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
3660 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
3661 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
3662 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
3663 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
3664 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
Avi Kivity6aa8b732006-12-10 02:21:36 -08003665#endif
Laurent Vivier54a08c02007-10-25 14:18:53 +02003666 : "cc", "memory"
Avi Kivity80e31d42008-07-14 14:44:59 +03003667 , R"bx", R"cx", R"dx", R"si", R"di"
Laurent Vivier54a08c02007-10-25 14:18:53 +02003668#ifdef CONFIG_X86_64
Laurent Vivier54a08c02007-10-25 14:18:53 +02003669 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
3670#endif
3671 );
Avi Kivity6aa8b732006-12-10 02:21:36 -08003672
Avi Kivity82ca2d12010-10-21 12:20:34 +02003673#ifdef CONFIG_X86_64
3674 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
3675#else
Avi Kivitydacccfd2010-10-21 12:20:33 +02003676 loadsegment(fs, svm->host.fs);
Avi Kivity831ca602011-03-08 16:09:51 +02003677#ifndef CONFIG_X86_32_LAZY_GS
3678 loadsegment(gs, svm->host.gs);
3679#endif
Avi Kivity9581d442010-10-19 16:46:55 +02003680#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -08003681
3682 reload_tss(vcpu);
3683
Avi Kivity56ba47d2007-11-07 17:14:18 +02003684 local_irq_disable();
3685
Avi Kivity13c34e02010-10-21 12:20:31 +02003686 vcpu->arch.cr2 = svm->vmcb->save.cr2;
3687 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3688 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3689 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3690
Joerg Roedel3781c012011-01-14 16:45:02 +01003691 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3692 kvm_before_handle_nmi(&svm->vcpu);
3693
3694 stgi();
3695
3696 	/* Any NMI held pending while GIF was clear is delivered here */
3697
3698 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3699 kvm_after_handle_nmi(&svm->vcpu);
3700
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003701 sync_cr8_to_lapic(vcpu);
3702
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003703 svm->next_rip = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03003704
Joerg Roedel38e5e922010-12-03 15:25:16 +01003705 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3706
Gleb Natapov631bc482010-10-14 11:22:52 +02003707 /* if exit due to PF check for async PF */
3708 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
3709 svm->apf_reason = kvm_read_and_reset_pf_reason();
3710
Avi Kivity6de4f3a2009-05-31 22:58:47 +03003711 if (npt_enabled) {
3712 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
3713 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
3714 }
Joerg Roedelfe5913e2010-05-17 14:43:34 +02003715
3716 /*
3717 * We need to handle MC intercepts here before the vcpu has a chance to
3718 * change the physical cpu
3719 */
3720 if (unlikely(svm->vmcb->control.exit_code ==
3721 SVM_EXIT_EXCP_BASE + MC_VECTOR))
3722 svm_handle_mce(svm);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003723
3724 mark_all_clean(svm->vmcb);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003725}
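/*
 * Note the clgi()/stgi() bracket around the world switch above: with GIF
 * clear, host interrupts and NMIs are held off even though IF is set, so
 * host state is never observed half-restored. Once stgi() sets GIF again
 * a pending NMI fires immediately, while ordinary interrupts stay masked
 * by the earlier local_irq_disable().
 */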
3726
Avi Kivity80e31d42008-07-14 14:44:59 +03003727#undef R
3728
Avi Kivity6aa8b732006-12-10 02:21:36 -08003729static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3730{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003731 struct vcpu_svm *svm = to_svm(vcpu);
3732
3733 svm->vmcb->save.cr3 = root;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01003734 mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedelf40f6a42010-12-03 15:25:15 +01003735 svm_flush_tlb(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003736}
3737
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02003738static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3739{
3740 struct vcpu_svm *svm = to_svm(vcpu);
3741
3742 svm->vmcb->control.nested_cr3 = root;
Joerg Roedelb2747162010-12-03 11:45:53 +01003743 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02003744
3745 /* Also sync guest cr3 here in case we live migrate */
Avi Kivity9f8fe502010-12-05 17:30:00 +02003746 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
Joerg Roedeldcca1a62010-12-03 11:45:54 +01003747 mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02003748
Joerg Roedelf40f6a42010-12-03 15:25:15 +01003749 svm_flush_tlb(vcpu);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02003750}
3751
Avi Kivity6aa8b732006-12-10 02:21:36 -08003752static int is_disabled(void)
3753{
Joerg Roedel6031a612007-06-22 12:29:50 +03003754 u64 vm_cr;
3755
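	/* The BIOS may have disabled SVM via the SVM_DISABLE bit in VM_CR */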
3756 rdmsrl(MSR_VM_CR, vm_cr);
3757 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
3758 return 1;
3759
Avi Kivity6aa8b732006-12-10 02:21:36 -08003760 return 0;
3761}
3762
Ingo Molnar102d8322007-02-19 14:37:47 +02003763static void
3764svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
3765{
3766 /*
3767 * Patch in the VMMCALL instruction:
3768 */
3769 hypercall[0] = 0x0f;
3770 hypercall[1] = 0x01;
3771 hypercall[2] = 0xd9; /* 0f 01 d9 == VMMCALL */
Ingo Molnar102d8322007-02-19 14:37:47 +02003772}
3773
Yang, Sheng002c7f72007-07-31 14:23:01 +03003774static void svm_check_processor_compat(void *rtn)
3775{
3776 *(int *)rtn = 0;
3777}
3778
Avi Kivity774ead32007-12-26 13:57:04 +02003779static bool svm_cpu_has_accelerated_tpr(void)
3780{
3781 return false;
3782}
3783
Sheng Yang4b12f0d2009-04-27 20:35:42 +08003784static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
Sheng Yang64d4d522008-10-09 16:01:57 +08003785{
3786 return 0;
3787}
3788
Sheng Yang0e851882009-12-18 16:48:46 +08003789static void svm_cpuid_update(struct kvm_vcpu *vcpu)
3790{
3791}
3792
Joerg Roedeld4330ef2010-04-22 12:33:11 +02003793static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
3794{
Joerg Roedelc2c63a42010-04-22 12:33:12 +02003795 switch (func) {
Joerg Roedel4c62a2d2010-09-10 17:31:06 +02003796 case 0x80000001:
3797 if (nested)
3798 entry->ecx |= (1 << 2); /* Set SVM bit */
3799 break;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02003800 case 0x8000000A:
3801 entry->eax = 1; /* SVM revision 1 */
3802 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
3803 ASID emulation to nested SVM */
3804 entry->ecx = 0; /* Reserved */
Joerg Roedel7a190662010-07-27 18:14:21 +02003805 entry->edx = 0; /* By default do not advertise any
3806 additional features */
3807
3808 /* Support next_rip if host supports it */
Avi Kivity2a6b20b2010-11-09 16:15:42 +02003809 if (boot_cpu_has(X86_FEATURE_NRIPS))
Joerg Roedel7a190662010-07-27 18:14:21 +02003810 entry->edx |= SVM_FEATURE_NRIP;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02003811
Joerg Roedel3d4aeaa2010-09-10 17:31:05 +02003812 /* Support NPT for the guest if enabled */
3813 if (npt_enabled)
3814 entry->edx |= SVM_FEATURE_NPT;
3815
Joerg Roedelc2c63a42010-04-22 12:33:12 +02003816 break;
3817 }
Joerg Roedeld4330ef2010-04-22 12:33:11 +02003818}
3819
Marcelo Tosatti229456f2009-06-17 09:22:14 -03003820static const struct trace_print_flags svm_exit_reasons_str[] = {
Joerg Roedele0231712010-02-24 18:59:10 +01003821 { SVM_EXIT_READ_CR0, "read_cr0" },
3822 { SVM_EXIT_READ_CR3, "read_cr3" },
3823 { SVM_EXIT_READ_CR4, "read_cr4" },
3824 { SVM_EXIT_READ_CR8, "read_cr8" },
3825 { SVM_EXIT_WRITE_CR0, "write_cr0" },
3826 { SVM_EXIT_WRITE_CR3, "write_cr3" },
3827 { SVM_EXIT_WRITE_CR4, "write_cr4" },
3828 { SVM_EXIT_WRITE_CR8, "write_cr8" },
3829 { SVM_EXIT_READ_DR0, "read_dr0" },
3830 { SVM_EXIT_READ_DR1, "read_dr1" },
3831 { SVM_EXIT_READ_DR2, "read_dr2" },
3832 { SVM_EXIT_READ_DR3, "read_dr3" },
3833 { SVM_EXIT_WRITE_DR0, "write_dr0" },
3834 { SVM_EXIT_WRITE_DR1, "write_dr1" },
3835 { SVM_EXIT_WRITE_DR2, "write_dr2" },
3836 { SVM_EXIT_WRITE_DR3, "write_dr3" },
3837 { SVM_EXIT_WRITE_DR5, "write_dr5" },
3838 { SVM_EXIT_WRITE_DR7, "write_dr7" },
Marcelo Tosatti229456f2009-06-17 09:22:14 -03003839 { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
3840 { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
3841 { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
3842 { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" },
3843 { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" },
3844 { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" },
3845 { SVM_EXIT_INTR, "interrupt" },
3846 { SVM_EXIT_NMI, "nmi" },
3847 { SVM_EXIT_SMI, "smi" },
3848 { SVM_EXIT_INIT, "init" },
3849 { SVM_EXIT_VINTR, "vintr" },
3850 { SVM_EXIT_CPUID, "cpuid" },
3851 { SVM_EXIT_INVD, "invd" },
3852 { SVM_EXIT_HLT, "hlt" },
3853 { SVM_EXIT_INVLPG, "invlpg" },
3854 { SVM_EXIT_INVLPGA, "invlpga" },
3855 { SVM_EXIT_IOIO, "io" },
3856 { SVM_EXIT_MSR, "msr" },
3857 { SVM_EXIT_TASK_SWITCH, "task_switch" },
3858 { SVM_EXIT_SHUTDOWN, "shutdown" },
3859 { SVM_EXIT_VMRUN, "vmrun" },
3860 { SVM_EXIT_VMMCALL, "hypercall" },
3861 { SVM_EXIT_VMLOAD, "vmload" },
3862 { SVM_EXIT_VMSAVE, "vmsave" },
3863 { SVM_EXIT_STGI, "stgi" },
3864 { SVM_EXIT_CLGI, "clgi" },
3865 { SVM_EXIT_SKINIT, "skinit" },
3866 { SVM_EXIT_WBINVD, "wbinvd" },
3867 { SVM_EXIT_MONITOR, "monitor" },
3868 { SVM_EXIT_MWAIT, "mwait" },
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003869 { SVM_EXIT_XSETBV, "xsetbv" },
Marcelo Tosatti229456f2009-06-17 09:22:14 -03003870 { SVM_EXIT_NPF, "npf" },
3871 { -1, NULL }
3872};
3873
Sheng Yang17cc3932010-01-05 19:02:27 +08003874static int svm_get_lpage_level(void)
Joerg Roedel344f4142009-07-27 16:30:48 +02003875{
Sheng Yang17cc3932010-01-05 19:02:27 +08003876 return PT_PDPE_LEVEL;
Joerg Roedel344f4142009-07-27 16:30:48 +02003877}
3878
Sheng Yang4e47c7a2009-12-18 16:48:47 +08003879static bool svm_rdtscp_supported(void)
3880{
3881 return false;
3882}
3883
Sheng Yangf5f48ee2010-06-30 12:25:15 +08003884static bool svm_has_wbinvd_exit(void)
3885{
3886 return true;
3887}
3888
Avi Kivity02daab22009-12-30 12:40:26 +02003889static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
3890{
3891 struct vcpu_svm *svm = to_svm(vcpu);
3892
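	/* Intercept #NM so the FPU can be re-activated lazily on first use */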
Joerg Roedel18c918c2010-11-30 18:03:59 +01003893 set_exception_intercept(svm, NM_VECTOR);
Joerg Roedel66a562f2010-02-19 16:23:08 +01003894 update_cr0_intercept(svm);
Avi Kivity02daab22009-12-30 12:40:26 +02003895}
3896
Joerg Roedel80612522011-04-04 12:39:33 +02003897#define PRE_EX(exit) { .exit_code = (exit), \
3898 .stage = X86_ICPT_PRE_EXCEPT, \
3899 .valid = true }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003900#define POST_EX(exit) { .exit_code = (exit), \
3901 .stage = X86_ICPT_POST_EXCEPT, \
3902 .valid = true }
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003903#define POST_MEM(exit) { .exit_code = (exit), \
3904 .stage = X86_ICPT_POST_MEMACCESS, \
3905 .valid = true }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003906
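/*
 * Map the emulator's x86_intercept codes to SVM exit codes, together with
 * the instruction stage at which each intercept has to be checked.
 */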
3907static struct __x86_intercept {
3908 u32 exit_code;
3909 enum x86_intercept_stage stage;
3910 bool valid;
3911} x86_intercept_map[] = {
3912 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
3913 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
3914 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
3915 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
3916 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
Joerg Roedel3b88e412011-04-04 12:39:29 +02003917 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
3918 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02003919 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
3920 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
3921 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
3922 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
3923 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
3924 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
3925 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
3926 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003927 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
3928 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
3929 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
3930 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
3931 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
3932 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
3933 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
3934 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003935 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
3936 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
3937 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
Joerg Roedel80612522011-04-04 12:39:33 +02003938 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
3939 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
3940 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
3941 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
3942 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
3943 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
3944 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
3945 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
3946 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
Joerg Roedelbf608f82011-04-04 12:39:34 +02003947 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
3948 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
3949 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
3950 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
3951 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
3952 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
3953 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
Joerg Roedelf6511932011-04-04 12:39:35 +02003954 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
3955 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
3956 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
3957 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003958};
3959
Joerg Roedel80612522011-04-04 12:39:33 +02003960#undef PRE_EX
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003961#undef POST_EX
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003962#undef POST_MEM
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003963
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02003964static int svm_check_intercept(struct kvm_vcpu *vcpu,
3965 struct x86_instruction_info *info,
3966 enum x86_intercept_stage stage)
3967{
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003968 struct vcpu_svm *svm = to_svm(vcpu);
3969 int vmexit, ret = X86EMUL_CONTINUE;
3970 struct __x86_intercept icpt_info;
3971 struct vmcb *vmcb = svm->vmcb;
3972
3973 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
3974 goto out;
3975
3976 icpt_info = x86_intercept_map[info->intercept];
3977
3978 if (!icpt_info.valid || stage != icpt_info.stage)
3979 goto out;
3980
3981 switch (icpt_info.exit_code) {
3982 case SVM_EXIT_READ_CR0:
3983 if (info->intercept == x86_intercept_cr_read)
3984 icpt_info.exit_code += info->modrm_reg;
3985 break;
3986 case SVM_EXIT_WRITE_CR0: {
3987 unsigned long cr0, val;
3988 u64 intercept;
3989
3990 if (info->intercept == x86_intercept_cr_write)
3991 icpt_info.exit_code += info->modrm_reg;
3992
3993 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
3994 break;
3995
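		/*
		 * A selective CR0 write intercept only triggers when bits
		 * other than TS and MP change, so mask those out before
		 * comparing old and new values.
		 */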
3996 intercept = svm->nested.intercept;
3997
3998 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
3999 break;
4000
4001 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4002 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
4003
4004 if (info->intercept == x86_intercept_lmsw) {
4005 cr0 &= 0xfUL;
4006 val &= 0xfUL;
4007 /* lmsw can't clear PE - catch this here */
4008 if (cr0 & X86_CR0_PE)
4009 val |= X86_CR0_PE;
4010 }
4011
4012 if (cr0 ^ val)
4013 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4014
4015 break;
4016 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02004017 case SVM_EXIT_READ_DR0:
4018 case SVM_EXIT_WRITE_DR0:
4019 icpt_info.exit_code += info->modrm_reg;
4020 break;
Joerg Roedel80612522011-04-04 12:39:33 +02004021 case SVM_EXIT_MSR:
4022 if (info->intercept == x86_intercept_wrmsr)
4023 vmcb->control.exit_info_1 = 1;
4024 else
4025 vmcb->control.exit_info_1 = 0;
4026 break;
Joerg Roedelbf608f82011-04-04 12:39:34 +02004027 case SVM_EXIT_PAUSE:
4028 /*
4029 * We get this for NOP only, but PAUSE
4030 * is REP NOP, so check the rep prefix here
4031 */
4032 if (info->rep_prefix != REPE_PREFIX)
4033 goto out;
		break;
Joerg Roedelf6511932011-04-04 12:39:35 +02004034 case SVM_EXIT_IOIO: {
4035 u64 exit_info;
4036 u32 bytes;
4037
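		/* Build IOIO exit_info_1: bits 31:16 hold the port number (from DX) */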
4038 exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;
4039
4040 if (info->intercept == x86_intercept_in ||
4041 info->intercept == x86_intercept_ins) {
4042 exit_info |= SVM_IOIO_TYPE_MASK;
4043 bytes = info->src_bytes;
4044 } else {
4045 bytes = info->dst_bytes;
4046 }
4047
4048 if (info->intercept == x86_intercept_outs ||
4049 info->intercept == x86_intercept_ins)
4050 exit_info |= SVM_IOIO_STR_MASK;
4051
4052 if (info->rep_prefix)
4053 exit_info |= SVM_IOIO_REP_MASK;
4054
4055 bytes = min(bytes, 4u);
4056
4057 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4058
4059 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4060
4061 vmcb->control.exit_info_1 = exit_info;
4062 vmcb->control.exit_info_2 = info->next_rip;
4063
4064 break;
4065 }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004066 default:
4067 break;
4068 }
4069
4070 vmcb->control.next_rip = info->next_rip;
4071 vmcb->control.exit_code = icpt_info.exit_code;
4072 vmexit = nested_svm_exit_handled(svm);
4073
4074 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4075 : X86EMUL_CONTINUE;
4076
4077out:
4078 return ret;
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004079}
4080
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03004081static struct kvm_x86_ops svm_x86_ops = {
Avi Kivity6aa8b732006-12-10 02:21:36 -08004082 .cpu_has_kvm_support = has_svm,
4083 .disabled_by_bios = is_disabled,
4084 .hardware_setup = svm_hardware_setup,
4085 .hardware_unsetup = svm_hardware_unsetup,
Yang, Sheng002c7f72007-07-31 14:23:01 +03004086 .check_processor_compatibility = svm_check_processor_compat,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004087 .hardware_enable = svm_hardware_enable,
4088 .hardware_disable = svm_hardware_disable,
Avi Kivity774ead32007-12-26 13:57:04 +02004089 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004090
4091 .vcpu_create = svm_create_vcpu,
4092 .vcpu_free = svm_free_vcpu,
Avi Kivity04d2cc72007-09-10 18:10:54 +03004093 .vcpu_reset = svm_vcpu_reset,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004094
Avi Kivity04d2cc72007-09-10 18:10:54 +03004095 .prepare_guest_switch = svm_prepare_guest_switch,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004096 .vcpu_load = svm_vcpu_load,
4097 .vcpu_put = svm_vcpu_put,
4098
4099 .set_guest_debug = svm_guest_debug,
4100 .get_msr = svm_get_msr,
4101 .set_msr = svm_set_msr,
4102 .get_segment_base = svm_get_segment_base,
4103 .get_segment = svm_get_segment,
4104 .set_segment = svm_set_segment,
Izik Eidus2e4d2652008-03-24 19:38:34 +02004105 .get_cpl = svm_get_cpl,
Rusty Russell1747fb72007-09-06 01:21:32 +10004106 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
Avi Kivitye8467fd2009-12-29 18:43:06 +02004107 .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
Avi Kivityaff48ba2010-12-05 18:56:11 +02004108 .decache_cr3 = svm_decache_cr3,
Anthony Liguori25c4c272007-04-27 09:29:21 +03004109 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004110 .set_cr0 = svm_set_cr0,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004111 .set_cr3 = svm_set_cr3,
4112 .set_cr4 = svm_set_cr4,
4113 .set_efer = svm_set_efer,
4114 .get_idt = svm_get_idt,
4115 .set_idt = svm_set_idt,
4116 .get_gdt = svm_get_gdt,
4117 .set_gdt = svm_set_gdt,
Gleb Natapov020df072010-04-13 10:05:23 +03004118 .set_dr7 = svm_set_dr7,
Avi Kivity6de4f3a2009-05-31 22:58:47 +03004119 .cache_reg = svm_cache_reg,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004120 .get_rflags = svm_get_rflags,
4121 .set_rflags = svm_set_rflags,
Avi Kivity6b52d182010-01-21 15:31:47 +02004122 .fpu_activate = svm_fpu_activate,
Avi Kivity02daab22009-12-30 12:40:26 +02004123 .fpu_deactivate = svm_fpu_deactivate,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004124
Avi Kivity6aa8b732006-12-10 02:21:36 -08004125 .tlb_flush = svm_flush_tlb,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004126
Avi Kivity6aa8b732006-12-10 02:21:36 -08004127 .run = svm_vcpu_run,
Avi Kivity04d2cc72007-09-10 18:10:54 +03004128 .handle_exit = handle_exit,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004129 .skip_emulated_instruction = skip_emulated_instruction,
Glauber Costa2809f5d2009-05-12 16:21:05 -04004130 .set_interrupt_shadow = svm_set_interrupt_shadow,
4131 .get_interrupt_shadow = svm_get_interrupt_shadow,
Ingo Molnar102d8322007-02-19 14:37:47 +02004132 .patch_hypercall = svm_patch_hypercall,
Eddie Dong2a8067f2007-08-06 16:29:07 +03004133 .set_irq = svm_set_irq,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004134 .set_nmi = svm_inject_nmi,
Avi Kivity298101d2007-11-25 13:41:11 +02004135 .queue_exception = svm_queue_exception,
Avi Kivityb463a6f2010-07-20 15:06:17 +03004136 .cancel_injection = svm_cancel_injection,
Gleb Natapov78646122009-03-23 12:12:11 +02004137 .interrupt_allowed = svm_interrupt_allowed,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004138 .nmi_allowed = svm_nmi_allowed,
Jan Kiszka3cfc3092009-11-12 01:04:25 +01004139 .get_nmi_mask = svm_get_nmi_mask,
4140 .set_nmi_mask = svm_set_nmi_mask,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004141 .enable_nmi_window = enable_nmi_window,
4142 .enable_irq_window = enable_irq_window,
4143 .update_cr8_intercept = update_cr8_intercept,
Izik Eiduscbc94022007-10-25 00:29:55 +02004144
4145 .set_tss_addr = svm_set_tss_addr,
Sheng Yang67253af2008-04-25 10:20:22 +08004146 .get_tdp_level = get_npt_level,
Sheng Yang4b12f0d2009-04-27 20:35:42 +08004147 .get_mt_mask = svm_get_mt_mask,
Marcelo Tosatti229456f2009-06-17 09:22:14 -03004148
Avi Kivity586f9602010-11-18 13:09:54 +02004149 .get_exit_info = svm_get_exit_info,
Marcelo Tosatti229456f2009-06-17 09:22:14 -03004150 .exit_reasons_str = svm_exit_reasons_str,
Avi Kivity586f9602010-11-18 13:09:54 +02004151
Sheng Yang17cc3932010-01-05 19:02:27 +08004152 .get_lpage_level = svm_get_lpage_level,
Sheng Yang0e851882009-12-18 16:48:46 +08004153
4154 .cpuid_update = svm_cpuid_update,
Sheng Yang4e47c7a2009-12-18 16:48:47 +08004155
4156 .rdtscp_supported = svm_rdtscp_supported,
Joerg Roedeld4330ef2010-04-22 12:33:11 +02004157
4158 .set_supported_cpuid = svm_set_supported_cpuid,
Sheng Yangf5f48ee2010-06-30 12:25:15 +08004159
4160 .has_wbinvd_exit = svm_has_wbinvd_exit,
Zachary Amsden99e3e302010-08-19 22:07:17 -10004161
4162 .write_tsc_offset = svm_write_tsc_offset,
Zachary Amsdene48672f2010-08-19 22:07:23 -10004163 .adjust_tsc_offset = svm_adjust_tsc_offset,
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02004164
4165 .set_tdp_cr3 = set_tdp_cr3,
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004166
4167 .check_intercept = svm_check_intercept,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004168};
4169
4170static int __init svm_init(void)
4171{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08004172 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
Avi Kivity0ee75be2010-04-28 15:39:01 +03004173 __alignof__(struct vcpu_svm), THIS_MODULE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004174}
4175
4176static void __exit svm_exit(void)
4177{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08004178 kvm_exit();
Avi Kivity6aa8b732006-12-10 02:21:36 -08004179}
4180
4181module_init(svm_init)
4182module_exit(svm_exit)