/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>

#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/kvm_para.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT            (1 << 0)
#define SVM_FEATURE_LBRV           (1 << 1)
#define SVM_FEATURE_SVML           (1 << 2)
#define SVM_FEATURE_NRIP           (1 << 3)
#define SVM_FEATURE_TSC_RATE       (1 << 4)
#define SVM_FEATURE_VMCB_CLEAN     (1 << 5)
#define SVM_FEATURE_FLUSH_ASID     (1 << 6)
#define SVM_FEATURE_DECODE_ASSIST  (1 << 7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD	0xffffff0000000000ULL
#define TSC_RATIO_MIN	0x0000000000000001ULL
#define TSC_RATIO_MAX	0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

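/*
 * Offsets into the MSR permission map that KVM actually uses. The table
 * is filled by init_msrpm_offsets() from direct_access_msrs, so that
 * later code only has to look at these few dwords instead of scanning
 * the whole 8k bitmap.
 */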
#define MSRPM_OFFSETS 16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;

	unsigned int3_injected;
	unsigned long int3_rip;
	u32 apf_reason;

	u64 tsc_ratio;
};

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID	0xffffffffU

static struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,			.always = true },
	{ .index = MSR_IA32_SYSENTER_CS,	.always = true },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,			.always = true },
	{ .index = MSR_FS_BASE,			.always = true },
	{ .index = MSR_KERNEL_GS_BASE,		.always = true },
	{ .index = MSR_LSTAR,			.always = true },
	{ .index = MSR_CSTAR,			.always = true },
	{ .index = MSR_SYSCALL_MASK,		.always = true },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,	.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,	.always = false },
	{ .index = MSR_INVALID,			.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static int nested = 1;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);
static u64 __scale_tsc(u64 ratio, u64 tsc);

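/*
 * VMCB clean bits (the "clean" field): a set bit tells the CPU that the
 * corresponding group of VMCB fields is unchanged since the last VMRUN
 * on this CPU and may be taken from its cache. KVM clears a group's bit
 * via mark_dirty() whenever it writes a field in that group.
 */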
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			      & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

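/*
 * With a nested (L2) guest active, the intercept masks seen by hardware
 * must be the union of KVM's own intercepts (kept in the host-save VMCB)
 * and those requested by the L1 hypervisor, so that an exit wanted by
 * either side is actually taken.
 */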
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

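/*
 * The MSR permission map consists of three 2048-byte ranges (one per
 * entry in msrpm_ranges) with two intercept bits per MSR, i.e. four
 * MSRs per byte. Translate an MSR number into the offset of the u32
 * that contains its permission bits.
 */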
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8    */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

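/*
 * EFER.SVME must stay set in the VMCB or VMRUN fails, so it is OR'ed in
 * unconditionally. With shadow paging, EFER.LME is hidden from hardware
 * until the guest actually enables paging (EFER.LMA set), since the
 * shadow MMU controls the real paging mode.
 */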
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret & mask;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

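/*
 * Advance the guest RIP over the just-intercepted instruction. CPUs
 * with the NRIP feature report the next RIP in the VMCB; without it,
 * fall back to the instruction emulator to compute the length.
 */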
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0)
		svm->next_rip = svm->vmcb->control.next_rip;

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

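/*
 * Erratum 383 can cause a machine check while a guest is running on
 * some AMD processors. Setting bit 47 of MSR_AMD64_DC_CFG enables the
 * workaround suggested in AMD's revision guide.
 */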
static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!cpu_has_amd_erratum(amd_erratum_383))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();
}

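/*
 * Per-CPU SVM bring-up: refuse if SVM is already enabled (EFER.SVME
 * set), then set EFER.SVME, point MSR_VM_HSAVE_PA at this CPU's host
 * save area, reset the TSC ratio to 1:1 and collect the OSVW bits.
 */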
static int svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
		       me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);

	if (!sd) {
		printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
		       me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	native_store_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.address;
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

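/*
 * Each MSR gets two adjacent bits in the permission map: an even read
 * bit and an odd write bit. A cleared bit passes the access straight
 * through to the guest; a set bit intercepts it.
 */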
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers, the msrpm_offsets table has overflowed. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		u64 max;

		kvm_has_tsc_control = true;

		/*
		 * Make sure the user can only configure tsc_khz values that
		 * fit into a signed integer.
		 * A min value need not be calculated because it will always
		 * be 1 on all machines, and a value of 0 is used to disable
		 * tsc-scaling for the vcpu.
		 */
		max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));

		kvm_max_guest_tsc_khz = max;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

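/*
 * The TSC ratio is an 8.32 fixed-point multiplier: integer part in bits
 * 39:32, fraction in bits 31:0. Do the 64x40-bit multiplication in
 * three parts so no intermediate product can overflow 64 bits.
 */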
static u64 __scale_tsc(u64 ratio, u64 tsc)
{
	u64 mult, frac, _tsc;

	mult = ratio >> 32;
	frac = ratio & ((1ULL << 32) - 1);

	_tsc  = tsc;
	_tsc *= mult;
	_tsc += (tsc >> 32) * frac;
	_tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;

	return _tsc;
}

static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 _tsc = tsc;

	if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
		_tsc = __scale_tsc(svm->tsc_ratio, tsc);

	return _tsc;
}

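/*
 * Program the desired guest TSC frequency. The ratio handed to hardware
 * is (user_tsc_khz / tsc_khz) in the 8.32 fixed-point format above,
 * computed as (user_tsc_khz << 32) / tsc_khz.
 */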
static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 ratio;
	u64 khz;

	/* TSC scaling supported? */
	if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR))
		return;

	/* TSC-Scaling disabled or guest TSC same frequency as host TSC? */
	if (user_tsc_khz == 0) {
		vcpu->arch.virtual_tsc_khz = 0;
		svm->tsc_ratio = TSC_RATIO_DEFAULT;
		return;
	}

	khz = user_tsc_khz;

	/* TSC scaling required - calculate ratio */
	ratio = khz << 32;
	do_div(ratio, tsc_khz);

	if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
		WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
			  user_tsc_khz);
		return;
	}
	vcpu->arch.virtual_tsc_khz = user_tsc_khz;
	svm->tsc_ratio = ratio;
}

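/*
 * While an L2 guest runs, the VMCB carries L2's TSC offset. Preserve
 * the L1/L2 delta when rewriting: the new base offset goes into the
 * host-save area and the VMCB keeps the nested adjustment on top.
 */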
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	}

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.tsc_offset += adjustment;
	if (is_guest_mode(vcpu))
		svm->nested.hsave->control.tsc_offset += adjustment;
	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	u64 tsc;

	tsc = svm_scale_tsc(vcpu, native_read_tsc());

	return target_tsc - tsc;
}

Avi Kivity6aa8b732006-12-10 02:21:36 -08001033{
Joerg Roedele6101a92008-02-13 18:58:45 +01001034 struct vmcb_control_area *control = &svm->vmcb->control;
1035 struct vmcb_save_area *save = &svm->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001036
Avi Kivitybff78272010-01-07 13:16:08 +02001037 svm->vcpu.fpu_active = 1;
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001038 svm->vcpu.arch.hflags = 0;
Avi Kivitybff78272010-01-07 13:16:08 +02001039
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001040 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1041 set_cr_intercept(svm, INTERCEPT_CR3_READ);
1042 set_cr_intercept(svm, INTERCEPT_CR4_READ);
1043 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1044 set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1045 set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
1046 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001047
Joerg Roedel3aed0412010-11-30 18:03:58 +01001048 set_dr_intercept(svm, INTERCEPT_DR0_READ);
1049 set_dr_intercept(svm, INTERCEPT_DR1_READ);
1050 set_dr_intercept(svm, INTERCEPT_DR2_READ);
1051 set_dr_intercept(svm, INTERCEPT_DR3_READ);
1052 set_dr_intercept(svm, INTERCEPT_DR4_READ);
1053 set_dr_intercept(svm, INTERCEPT_DR5_READ);
1054 set_dr_intercept(svm, INTERCEPT_DR6_READ);
1055 set_dr_intercept(svm, INTERCEPT_DR7_READ);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001056
Joerg Roedel3aed0412010-11-30 18:03:58 +01001057 set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
1058 set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
1059 set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
1060 set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
1061 set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
1062 set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
1063 set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
1064 set_dr_intercept(svm, INTERCEPT_DR7_WRITE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001065
Joerg Roedel18c918c2010-11-30 18:03:59 +01001066 set_exception_intercept(svm, PF_VECTOR);
1067 set_exception_intercept(svm, UD_VECTOR);
1068 set_exception_intercept(svm, MC_VECTOR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001069
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001070 set_intercept(svm, INTERCEPT_INTR);
1071 set_intercept(svm, INTERCEPT_NMI);
1072 set_intercept(svm, INTERCEPT_SMI);
1073 set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
Avi Kivity332b56e2011-11-10 14:57:24 +02001074 set_intercept(svm, INTERCEPT_RDPMC);
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001075 set_intercept(svm, INTERCEPT_CPUID);
1076 set_intercept(svm, INTERCEPT_INVD);
1077 set_intercept(svm, INTERCEPT_HLT);
1078 set_intercept(svm, INTERCEPT_INVLPG);
1079 set_intercept(svm, INTERCEPT_INVLPGA);
1080 set_intercept(svm, INTERCEPT_IOIO_PROT);
1081 set_intercept(svm, INTERCEPT_MSR_PROT);
1082 set_intercept(svm, INTERCEPT_TASK_SWITCH);
1083 set_intercept(svm, INTERCEPT_SHUTDOWN);
1084 set_intercept(svm, INTERCEPT_VMRUN);
1085 set_intercept(svm, INTERCEPT_VMMCALL);
1086 set_intercept(svm, INTERCEPT_VMLOAD);
1087 set_intercept(svm, INTERCEPT_VMSAVE);
1088 set_intercept(svm, INTERCEPT_STGI);
1089 set_intercept(svm, INTERCEPT_CLGI);
1090 set_intercept(svm, INTERCEPT_SKINIT);
1091 set_intercept(svm, INTERCEPT_WBINVD);
1092 set_intercept(svm, INTERCEPT_MONITOR);
1093 set_intercept(svm, INTERCEPT_MWAIT);
Joerg Roedel81dd35d2010-12-07 17:15:06 +01001094 set_intercept(svm, INTERCEPT_XSETBV);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001095
1096 control->iopm_base_pa = iopm_base;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001097 control->msrpm_base_pa = __pa(svm->msrpm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001098 control->int_ctl = V_INTR_MASKING_MASK;
1099
1100 init_seg(&save->es);
1101 init_seg(&save->ss);
1102 init_seg(&save->ds);
1103 init_seg(&save->fs);
1104 init_seg(&save->gs);
1105
1106 save->cs.selector = 0xf000;
1107 /* Executable/Readable Code Segment */
1108 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1109 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1110 save->cs.limit = 0xffff;
Avi Kivityd92899a2007-02-12 00:54:38 -08001111 /*
1112 * cs.base should really be 0xffff0000, but vmx can't handle that, so
1113 * be consistent with it.
1114 *
1115 * Replace when we have real mode working for vmx.
1116 */
1117 save->cs.base = 0xf0000;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001118
1119 save->gdtr.limit = 0xffff;
1120 save->idtr.limit = 0xffff;
1121
1122 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1123 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1124
Marcelo Tosattieaa48512010-08-31 19:13:14 -03001125 svm_set_efer(&svm->vcpu, 0);
Mike Dayd77c26f2007-10-08 09:02:08 -04001126 save->dr6 = 0xffff0ff0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001127 save->dr7 = 0x400;
Avi Kivityf6e78472010-08-02 15:30:20 +03001128 kvm_set_rflags(&svm->vcpu, 2);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001129 save->rip = 0x0000fff0;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001130 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001131
Joerg Roedele0231712010-02-24 18:59:10 +01001132 /*
1133 * This is the guest-visible cr0 value.
Eduardo Habkost18fa0002009-10-24 02:49:59 -02001134 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
Avi Kivity6aa8b732006-12-10 02:21:36 -08001135 */
Marcelo Tosatti678041a2010-08-31 19:13:13 -03001136 svm->vcpu.arch.cr0 = 0;
1137 (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
Eduardo Habkost18fa0002009-10-24 02:49:59 -02001138
Rusty Russell66aee912007-07-17 23:34:16 +10001139 save->cr4 = X86_CR4_PAE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001140 /* rdx = ?? */
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001141
1142 if (npt_enabled) {
1143 /* Setup VMCB for Nested Paging */
1144 control->nested_ctl = 1;
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001145 clr_intercept(svm, INTERCEPT_INVLPG);
Joerg Roedel18c918c2010-11-30 18:03:59 +01001146 clr_exception_intercept(svm, PF_VECTOR);
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001147 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1148 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001149 save->g_pat = 0x0007040600070406ULL;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001150 save->cr3 = 0;
1151 save->cr4 = 0;
1152 }
Joerg Roedelf40f6a42010-12-03 15:25:15 +01001153 svm->asid_generation = 0;
Alexander Graf1371d902008-11-25 20:17:04 +01001154
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001155 svm->nested.vmcb = 0;
Joerg Roedel2af91942009-08-07 11:49:28 +02001156 svm->vcpu.arch.hflags = 0;
1157
Avi Kivity2a6b20b2010-11-09 16:15:42 +02001158 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
Mark Langsdorf565d0992009-10-06 14:25:02 -05001159 control->pause_filter_count = 3000;
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001160 set_intercept(svm, INTERCEPT_PAUSE);
Mark Langsdorf565d0992009-10-06 14:25:02 -05001161 }
1162
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001163 mark_all_dirty(svm->vmcb);
1164
Joerg Roedel2af91942009-08-07 11:49:28 +02001165 enable_gif(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001166}
1167
Avi Kivitye00c8cf2007-10-21 11:00:39 +02001168static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
Avi Kivity04d2cc72007-09-10 18:10:54 +03001169{
1170 struct vcpu_svm *svm = to_svm(vcpu);
1171
Joerg Roedele6101a92008-02-13 18:58:45 +01001172 init_vmcb(svm);
Avi Kivity70433382007-11-07 12:57:23 +02001173
Gleb Natapovc5af89b2009-06-09 15:56:26 +03001174 if (!kvm_vcpu_is_bsp(vcpu)) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001175 kvm_rip_write(vcpu, 0);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001176 svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
1177 svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
Avi Kivity70433382007-11-07 12:57:23 +02001178 }
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001179 vcpu->arch.regs_avail = ~0;
1180 vcpu->arch.regs_dirty = ~0;
Avi Kivitye00c8cf2007-10-21 11:00:39 +02001181
1182 return 0;
Avi Kivity04d2cc72007-09-10 18:10:54 +03001183}
1184
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001185static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001186{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001187 struct vcpu_svm *svm;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001188 struct page *page;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001189 struct page *msrpm_pages;
Alexander Grafb286d5d2008-11-25 20:17:05 +01001190 struct page *hsave_page;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001191 struct page *nested_msrpm_pages;
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001192 int err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001193
Rusty Russellc16f8622007-07-30 21:12:19 +10001194 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001195 if (!svm) {
1196 err = -ENOMEM;
1197 goto out;
1198 }
1199
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001200 svm->tsc_ratio = TSC_RATIO_DEFAULT;
1201
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001202 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
1203 if (err)
1204 goto free_svm;
1205
Joerg Roedelf65c2292008-02-13 18:58:46 +01001206 err = -ENOMEM;
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001207 page = alloc_page(GFP_KERNEL);
1208 if (!page)
1209 goto uninit;
1210
Joerg Roedelf65c2292008-02-13 18:58:46 +01001211 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1212 if (!msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001213 goto free_page1;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001214
1215 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1216 if (!nested_msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001217 goto free_page2;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001218
Alexander Grafb286d5d2008-11-25 20:17:05 +01001219 hsave_page = alloc_page(GFP_KERNEL);
1220 if (!hsave_page)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001221 goto free_page3;
1222
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001223 svm->nested.hsave = page_address(hsave_page);
Alexander Grafb286d5d2008-11-25 20:17:05 +01001224
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001225 svm->msrpm = page_address(msrpm_pages);
1226 svm_vcpu_init_msrpm(svm->msrpm);
1227
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001228 svm->nested.msrpm = page_address(nested_msrpm_pages);
Joerg Roedel323c3d82010-03-01 15:34:37 +01001229 svm_vcpu_init_msrpm(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01001230
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001231 svm->vmcb = page_address(page);
1232 clear_page(svm->vmcb);
1233 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
1234 svm->asid_generation = 0;
Joerg Roedele6101a92008-02-13 18:58:45 +01001235 init_vmcb(svm);
Zachary Amsden99e3e302010-08-19 22:07:17 -10001236 kvm_write_tsc(&svm->vcpu, 0);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001237
Jan Kiszka10ab25c2010-05-25 16:01:50 +02001238 err = fx_init(&svm->vcpu);
1239 if (err)
1240 goto free_page4;
1241
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001242 svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
Gleb Natapovc5af89b2009-06-09 15:56:26 +03001243 if (kvm_vcpu_is_bsp(&svm->vcpu))
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001244 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001245
Boris Ostrovsky2b036c62012-01-09 14:00:35 -05001246 svm_init_osvw(&svm->vcpu);
1247
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001248 return &svm->vcpu;
Avi Kivity36241b82006-12-22 01:05:20 -08001249
Jan Kiszka10ab25c2010-05-25 16:01:50 +02001250free_page4:
1251 __free_page(hsave_page);
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001252free_page3:
1253 __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1254free_page2:
1255 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1256free_page1:
1257 __free_page(page);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001258uninit:
1259 kvm_vcpu_uninit(&svm->vcpu);
1260free_svm:
Rusty Russella4770342007-08-01 14:46:11 +10001261 kmem_cache_free(kvm_vcpu_cache, svm);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001262out:
1263 return ERR_PTR(err);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001264}
1265
1266static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1267{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001268 struct vcpu_svm *svm = to_svm(vcpu);
1269
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001270 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
Joerg Roedelf65c2292008-02-13 18:58:46 +01001271 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001272 __free_page(virt_to_page(svm->nested.hsave));
1273 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001274 kvm_vcpu_uninit(vcpu);
Rusty Russella4770342007-08-01 14:46:11 +10001275 kmem_cache_free(kvm_vcpu_cache, svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001276}
1277
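/*
 * Called when the vcpu is scheduled onto a physical CPU: save the host
 * segment and MSR state that running the guest may clobber, and make
 * sure the CPU's TSC ratio MSR matches this vcpu's ratio.
 */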
Avi Kivity15ad7142007-07-11 18:17:21 +03001278static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001279{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001280 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03001281 int i;
Avi Kivity0cc50642007-03-25 12:07:27 +02001282
Avi Kivity0cc50642007-03-25 12:07:27 +02001283 if (unlikely(cpu != vcpu->cpu)) {
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03001284 svm->asid_generation = 0;
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001285 mark_all_dirty(svm->vmcb);
Avi Kivity0cc50642007-03-25 12:07:27 +02001286 }
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001287
Avi Kivity82ca2d12010-10-21 12:20:34 +02001288#ifdef CONFIG_X86_64
1289 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
1290#endif
Avi Kivitydacccfd2010-10-21 12:20:33 +02001291 savesegment(fs, svm->host.fs);
1292 savesegment(gs, svm->host.gs);
1293 svm->host.ldt = kvm_read_ldt();
1294
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001295 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001296 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001297
1298 if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
1299 svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
1300 __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
1301 wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
1302 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001303}
1304
1305static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1306{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001307 struct vcpu_svm *svm = to_svm(vcpu);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001308 int i;
1309
Avi Kivitye1beb1d2007-11-18 13:50:24 +02001310 ++vcpu->stat.host_state_reload;
Avi Kivitydacccfd2010-10-21 12:20:33 +02001311 kvm_load_ldt(svm->host.ldt);
1312#ifdef CONFIG_X86_64
1313 loadsegment(fs, svm->host.fs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02001314 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
Joerg Roedel893a5ab2011-01-14 16:45:01 +01001315 load_gs_index(svm->host.gs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02001316#else
Avi Kivity831ca602011-03-08 16:09:51 +02001317#ifdef CONFIG_X86_32_LAZY_GS
Avi Kivitydacccfd2010-10-21 12:20:33 +02001318 loadsegment(gs, svm->host.gs);
1319#endif
Avi Kivity831ca602011-03-08 16:09:51 +02001320#endif
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001321 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001322 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001323}
1324
Avi Kivity6aa8b732006-12-10 02:21:36 -08001325static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1326{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001327 return to_svm(vcpu)->vmcb->save.rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001328}
1329
1330static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1331{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001332 to_svm(vcpu)->vmcb->save.rflags = rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001333}
1334
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001335static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1336{
1337 switch (reg) {
1338 case VCPU_EXREG_PDPTR:
1339 BUG_ON(!npt_enabled);
Avi Kivity9f8fe502010-12-05 17:30:00 +02001340 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001341 break;
1342 default:
1343 BUG();
1344 }
1345}
1346
Alexander Graff0b85052008-11-25 20:17:01 +01001347static void svm_set_vintr(struct vcpu_svm *svm)
1348{
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001349 set_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01001350}
1351
1352static void svm_clear_vintr(struct vcpu_svm *svm)
1353{
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001354 clr_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01001355}
1356
Avi Kivity6aa8b732006-12-10 02:21:36 -08001357static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1358{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001359 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001360
1361 switch (seg) {
1362 case VCPU_SREG_CS: return &save->cs;
1363 case VCPU_SREG_DS: return &save->ds;
1364 case VCPU_SREG_ES: return &save->es;
1365 case VCPU_SREG_FS: return &save->fs;
1366 case VCPU_SREG_GS: return &save->gs;
1367 case VCPU_SREG_SS: return &save->ss;
1368 case VCPU_SREG_TR: return &save->tr;
1369 case VCPU_SREG_LDTR: return &save->ldtr;
1370 }
1371 BUG();
Al Viro8b6d44c2007-02-09 16:38:40 +00001372 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001373}
1374
1375static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1376{
1377 struct vmcb_seg *s = svm_seg(vcpu, seg);
1378
1379 return s->base;
1380}
1381
1382static void svm_get_segment(struct kvm_vcpu *vcpu,
1383 struct kvm_segment *var, int seg)
1384{
1385 struct vmcb_seg *s = svm_seg(vcpu, seg);
1386
1387 var->base = s->base;
1388 var->limit = s->limit;
1389 var->selector = s->selector;
1390 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1391 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1392 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1393 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1394 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1395 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1396 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1397 var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
Amit Shah25022ac2008-10-27 09:04:17 +00001398
Joerg Roedele0231712010-02-24 18:59:10 +01001399 /*
1400 * AMD's VMCB does not have an explicit unusable field, so emulate it
Andre Przywara19bca6a2009-04-28 12:45:30 +02001401 * for cross-vendor migration purposes by treating "not present" as unusable
1402 */
1403 var->unusable = !var->present || (var->type == 0);
1404
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001405 switch (seg) {
1406 case VCPU_SREG_CS:
1407 /*
1408 * SVM always stores 0 for the 'G' bit in the CS selector in
1409 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
1410 * Intel's VMENTRY has a check on the 'G' bit.
1411 */
Amit Shah25022ac2008-10-27 09:04:17 +00001412 var->g = s->limit > 0xfffff;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001413 break;
1414 case VCPU_SREG_TR:
1415 /*
1416 * Work around a bug where the busy flag in the tr selector
1417 * isn't exposed
1418 */
Amit Shahc0d09822008-10-27 09:04:18 +00001419 var->type |= 0x2;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001420 break;
1421 case VCPU_SREG_DS:
1422 case VCPU_SREG_ES:
1423 case VCPU_SREG_FS:
1424 case VCPU_SREG_GS:
1425 /*
1426 * The accessed bit must always be set in the segment
 1427 * descriptor cache; although it can be cleared in the
 1428 * descriptor itself, the cached bit always remains at 1. Since
1429 * Intel has a check on this, set it here to support
1430 * cross-vendor migration.
1431 */
1432 if (!var->unusable)
1433 var->type |= 0x1;
1434 break;
Andre Przywarab586eb02009-04-28 12:45:43 +02001435 case VCPU_SREG_SS:
Joerg Roedele0231712010-02-24 18:59:10 +01001436 /*
 1437 * On AMD CPUs, sometimes the DB bit in the segment
Andre Przywarab586eb02009-04-28 12:45:43 +02001438 * descriptor is left as 1, although the whole segment has
1439 * been made unusable. Clear it here to pass an Intel VMX
 1440 * entry check when cross-vendor migrating.
1441 */
1442 if (var->unusable)
1443 var->db = 0;
1444 break;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001445 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001446}
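/*
 * Illustrative sketch, not part of the driver: the VMCB attrib layout
 * implied by the SVM_SELECTOR_*_SHIFT usage in svm_get_segment() above
 * and in svm_set_segment() below.  The diagram and the helper (its name
 * is hypothetical) are assumptions derived from this file:
 *
 *   bits 0-3: type   bit 4: S   bits 5-6: DPL   bit 7: P
 *   bit 8:    AVL    bit 9: L   bit 10:   DB    bit 11: G
 */
static inline u16 example_pack_seg_attrib(const struct kvm_segment *var)
{
	u16 attrib = var->type & SVM_SELECTOR_TYPE_MASK;

	attrib |= (var->s       & 1) << SVM_SELECTOR_S_SHIFT;
	attrib |= (var->dpl     & 3) << SVM_SELECTOR_DPL_SHIFT;
	attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
	attrib |= (var->avl     & 1) << SVM_SELECTOR_AVL_SHIFT;
	attrib |= (var->l       & 1) << SVM_SELECTOR_L_SHIFT;
	attrib |= (var->db      & 1) << SVM_SELECTOR_DB_SHIFT;
	attrib |= (var->g       & 1) << SVM_SELECTOR_G_SHIFT;

	return attrib;
}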
1447
Izik Eidus2e4d2652008-03-24 19:38:34 +02001448static int svm_get_cpl(struct kvm_vcpu *vcpu)
1449{
1450 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1451
1452 return save->cpl;
1453}
1454
Gleb Natapov89a27f42010-02-16 10:51:48 +02001455static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001456{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001457 struct vcpu_svm *svm = to_svm(vcpu);
1458
Gleb Natapov89a27f42010-02-16 10:51:48 +02001459 dt->size = svm->vmcb->save.idtr.limit;
1460 dt->address = svm->vmcb->save.idtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001461}
1462
Gleb Natapov89a27f42010-02-16 10:51:48 +02001463static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001464{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001465 struct vcpu_svm *svm = to_svm(vcpu);
1466
Gleb Natapov89a27f42010-02-16 10:51:48 +02001467 svm->vmcb->save.idtr.limit = dt->size;
 1468 svm->vmcb->save.idtr.base = dt->address;
Joerg Roedel17a703c2010-12-03 11:45:56 +01001469 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001470}
1471
Gleb Natapov89a27f42010-02-16 10:51:48 +02001472static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001473{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001474 struct vcpu_svm *svm = to_svm(vcpu);
1475
Gleb Natapov89a27f42010-02-16 10:51:48 +02001476 dt->size = svm->vmcb->save.gdtr.limit;
1477 dt->address = svm->vmcb->save.gdtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001478}
1479
Gleb Natapov89a27f42010-02-16 10:51:48 +02001480static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001481{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001482 struct vcpu_svm *svm = to_svm(vcpu);
1483
Gleb Natapov89a27f42010-02-16 10:51:48 +02001484 svm->vmcb->save.gdtr.limit = dt->size;
 1485 svm->vmcb->save.gdtr.base = dt->address;
Joerg Roedel17a703c2010-12-03 11:45:56 +01001486 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001487}
1488
Avi Kivitye8467fd2009-12-29 18:43:06 +02001489static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
1490{
1491}
1492
Avi Kivityaff48ba2010-12-05 18:56:11 +02001493static void svm_decache_cr3(struct kvm_vcpu *vcpu)
1494{
1495}
1496
Anthony Liguori25c4c272007-04-27 09:29:21 +03001497static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -08001498{
1499}
1500
Avi Kivityd2251572010-01-06 10:55:27 +02001501static void update_cr0_intercept(struct vcpu_svm *svm)
1502{
1503 ulong gcr0 = svm->vcpu.arch.cr0;
1504 u64 *hcr0 = &svm->vmcb->save.cr0;
1505
1506 if (!svm->vcpu.fpu_active)
1507 *hcr0 |= SVM_CR0_SELECTIVE_MASK;
1508 else
1509 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
1510 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
1511
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001512 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02001513
1514 if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001515 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
1516 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02001517 } else {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001518 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1519 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02001520 }
1521}
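/*
 * Minimal sketch of the bit mixing update_cr0_intercept() performs above
 * (the helper name is hypothetical): bits covered by 'mask' come from the
 * guest value, all other bits keep the host-controlled value.
 */
static inline u64 example_mix_cr0(u64 host_cr0, u64 guest_cr0, u64 mask)
{
	return (host_cr0 & ~mask) | (guest_cr0 & mask);
}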
1522
Avi Kivity6aa8b732006-12-10 02:21:36 -08001523static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1524{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001525 struct vcpu_svm *svm = to_svm(vcpu);
1526
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001527#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +02001528 if (vcpu->arch.efer & EFER_LME) {
Rusty Russell707d92f2007-07-17 23:19:08 +10001529 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001530 vcpu->arch.efer |= EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001531 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001532 }
1533
Mike Dayd77c26f2007-10-08 09:02:08 -04001534 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001535 vcpu->arch.efer &= ~EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001536 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001537 }
1538 }
1539#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001540 vcpu->arch.cr0 = cr0;
Avi Kivity888f9f32010-01-10 12:14:04 +02001541
1542 if (!npt_enabled)
1543 cr0 |= X86_CR0_PG | X86_CR0_WP;
Avi Kivity02daab22009-12-30 12:40:26 +02001544
1545 if (!vcpu->fpu_active)
Joerg Roedel334df502008-01-21 13:09:33 +01001546 cr0 |= X86_CR0_TS;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001547 /*
 1548 * Re-enable caching here because the QEMU BIOS
 1549 * does not do it; leaving caching disabled results in some delay at
 1550 * reboot.
1551 */
1552 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001553 svm->vmcb->save.cr0 = cr0;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001554 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02001555 update_cr0_intercept(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001556}
1557
Nadav Har'El5e1746d2011-05-25 23:03:24 +03001558static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001559{
Joerg Roedel6394b642008-04-09 14:15:29 +02001560 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
Joerg Roedele5eab0c2008-09-09 19:11:51 +02001561 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
1562
Nadav Har'El5e1746d2011-05-25 23:03:24 +03001563 if (cr4 & X86_CR4_VMXE)
1564 return 1;
1565
Joerg Roedele5eab0c2008-09-09 19:11:51 +02001566 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
Joerg Roedelf40f6a42010-12-03 15:25:15 +01001567 svm_flush_tlb(vcpu);
Joerg Roedel6394b642008-04-09 14:15:29 +02001568
Joerg Roedelec077262008-04-09 14:15:28 +02001569 vcpu->arch.cr4 = cr4;
1570 if (!npt_enabled)
1571 cr4 |= X86_CR4_PAE;
Joerg Roedel6394b642008-04-09 14:15:29 +02001572 cr4 |= host_cr4_mce;
Joerg Roedelec077262008-04-09 14:15:28 +02001573 to_svm(vcpu)->vmcb->save.cr4 = cr4;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001574 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
Nadav Har'El5e1746d2011-05-25 23:03:24 +03001575 return 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001576}
1577
1578static void svm_set_segment(struct kvm_vcpu *vcpu,
1579 struct kvm_segment *var, int seg)
1580{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001581 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001582 struct vmcb_seg *s = svm_seg(vcpu, seg);
1583
1584 s->base = var->base;
1585 s->limit = var->limit;
1586 s->selector = var->selector;
1587 if (var->unusable)
1588 s->attrib = 0;
1589 else {
1590 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1591 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1592 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1593 s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
1594 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1595 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1596 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1597 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1598 }
1599 if (seg == VCPU_SREG_CS)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001600 svm->vmcb->save.cpl
1601 = (svm->vmcb->save.cs.attrib
Avi Kivity6aa8b732006-12-10 02:21:36 -08001602 >> SVM_SELECTOR_DPL_SHIFT) & 3;
1603
Joerg Roedel060d0c92010-12-03 11:45:57 +01001604 mark_dirty(svm->vmcb, VMCB_SEG);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001605}
1606
Gleb Natapov44c11432009-05-11 13:35:52 +03001607static void update_db_intercept(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001608{
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001609 struct vcpu_svm *svm = to_svm(vcpu);
1610
Joerg Roedel18c918c2010-11-30 18:03:59 +01001611 clr_exception_intercept(svm, DB_VECTOR);
1612 clr_exception_intercept(svm, BP_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03001613
Jan Kiszka6be7d302009-10-18 13:24:54 +02001614 if (svm->nmi_singlestep)
Joerg Roedel18c918c2010-11-30 18:03:59 +01001615 set_exception_intercept(svm, DB_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03001616
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001617 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1618 if (vcpu->guest_debug &
1619 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
Joerg Roedel18c918c2010-11-30 18:03:59 +01001620 set_exception_intercept(svm, DB_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001621 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
Joerg Roedel18c918c2010-11-30 18:03:59 +01001622 set_exception_intercept(svm, BP_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001623 } else
1624 vcpu->guest_debug = 0;
Gleb Natapov44c11432009-05-11 13:35:52 +03001625}
1626
Jan Kiszka355be0b2009-10-03 00:31:21 +02001627static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
Gleb Natapov44c11432009-05-11 13:35:52 +03001628{
Gleb Natapov44c11432009-05-11 13:35:52 +03001629 struct vcpu_svm *svm = to_svm(vcpu);
1630
Jan Kiszkaae675ef2008-12-15 13:52:10 +01001631 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1632 svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
1633 else
1634 svm->vmcb->save.dr7 = vcpu->arch.dr7;
1635
Joerg Roedel72214b92010-12-03 11:45:55 +01001636 mark_dirty(svm->vmcb, VMCB_DR);
1637
Jan Kiszka355be0b2009-10-03 00:31:21 +02001638 update_db_intercept(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001639}
1640
Tejun Heo0fe1e002009-10-29 22:34:14 +09001641static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001642{
Tejun Heo0fe1e002009-10-29 22:34:14 +09001643 if (sd->next_asid > sd->max_asid) {
1644 ++sd->asid_generation;
1645 sd->next_asid = 1;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001646 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001647 }
1648
Tejun Heo0fe1e002009-10-29 22:34:14 +09001649 svm->asid_generation = sd->asid_generation;
1650 svm->vmcb->control.asid = sd->next_asid++;
Joerg Roedeld48086d2010-12-03 11:45:51 +01001651
1652 mark_dirty(svm->vmcb, VMCB_ASID);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001653}
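/*
 * Illustrative model of the per-CPU ASID scheme used by new_asid() above;
 * the struct and helper names are hypothetical.  ASIDs are handed out
 * sequentially, and once they run out the generation counter is bumped
 * and a full TLB flush is requested so that translations tagged with a
 * recycled ASID cannot survive from the previous generation.
 */
struct example_asid_pool {
	u32 next_asid, max_asid;
	u64 generation;
};

static inline u32 example_alloc_asid(struct example_asid_pool *p, bool *flush)
{
	*flush = false;
	if (p->next_asid > p->max_asid) {
		p->generation++;
		p->next_asid = 1;	/* mirrors sd->next_asid = 1 above */
		*flush = true;
	}
	return p->next_asid++;
}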
1654
Gleb Natapov020df072010-04-13 10:05:23 +03001655static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001656{
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001657 struct vcpu_svm *svm = to_svm(vcpu);
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001658
Gleb Natapov020df072010-04-13 10:05:23 +03001659 svm->vmcb->save.dr7 = value;
Joerg Roedel72214b92010-12-03 11:45:55 +01001660 mark_dirty(svm->vmcb, VMCB_DR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001661}
1662
Avi Kivity851ba692009-08-24 11:10:17 +03001663static int pf_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001664{
Gleb Natapov631bc482010-10-14 11:22:52 +02001665 u64 fault_address = svm->vmcb->control.exit_info_2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001666 u32 error_code;
Gleb Natapov631bc482010-10-14 11:22:52 +02001667 int r = 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001668
Gleb Natapov631bc482010-10-14 11:22:52 +02001669 switch (svm->apf_reason) {
1670 default:
1671 error_code = svm->vmcb->control.exit_info_1;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001672
Gleb Natapov631bc482010-10-14 11:22:52 +02001673 trace_kvm_page_fault(fault_address, error_code);
1674 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
1675 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
Andre Przywaradc25e892010-12-21 11:12:07 +01001676 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
1677 svm->vmcb->control.insn_bytes,
1678 svm->vmcb->control.insn_len);
Gleb Natapov631bc482010-10-14 11:22:52 +02001679 break;
1680 case KVM_PV_REASON_PAGE_NOT_PRESENT:
1681 svm->apf_reason = 0;
1682 local_irq_disable();
1683 kvm_async_pf_task_wait(fault_address);
1684 local_irq_enable();
1685 break;
1686 case KVM_PV_REASON_PAGE_READY:
1687 svm->apf_reason = 0;
1688 local_irq_disable();
1689 kvm_async_pf_task_wake(fault_address);
1690 local_irq_enable();
1691 break;
1692 }
1693 return r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001694}
1695
Avi Kivity851ba692009-08-24 11:10:17 +03001696static int db_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001697{
Avi Kivity851ba692009-08-24 11:10:17 +03001698 struct kvm_run *kvm_run = svm->vcpu.run;
1699
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001700 if (!(svm->vcpu.guest_debug &
Gleb Natapov44c11432009-05-11 13:35:52 +03001701 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
Jan Kiszka6be7d302009-10-18 13:24:54 +02001702 !svm->nmi_singlestep) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001703 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1704 return 1;
1705 }
Gleb Natapov44c11432009-05-11 13:35:52 +03001706
Jan Kiszka6be7d302009-10-18 13:24:54 +02001707 if (svm->nmi_singlestep) {
1708 svm->nmi_singlestep = false;
Gleb Natapov44c11432009-05-11 13:35:52 +03001709 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
1710 svm->vmcb->save.rflags &=
1711 ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1712 update_db_intercept(&svm->vcpu);
1713 }
1714
1715 if (svm->vcpu.guest_debug &
Joerg Roedele0231712010-02-24 18:59:10 +01001716 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
Gleb Natapov44c11432009-05-11 13:35:52 +03001717 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1718 kvm_run->debug.arch.pc =
1719 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1720 kvm_run->debug.arch.exception = DB_VECTOR;
1721 return 0;
1722 }
1723
1724 return 1;
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001725}
1726
Avi Kivity851ba692009-08-24 11:10:17 +03001727static int bp_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001728{
Avi Kivity851ba692009-08-24 11:10:17 +03001729 struct kvm_run *kvm_run = svm->vcpu.run;
1730
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001731 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1732 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1733 kvm_run->debug.arch.exception = BP_VECTOR;
1734 return 0;
1735}
1736
Avi Kivity851ba692009-08-24 11:10:17 +03001737static int ud_interception(struct vcpu_svm *svm)
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001738{
1739 int er;
1740
Andre Przywara51d8b662010-12-21 11:12:02 +01001741 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001742 if (er != EMULATE_DONE)
Avi Kivity7ee5d9402007-11-25 15:22:50 +02001743 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001744 return 1;
1745}
1746
Avi Kivity6b52d182010-01-21 15:31:47 +02001747static void svm_fpu_activate(struct kvm_vcpu *vcpu)
Anthony Liguori7807fa62007-04-23 09:17:21 -05001748{
Avi Kivity6b52d182010-01-21 15:31:47 +02001749 struct vcpu_svm *svm = to_svm(vcpu);
Joerg Roedel66a562f2010-02-19 16:23:08 +01001750
Joerg Roedel18c918c2010-11-30 18:03:59 +01001751 clr_exception_intercept(svm, NM_VECTOR);
Joerg Roedel66a562f2010-02-19 16:23:08 +01001752
Rusty Russelle756fc62007-07-30 20:07:08 +10001753 svm->vcpu.fpu_active = 1;
Avi Kivityd2251572010-01-06 10:55:27 +02001754 update_cr0_intercept(svm);
Avi Kivity6b52d182010-01-21 15:31:47 +02001755}
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001756
Avi Kivity6b52d182010-01-21 15:31:47 +02001757static int nm_interception(struct vcpu_svm *svm)
1758{
1759 svm_fpu_activate(&svm->vcpu);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001760 return 1;
Anthony Liguori7807fa62007-04-23 09:17:21 -05001761}
1762
Joerg Roedel67ec6602010-05-17 14:43:35 +02001763static bool is_erratum_383(void)
1764{
1765 int err, i;
1766 u64 value;
1767
1768 if (!erratum_383_found)
1769 return false;
1770
1771 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
1772 if (err)
1773 return false;
1774
 1775 /* Bit 62 may or may not be set for this MCE */
1776 value &= ~(1ULL << 62);
1777
1778 if (value != 0xb600000000010015ULL)
1779 return false;
1780
1781 /* Clear MCi_STATUS registers */
1782 for (i = 0; i < 6; ++i)
1783 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
1784
1785 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
1786 if (!err) {
1787 u32 low, high;
1788
1789 value &= ~(1ULL << 2);
1790 low = lower_32_bits(value);
1791 high = upper_32_bits(value);
1792
1793 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
1794 }
1795
1796 /* Flush tlb to evict multi-match entries */
1797 __flush_tlb_all();
1798
1799 return true;
1800}
1801
Joerg Roedelfe5913e2010-05-17 14:43:34 +02001802static void svm_handle_mce(struct vcpu_svm *svm)
Joerg Roedel53371b52008-04-09 14:15:30 +02001803{
Joerg Roedel67ec6602010-05-17 14:43:35 +02001804 if (is_erratum_383()) {
1805 /*
1806 * Erratum 383 triggered. Guest state is corrupt so kill the
1807 * guest.
1808 */
1809 pr_err("KVM: Guest triggered AMD Erratum 383\n");
1810
Avi Kivitya8eeb042010-05-10 12:34:53 +03001811 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
Joerg Roedel67ec6602010-05-17 14:43:35 +02001812
1813 return;
1814 }
1815
Joerg Roedel53371b52008-04-09 14:15:30 +02001816 /*
1817 * On an #MC intercept the MCE handler is not called automatically in
1818 * the host. So do it by hand here.
1819 */
1820 asm volatile (
1821 "int $0x12\n");
1822 /* not sure if we ever come back to this point */
1823
Joerg Roedelfe5913e2010-05-17 14:43:34 +02001824 return;
1825}
1826
1827static int mc_interception(struct vcpu_svm *svm)
1828{
Joerg Roedel53371b52008-04-09 14:15:30 +02001829 return 1;
1830}
1831
Avi Kivity851ba692009-08-24 11:10:17 +03001832static int shutdown_interception(struct vcpu_svm *svm)
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001833{
Avi Kivity851ba692009-08-24 11:10:17 +03001834 struct kvm_run *kvm_run = svm->vcpu.run;
1835
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001836 /*
1837 * VMCB is undefined after a SHUTDOWN intercept
1838 * so reinitialize it.
1839 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001840 clear_page(svm->vmcb);
Joerg Roedele6101a92008-02-13 18:58:45 +01001841 init_vmcb(svm);
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001842
1843 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1844 return 0;
1845}
1846
Avi Kivity851ba692009-08-24 11:10:17 +03001847static int io_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001848{
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001849 struct kvm_vcpu *vcpu = &svm->vcpu;
Mike Dayd77c26f2007-10-08 09:02:08 -04001850 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
Jan Kiszka34c33d12009-02-08 13:28:15 +01001851 int size, in, string;
Avi Kivity039576c2007-03-20 12:46:50 +02001852 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001853
Rusty Russelle756fc62007-07-30 20:07:08 +10001854 ++svm->vcpu.stat.io_exits;
Laurent Viviere70669a2007-08-05 10:36:40 +03001855 string = (io_info & SVM_IOIO_STR_MASK) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02001856 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001857 if (string || in)
Andre Przywara51d8b662010-12-21 11:12:02 +01001858 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001859
Avi Kivity039576c2007-03-20 12:46:50 +02001860 port = io_info >> 16;
1861 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001862 svm->next_rip = svm->vmcb->control.exit_info_2;
Guillaume Thouvenine93f36b2008-10-28 10:51:30 +01001863 skip_emulated_instruction(&svm->vcpu);
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001864
1865 return kvm_fast_pio_out(vcpu, size, port);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001866}
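/*
 * Sketch of how io_interception() above picks exit_info_1 apart, using
 * the same SVM_IOIO_* masks; the struct and helper are hypothetical.
 * The port number lives in the upper 16 bits, the access size below
 * SVM_IOIO_SIZE_SHIFT, and the direction/string flags in the low bits.
 */
struct example_ioio {
	unsigned int port;
	int size;
	bool in, string;
};

static inline struct example_ioio example_decode_ioio(u32 io_info)
{
	struct example_ioio d = {
		.port   = io_info >> 16,
		.size   = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT,
		.in     = (io_info & SVM_IOIO_TYPE_MASK) != 0,
		.string = (io_info & SVM_IOIO_STR_MASK) != 0,
	};
	return d;
}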
1867
Avi Kivity851ba692009-08-24 11:10:17 +03001868static int nmi_interception(struct vcpu_svm *svm)
Joerg Roedelc47f0982008-04-30 17:56:00 +02001869{
1870 return 1;
1871}
1872
Avi Kivity851ba692009-08-24 11:10:17 +03001873static int intr_interception(struct vcpu_svm *svm)
Joerg Roedela0698052008-04-30 17:56:01 +02001874{
1875 ++svm->vcpu.stat.irq_exits;
1876 return 1;
1877}
1878
Avi Kivity851ba692009-08-24 11:10:17 +03001879static int nop_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001880{
1881 return 1;
1882}
1883
Avi Kivity851ba692009-08-24 11:10:17 +03001884static int halt_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001885{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001886 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
Rusty Russelle756fc62007-07-30 20:07:08 +10001887 skip_emulated_instruction(&svm->vcpu);
1888 return kvm_emulate_halt(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001889}
1890
Avi Kivity851ba692009-08-24 11:10:17 +03001891static int vmmcall_interception(struct vcpu_svm *svm)
Avi Kivity02e235b2007-02-19 14:37:47 +02001892{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001893 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Rusty Russelle756fc62007-07-30 20:07:08 +10001894 skip_emulated_instruction(&svm->vcpu);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001895 kvm_emulate_hypercall(&svm->vcpu);
1896 return 1;
Avi Kivity02e235b2007-02-19 14:37:47 +02001897}
1898
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001899static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
1900{
1901 struct vcpu_svm *svm = to_svm(vcpu);
1902
1903 return svm->nested.nested_cr3;
1904}
1905
Avi Kivitye4e517b2011-07-28 11:36:17 +03001906static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
1907{
1908 struct vcpu_svm *svm = to_svm(vcpu);
1909 u64 cr3 = svm->nested.nested_cr3;
1910 u64 pdpte;
1911 int ret;
1912
1913 ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
1914 offset_in_page(cr3) + index * 8, 8);
1915 if (ret)
1916 return 0;
1917 return pdpte;
1918}
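/*
 * Sketch (hypothetical helper) of the guest-physical address that
 * nested_svm_get_tdp_pdptr() above reads from: the page frame comes from
 * gpa_to_gfn(cr3), the in-page offset from offset_in_page(cr3) plus
 * 8 bytes per entry, which works out to nested CR3 plus index * 8.
 */
static inline u64 example_pdpte_gpa(u64 nested_cr3, int index)
{
	return nested_cr3 + index * 8;
}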
1919
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001920static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
1921 unsigned long root)
1922{
1923 struct vcpu_svm *svm = to_svm(vcpu);
1924
1925 svm->vmcb->control.nested_cr3 = root;
Joerg Roedelb2747162010-12-03 11:45:53 +01001926 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedelf40f6a42010-12-03 15:25:15 +01001927 svm_flush_tlb(vcpu);
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001928}
1929
Avi Kivity6389ee92010-11-29 16:12:30 +02001930static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
1931 struct x86_exception *fault)
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001932{
1933 struct vcpu_svm *svm = to_svm(vcpu);
1934
1935 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
1936 svm->vmcb->control.exit_code_hi = 0;
Avi Kivity6389ee92010-11-29 16:12:30 +02001937 svm->vmcb->control.exit_info_1 = fault->error_code;
1938 svm->vmcb->control.exit_info_2 = fault->address;
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001939
1940 nested_svm_vmexit(svm);
1941}
1942
Joerg Roedel4b161842010-09-10 17:31:03 +02001943static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
1944{
1945 int r;
1946
1947 r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
1948
1949 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
1950 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
Avi Kivitye4e517b2011-07-28 11:36:17 +03001951 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
Joerg Roedel4b161842010-09-10 17:31:03 +02001952 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
1953 vcpu->arch.mmu.shadow_root_level = get_npt_level();
1954 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
1955
1956 return r;
1957}
1958
1959static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
1960{
1961 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
1962}
1963
Alexander Grafc0725422008-11-25 20:17:03 +01001964static int nested_svm_check_permissions(struct vcpu_svm *svm)
1965{
Avi Kivityf6801df2010-01-21 15:31:50 +02001966 if (!(svm->vcpu.arch.efer & EFER_SVME)
Alexander Grafc0725422008-11-25 20:17:03 +01001967 || !is_paging(&svm->vcpu)) {
1968 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
1969 return 1;
1970 }
1971
1972 if (svm->vmcb->save.cpl) {
1973 kvm_inject_gp(&svm->vcpu, 0);
1974 return 1;
1975 }
1976
1977 return 0;
1978}
1979
Alexander Grafcf74a782008-11-25 20:17:08 +01001980static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1981 bool has_error_code, u32 error_code)
1982{
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01001983 int vmexit;
1984
Joerg Roedel20307532010-11-29 17:51:48 +01001985 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel0295ad72009-08-07 11:49:37 +02001986 return 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01001987
Joerg Roedel0295ad72009-08-07 11:49:37 +02001988 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1989 svm->vmcb->control.exit_code_hi = 0;
1990 svm->vmcb->control.exit_info_1 = error_code;
1991 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1992
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01001993 vmexit = nested_svm_intercept(svm);
1994 if (vmexit == NESTED_EXIT_DONE)
1995 svm->nested.exit_required = true;
1996
1997 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01001998}
1999
Joerg Roedel8fe54652010-02-19 16:23:01 +01002000/* This function returns true if it is safe to enable the IRQ window */
2001static inline bool nested_svm_intr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002002{
Joerg Roedel20307532010-11-29 17:51:48 +01002003 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002004 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002005
Joerg Roedel26666952009-08-07 11:49:46 +02002006 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002007 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002008
Joerg Roedel26666952009-08-07 11:49:46 +02002009 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002010 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01002011
Gleb Natapova0a07cd2010-09-20 10:15:32 +02002012 /*
 2013 * If a vmexit was already requested (by an intercepted exception,
 2014 * for instance), do not overwrite it with an "external interrupt"
 2015 * vmexit.
2016 */
2017 if (svm->nested.exit_required)
2018 return false;
2019
Joerg Roedel197717d2010-02-24 18:59:19 +01002020 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
2021 svm->vmcb->control.exit_info_1 = 0;
2022 svm->vmcb->control.exit_info_2 = 0;
Joerg Roedel26666952009-08-07 11:49:46 +02002023
Joerg Roedelcd3ff652009-10-09 16:08:26 +02002024 if (svm->nested.intercept & 1ULL) {
2025 /*
2026 * The #vmexit can't be emulated here directly because this
 2027 * code path runs with irqs and preemption disabled. A
 2028 * #vmexit emulation might sleep. Only signal a request for
 2029 * the #vmexit here.
2030 */
2031 svm->nested.exit_required = true;
Joerg Roedel236649d2009-10-09 16:08:30 +02002032 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
Joerg Roedel8fe54652010-02-19 16:23:01 +01002033 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01002034 }
2035
Joerg Roedel8fe54652010-02-19 16:23:01 +01002036 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002037}
2038
Joerg Roedel887f5002010-02-24 18:59:12 +01002039/* This function returns true if it is safe to enable the NMI window */
2040static inline bool nested_svm_nmi(struct vcpu_svm *svm)
2041{
Joerg Roedel20307532010-11-29 17:51:48 +01002042 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel887f5002010-02-24 18:59:12 +01002043 return true;
2044
2045 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
2046 return true;
2047
2048 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
2049 svm->nested.exit_required = true;
2050
2051 return false;
2052}
2053
Joerg Roedel7597f122010-02-19 16:23:00 +01002054static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002055{
2056 struct page *page;
2057
Joerg Roedel6c3bd3d2010-02-19 16:23:04 +01002058 might_sleep();
2059
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002060 page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002061 if (is_error_page(page))
2062 goto error;
2063
Joerg Roedel7597f122010-02-19 16:23:00 +01002064 *_page = page;
2065
2066 return kmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002067
2068error:
2069 kvm_release_page_clean(page);
2070 kvm_inject_gp(&svm->vcpu, 0);
2071
2072 return NULL;
2073}
2074
Joerg Roedel7597f122010-02-19 16:23:00 +01002075static void nested_svm_unmap(struct page *page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002076{
Joerg Roedel7597f122010-02-19 16:23:00 +01002077 kunmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002078 kvm_release_page_dirty(page);
2079}
2080
Joerg Roedelce2ac082010-03-01 15:34:39 +01002081static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002082{
Joerg Roedelce2ac082010-03-01 15:34:39 +01002083 unsigned port;
2084 u8 val, bit;
2085 u64 gpa;
2086
2087 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
2088 return NESTED_EXIT_HOST;
2089
2090 port = svm->vmcb->control.exit_info_1 >> 16;
2091 gpa = svm->nested.vmcb_iopm + (port / 8);
2092 bit = port % 8;
2093 val = 0;
2094
2095 if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
2096 val &= (1 << bit);
2097
2098 return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
2099}
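/*
 * Flat-bitmap model of the nested IOPM probe in
 * nested_svm_intercept_ioio() above (the helper is hypothetical): one bit
 * per I/O port, so the byte index is port / 8 and the bit index port % 8.
 */
static inline bool example_ioport_intercepted(const u8 *iopm, u16 port)
{
	return (iopm[port / 8] >> (port % 8)) & 1;
}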
2100
Joerg Roedeld2477822010-03-01 15:34:34 +01002101static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002102{
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002103 u32 offset, msr, value;
2104 int write, mask;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002105
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002106 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
Joerg Roedeld2477822010-03-01 15:34:34 +01002107 return NESTED_EXIT_HOST;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002108
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002109 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2110 offset = svm_msrpm_offset(msr);
2111 write = svm->vmcb->control.exit_info_1 & 1;
2112 mask = 1 << ((2 * (msr & 0xf)) + write);
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002113
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002114 if (offset == MSR_INVALID)
2115 return NESTED_EXIT_DONE;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002116
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002117 /* Offset is in 32-bit units, but we need it in 8-bit (byte) units */
2118 offset *= 4;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002119
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002120 if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
2121 return NESTED_EXIT_DONE;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002122
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002123 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002124}
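/*
 * Sketch of the bit addressing used by nested_svm_exit_handled_msr()
 * above (hypothetical helper): every MSR owns two adjacent bits, read
 * then write, and sixteen MSRs share one 32-bit chunk, so within a chunk
 * the bit for an access is 2 * (msr % 16) + write.
 */
static inline int example_msrpm_bit(u32 msr, int write)
{
	return 2 * (msr & 0xf) + write;	/* bit 0..31 inside one u32 chunk */
}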
2125
Joerg Roedel410e4d52009-08-07 11:49:44 +02002126static int nested_svm_exit_special(struct vcpu_svm *svm)
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002127{
Alexander Grafcf74a782008-11-25 20:17:08 +01002128 u32 exit_code = svm->vmcb->control.exit_code;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002129
Joerg Roedel410e4d52009-08-07 11:49:44 +02002130 switch (exit_code) {
2131 case SVM_EXIT_INTR:
2132 case SVM_EXIT_NMI:
Joerg Roedelff47a492010-04-22 12:33:14 +02002133 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
Joerg Roedel410e4d52009-08-07 11:49:44 +02002134 return NESTED_EXIT_HOST;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002135 case SVM_EXIT_NPF:
Joerg Roedele0231712010-02-24 18:59:10 +01002136 /* For now we are always handling NPFs when using them */
Joerg Roedel410e4d52009-08-07 11:49:44 +02002137 if (npt_enabled)
2138 return NESTED_EXIT_HOST;
2139 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002140 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
Gleb Natapov631bc482010-10-14 11:22:52 +02002141 /* When we're shadowing, trap PFs, but not async PF */
2142 if (!npt_enabled && svm->apf_reason == 0)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002143 return NESTED_EXIT_HOST;
2144 break;
Joerg Roedel66a562f2010-02-19 16:23:08 +01002145 case SVM_EXIT_EXCP_BASE + NM_VECTOR:
2146 nm_interception(svm);
2147 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002148 default:
2149 break;
Alexander Grafcf74a782008-11-25 20:17:08 +01002150 }
2151
Joerg Roedel410e4d52009-08-07 11:49:44 +02002152 return NESTED_EXIT_CONTINUE;
2153}
2154
2155/*
 2156 * If this function returns NESTED_EXIT_DONE, the #vmexit must be handled by the nested guest
2157 */
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002158static int nested_svm_intercept(struct vcpu_svm *svm)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002159{
2160 u32 exit_code = svm->vmcb->control.exit_code;
2161 int vmexit = NESTED_EXIT_HOST;
2162
Alexander Grafcf74a782008-11-25 20:17:08 +01002163 switch (exit_code) {
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002164 case SVM_EXIT_MSR:
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002165 vmexit = nested_svm_exit_handled_msr(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002166 break;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002167 case SVM_EXIT_IOIO:
2168 vmexit = nested_svm_intercept_ioio(svm);
2169 break;
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002170 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2171 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2172 if (svm->nested.intercept_cr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002173 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002174 break;
2175 }
Joerg Roedel3aed0412010-11-30 18:03:58 +01002176 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2177 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2178 if (svm->nested.intercept_dr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002179 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002180 break;
2181 }
2182 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2183 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
Joerg Roedelaad42c62009-08-07 11:49:34 +02002184 if (svm->nested.intercept_exceptions & excp_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002185 vmexit = NESTED_EXIT_DONE;
Gleb Natapov631bc482010-10-14 11:22:52 +02002186 /* an async page fault always causes a vmexit */
2187 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
2188 svm->apf_reason != 0)
2189 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002190 break;
2191 }
Joerg Roedel228070b2010-04-22 12:33:10 +02002192 case SVM_EXIT_ERR: {
2193 vmexit = NESTED_EXIT_DONE;
2194 break;
2195 }
Alexander Grafcf74a782008-11-25 20:17:08 +01002196 default: {
2197 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
Joerg Roedelaad42c62009-08-07 11:49:34 +02002198 if (svm->nested.intercept & exit_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002199 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002200 }
2201 }
2202
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002203 return vmexit;
2204}
2205
2206static int nested_svm_exit_handled(struct vcpu_svm *svm)
2207{
2208 int vmexit;
2209
2210 vmexit = nested_svm_intercept(svm);
2211
2212 if (vmexit == NESTED_EXIT_DONE)
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002213 nested_svm_vmexit(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002214
2215 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01002216}
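/*
 * Sketch of the exit-code-to-bit mapping that nested_svm_intercept()
 * above applies (the helper is hypothetical): CR, DR and exception exit
 * codes index their dedicated bitmaps relative to the base of their
 * range, while all remaining exit codes index the main intercept vector
 * relative to SVM_EXIT_INTR.  The caller still has to test the result
 * against the bitmap that matches the range.
 */
static inline u64 example_intercept_bit(u32 exit_code)
{
	if (exit_code >= SVM_EXIT_READ_CR0 && exit_code <= SVM_EXIT_WRITE_CR8)
		return 1ULL << (exit_code - SVM_EXIT_READ_CR0);
	if (exit_code >= SVM_EXIT_READ_DR0 && exit_code <= SVM_EXIT_WRITE_DR7)
		return 1ULL << (exit_code - SVM_EXIT_READ_DR0);
	if (exit_code >= SVM_EXIT_EXCP_BASE &&
	    exit_code <= SVM_EXIT_EXCP_BASE + 0x1f)
		return 1ULL << (exit_code - SVM_EXIT_EXCP_BASE);
	return 1ULL << (exit_code - SVM_EXIT_INTR);
}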
2217
Joerg Roedel0460a972009-08-07 11:49:31 +02002218static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
2219{
2220 struct vmcb_control_area *dst = &dst_vmcb->control;
2221 struct vmcb_control_area *from = &from_vmcb->control;
2222
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002223 dst->intercept_cr = from->intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01002224 dst->intercept_dr = from->intercept_dr;
Joerg Roedel0460a972009-08-07 11:49:31 +02002225 dst->intercept_exceptions = from->intercept_exceptions;
2226 dst->intercept = from->intercept;
2227 dst->iopm_base_pa = from->iopm_base_pa;
2228 dst->msrpm_base_pa = from->msrpm_base_pa;
2229 dst->tsc_offset = from->tsc_offset;
2230 dst->asid = from->asid;
2231 dst->tlb_ctl = from->tlb_ctl;
2232 dst->int_ctl = from->int_ctl;
2233 dst->int_vector = from->int_vector;
2234 dst->int_state = from->int_state;
2235 dst->exit_code = from->exit_code;
2236 dst->exit_code_hi = from->exit_code_hi;
2237 dst->exit_info_1 = from->exit_info_1;
2238 dst->exit_info_2 = from->exit_info_2;
2239 dst->exit_int_info = from->exit_int_info;
2240 dst->exit_int_info_err = from->exit_int_info_err;
2241 dst->nested_ctl = from->nested_ctl;
2242 dst->event_inj = from->event_inj;
2243 dst->event_inj_err = from->event_inj_err;
2244 dst->nested_cr3 = from->nested_cr3;
2245 dst->lbr_ctl = from->lbr_ctl;
2246}
2247
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002248static int nested_svm_vmexit(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002249{
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002250 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002251 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedel33740e42009-08-07 11:49:29 +02002252 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002253 struct page *page;
Alexander Grafcf74a782008-11-25 20:17:08 +01002254
Joerg Roedel17897f32009-10-09 16:08:29 +02002255 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
2256 vmcb->control.exit_info_1,
2257 vmcb->control.exit_info_2,
2258 vmcb->control.exit_int_info,
Stefan Hajnoczie097e5f2011-07-22 12:46:52 +01002259 vmcb->control.exit_int_info_err,
2260 KVM_ISA_SVM);
Joerg Roedel17897f32009-10-09 16:08:29 +02002261
Joerg Roedel7597f122010-02-19 16:23:00 +01002262 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002263 if (!nested_vmcb)
2264 return 1;
2265
Joerg Roedel20307532010-11-29 17:51:48 +01002266 /* Exit Guest-Mode */
2267 leave_guest_mode(&svm->vcpu);
Joerg Roedel06fc77722010-02-19 16:23:07 +01002268 svm->nested.vmcb = 0;
2269
Alexander Grafcf74a782008-11-25 20:17:08 +01002270 /* Give the current vmcb to the guest */
Joerg Roedel33740e42009-08-07 11:49:29 +02002271 disable_gif(svm);
2272
2273 nested_vmcb->save.es = vmcb->save.es;
2274 nested_vmcb->save.cs = vmcb->save.cs;
2275 nested_vmcb->save.ss = vmcb->save.ss;
2276 nested_vmcb->save.ds = vmcb->save.ds;
2277 nested_vmcb->save.gdtr = vmcb->save.gdtr;
2278 nested_vmcb->save.idtr = vmcb->save.idtr;
Joerg Roedel3f6a9d12010-07-27 18:14:20 +02002279 nested_vmcb->save.efer = svm->vcpu.arch.efer;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01002280 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
Avi Kivity9f8fe502010-12-05 17:30:00 +02002281 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02002282 nested_vmcb->save.cr2 = vmcb->save.cr2;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01002283 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03002284 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02002285 nested_vmcb->save.rip = vmcb->save.rip;
2286 nested_vmcb->save.rsp = vmcb->save.rsp;
2287 nested_vmcb->save.rax = vmcb->save.rax;
2288 nested_vmcb->save.dr7 = vmcb->save.dr7;
2289 nested_vmcb->save.dr6 = vmcb->save.dr6;
2290 nested_vmcb->save.cpl = vmcb->save.cpl;
2291
2292 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
2293 nested_vmcb->control.int_vector = vmcb->control.int_vector;
2294 nested_vmcb->control.int_state = vmcb->control.int_state;
2295 nested_vmcb->control.exit_code = vmcb->control.exit_code;
2296 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
2297 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
2298 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
2299 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
2300 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
Joerg Roedel7a190662010-07-27 18:14:21 +02002301 nested_vmcb->control.next_rip = vmcb->control.next_rip;
Alexander Graf8d23c462009-10-09 16:08:25 +02002302
2303 /*
 2304 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle, we have
 2305 * to make sure that we do not lose injected events. So check event_inj
 2306 * here and copy it to exit_int_info if it is valid.
 2307 * Exit_int_info and event_inj can't both be valid because the case
 2308 * below only happens on a VMRUN instruction intercept, which has
2309 * no valid exit_int_info set.
2310 */
2311 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
2312 struct vmcb_control_area *nc = &nested_vmcb->control;
2313
2314 nc->exit_int_info = vmcb->control.event_inj;
2315 nc->exit_int_info_err = vmcb->control.event_inj_err;
2316 }
2317
Joerg Roedel33740e42009-08-07 11:49:29 +02002318 nested_vmcb->control.tlb_ctl = 0;
2319 nested_vmcb->control.event_inj = 0;
2320 nested_vmcb->control.event_inj_err = 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01002321
2322 /* We always set V_INTR_MASKING and remember the old value in hflags */
2323 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2324 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
2325
Alexander Grafcf74a782008-11-25 20:17:08 +01002326 /* Restore the original control entries */
Joerg Roedel0460a972009-08-07 11:49:31 +02002327 copy_vmcb_control_area(vmcb, hsave);
Alexander Grafcf74a782008-11-25 20:17:08 +01002328
Alexander Graf219b65d2009-06-15 15:21:25 +02002329 kvm_clear_exception_queue(&svm->vcpu);
2330 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01002331
Joerg Roedel4b161842010-09-10 17:31:03 +02002332 svm->nested.nested_cr3 = 0;
2333
Alexander Grafcf74a782008-11-25 20:17:08 +01002334 /* Restore selected save entries */
2335 svm->vmcb->save.es = hsave->save.es;
2336 svm->vmcb->save.cs = hsave->save.cs;
2337 svm->vmcb->save.ss = hsave->save.ss;
2338 svm->vmcb->save.ds = hsave->save.ds;
2339 svm->vmcb->save.gdtr = hsave->save.gdtr;
2340 svm->vmcb->save.idtr = hsave->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03002341 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
Alexander Grafcf74a782008-11-25 20:17:08 +01002342 svm_set_efer(&svm->vcpu, hsave->save.efer);
2343 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
2344 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
2345 if (npt_enabled) {
2346 svm->vmcb->save.cr3 = hsave->save.cr3;
2347 svm->vcpu.arch.cr3 = hsave->save.cr3;
2348 } else {
Avi Kivity23902182010-06-10 17:02:16 +03002349 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
Alexander Grafcf74a782008-11-25 20:17:08 +01002350 }
2351 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
2352 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
2353 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
2354 svm->vmcb->save.dr7 = 0;
2355 svm->vmcb->save.cpl = 0;
2356 svm->vmcb->control.exit_int_info = 0;
2357
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002358 mark_all_dirty(svm->vmcb);
2359
Joerg Roedel7597f122010-02-19 16:23:00 +01002360 nested_svm_unmap(page);
Alexander Grafcf74a782008-11-25 20:17:08 +01002361
Joerg Roedel4b161842010-09-10 17:31:03 +02002362 nested_svm_uninit_mmu_context(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01002363 kvm_mmu_reset_context(&svm->vcpu);
2364 kvm_mmu_load(&svm->vcpu);
2365
2366 return 0;
2367}
Alexander Graf3d6368e2008-11-25 20:17:07 +01002368
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002369static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002370{
Joerg Roedel323c3d82010-03-01 15:34:37 +01002371 /*
2372 * This function merges the msr permission bitmaps of kvm and the
 2373 * nested vmcb. It is optimized in that it only merges the parts where
 2374 * the kvm msr permission bitmap may contain zero bits.
2375 */
Alexander Graf3d6368e2008-11-25 20:17:07 +01002376 int i;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002377
Joerg Roedel323c3d82010-03-01 15:34:37 +01002378 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2379 return true;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002380
Joerg Roedel323c3d82010-03-01 15:34:37 +01002381 for (i = 0; i < MSRPM_OFFSETS; i++) {
2382 u32 value, p;
2383 u64 offset;
2384
2385 if (msrpm_offsets[i] == 0xffffffff)
2386 break;
2387
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002388 p = msrpm_offsets[i];
2389 offset = svm->nested.vmcb_msrpm + (p * 4);
Joerg Roedel323c3d82010-03-01 15:34:37 +01002390
2391 if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
2392 return false;
2393
2394 svm->nested.msrpm[p] = svm->msrpm[p] | value;
2395 }
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002396
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002397 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002398
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002399 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002400}
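/*
 * One-word model of the merge loop above (the helper is hypothetical): a
 * set bit means "intercept this MSR access", so an access traps whenever
 * either KVM's own bitmap or the nested guest's bitmap wants it trapped.
 */
static inline u32 example_merge_msrpm_word(u32 kvm_word, u32 guest_word)
{
	return kvm_word | guest_word;
}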
2401
Joerg Roedel52c65a302010-08-02 16:46:44 +02002402static bool nested_vmcb_checks(struct vmcb *vmcb)
2403{
2404 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
2405 return false;
2406
Joerg Roedeldbe77582010-08-02 16:46:45 +02002407 if (vmcb->control.asid == 0)
2408 return false;
2409
Joerg Roedel4b161842010-09-10 17:31:03 +02002410 if (vmcb->control.nested_ctl && !npt_enabled)
2411 return false;
2412
Joerg Roedel52c65a302010-08-02 16:46:44 +02002413 return true;
2414}
2415
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002416static bool nested_svm_vmrun(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002417{
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002418 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002419 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedeldefbba52009-08-07 11:49:30 +02002420 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002421 struct page *page;
Joerg Roedel06fc77722010-02-19 16:23:07 +01002422 u64 vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002423
Joerg Roedel06fc77722010-02-19 16:23:07 +01002424 vmcb_gpa = svm->vmcb->save.rax;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002425
Joerg Roedel7597f122010-02-19 16:23:00 +01002426 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002427 if (!nested_vmcb)
2428 return false;
2429
Joerg Roedel52c65a302010-08-02 16:46:44 +02002430 if (!nested_vmcb_checks(nested_vmcb)) {
2431 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
2432 nested_vmcb->control.exit_code_hi = 0;
2433 nested_vmcb->control.exit_info_1 = 0;
2434 nested_vmcb->control.exit_info_2 = 0;
2435
2436 nested_svm_unmap(page);
2437
2438 return false;
2439 }
2440
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002441 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
Joerg Roedel0ac406d2009-10-09 16:08:27 +02002442 nested_vmcb->save.rip,
2443 nested_vmcb->control.int_ctl,
2444 nested_vmcb->control.event_inj,
2445 nested_vmcb->control.nested_ctl);
2446
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002447 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
2448 nested_vmcb->control.intercept_cr >> 16,
Joerg Roedel2e554e82010-02-24 18:59:14 +01002449 nested_vmcb->control.intercept_exceptions,
2450 nested_vmcb->control.intercept);
2451
Alexander Graf3d6368e2008-11-25 20:17:07 +01002452 /* Clear internal status */
Alexander Graf219b65d2009-06-15 15:21:25 +02002453 kvm_clear_exception_queue(&svm->vcpu);
2454 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002455
Joerg Roedele0231712010-02-24 18:59:10 +01002456 /*
 2457 * Save the old vmcb, so we don't need to pick what to save, but can
 2458 * restore everything when a VMEXIT occurs.
2459 */
Joerg Roedeldefbba52009-08-07 11:49:30 +02002460 hsave->save.es = vmcb->save.es;
2461 hsave->save.cs = vmcb->save.cs;
2462 hsave->save.ss = vmcb->save.ss;
2463 hsave->save.ds = vmcb->save.ds;
2464 hsave->save.gdtr = vmcb->save.gdtr;
2465 hsave->save.idtr = vmcb->save.idtr;
Avi Kivityf6801df2010-01-21 15:31:50 +02002466 hsave->save.efer = svm->vcpu.arch.efer;
Avi Kivity4d4ec082009-12-29 18:07:30 +02002467 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002468 hsave->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03002469 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002470 hsave->save.rip = kvm_rip_read(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002471 hsave->save.rsp = vmcb->save.rsp;
2472 hsave->save.rax = vmcb->save.rax;
2473 if (npt_enabled)
2474 hsave->save.cr3 = vmcb->save.cr3;
2475 else
Avi Kivity9f8fe502010-12-05 17:30:00 +02002476 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002477
Joerg Roedel0460a972009-08-07 11:49:31 +02002478 copy_vmcb_control_area(hsave, vmcb);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002479
Avi Kivityf6e78472010-08-02 15:30:20 +03002480 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002481 svm->vcpu.arch.hflags |= HF_HIF_MASK;
2482 else
2483 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
2484
Joerg Roedel4b161842010-09-10 17:31:03 +02002485 if (nested_vmcb->control.nested_ctl) {
2486 kvm_mmu_unload(&svm->vcpu);
2487 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
2488 nested_svm_init_mmu_context(&svm->vcpu);
2489 }
2490
Alexander Graf3d6368e2008-11-25 20:17:07 +01002491 /* Load the nested guest state */
2492 svm->vmcb->save.es = nested_vmcb->save.es;
2493 svm->vmcb->save.cs = nested_vmcb->save.cs;
2494 svm->vmcb->save.ss = nested_vmcb->save.ss;
2495 svm->vmcb->save.ds = nested_vmcb->save.ds;
2496 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
2497 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03002498 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002499 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
2500 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
2501 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
2502 if (npt_enabled) {
2503 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
2504 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01002505 } else
Avi Kivity23902182010-06-10 17:02:16 +03002506 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01002507
2508 /* Guest paging mode is active - reset mmu */
2509 kvm_mmu_reset_context(&svm->vcpu);
2510
Joerg Roedeldefbba52009-08-07 11:49:30 +02002511 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002512 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
2513 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
2514 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
Joerg Roedele0231712010-02-24 18:59:10 +01002515
Alexander Graf3d6368e2008-11-25 20:17:07 +01002516 /* In case we don't even reach vcpu_run, the fields are not updated */
2517 svm->vmcb->save.rax = nested_vmcb->save.rax;
2518 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
2519 svm->vmcb->save.rip = nested_vmcb->save.rip;
2520 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
2521 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
2522 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
2523
Joerg Roedelf7138532010-03-01 15:34:40 +01002524 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002525 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002526
Joerg Roedelaad42c62009-08-07 11:49:34 +02002527 /* cache intercepts */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002528 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01002529 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
Joerg Roedelaad42c62009-08-07 11:49:34 +02002530 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
2531 svm->nested.intercept = nested_vmcb->control.intercept;
2532
Joerg Roedelf40f6a42010-12-03 15:25:15 +01002533 svm_flush_tlb(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002534 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002535 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
2536 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
2537 else
2538 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
2539
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002540 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
2541 /* We only want the cr8 intercept bits of the guest */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002542 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
2543 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002544 }
2545
Joerg Roedel0d945bd2010-05-05 16:04:45 +02002546 /* We don't want to see VMMCALLs from a nested guest */
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01002547 clr_intercept(svm, INTERCEPT_VMMCALL);
Joerg Roedel0d945bd2010-05-05 16:04:45 +02002548
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002549 svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002550 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
2551 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
2552 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002553 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
2554 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
2555
Joerg Roedel7597f122010-02-19 16:23:00 +01002556 nested_svm_unmap(page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002557
Joerg Roedel20307532010-11-29 17:51:48 +01002558 /* Enter Guest-Mode */
2559 enter_guest_mode(&svm->vcpu);
2560
Joerg Roedel384c6362010-11-30 18:03:56 +01002561 /*
2562 * Merge guest and host intercepts - must be called with vcpu in
 2563 * guest-mode to take effect here
2564 */
2565 recalc_intercepts(svm);
2566
Joerg Roedel06fc77722010-02-19 16:23:07 +01002567 svm->nested.vmcb = vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002568
Joerg Roedel2af91942009-08-07 11:49:28 +02002569 enable_gif(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002570
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002571 mark_all_dirty(svm->vmcb);
2572
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002573 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002574}
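
/*
 * Note on the tsc_offset merge above: offsets accumulate across nesting
 * levels. If the host runs L1 with offset O1 and L1 runs L2 with offset
 * O2, a TSC read in L2 observes roughly
 *
 *	host_tsc + O1 + O2
 *
 * which is why "+=" rather than "=" is used when switching to the
 * nested VMCB.
 */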

static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}
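
/*
 * The set copied above appears to be exactly the "hidden" state that the
 * VMLOAD/VMSAVE instructions transfer and that VMRUN itself does not:
 * FS/GS/TR/LDTR (including their hidden segment state) plus the
 * SYSCALL/SYSENTER MSRs.
 */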

static int vmload_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct page *page;

	if (nested_svm_check_permissions(svm))
		return 1;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
	nested_svm_unmap(page);

	return 1;
}

static int vmsave_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct page *page;

	if (nested_svm_check_permissions(svm))
		return 1;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
	nested_svm_unmap(page);

	return 1;
}
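
/*
 * Note the direction of the two nested_svm_vmloadsave() calls above:
 * VMLOAD copies state from the VMCB addressed by rAX into the current
 * one, while VMSAVE copies from the current VMCB out to the one
 * addressed by rAX - matching what the bare instructions do.
 */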

static int vmrun_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	/* Save rip after vmrun instruction */
	kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);

	if (!nested_svm_vmrun(svm))
		return 1;

	if (!nested_svm_vmrun_msrpm(svm))
		goto failed;

	return 1;

failed:

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

	return 1;
}
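
/*
 * The failed: path above runs after the world switch into the nested
 * guest has already happened, so a plain error return is no longer an
 * option; signalling SVM_EXIT_ERR through a synthesized #VMEXIT back
 * into the L1 guest is the only consistent way out.
 */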

static int stgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	enable_gif(svm);

	return 1;
}

static int clgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	disable_gif(svm);

	/* After a CLGI no interrupts should come */
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;

	mark_dirty(svm->vmcb, VMCB_INTR);

	return 1;
}
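
/*
 * STGI/CLGI toggle the Global Interrupt Flag, which gates all interrupt
 * sources for this vcpu. With GIF cleared no interrupt can be taken
 * until the guest executes STGI again, which is why the virtual
 * interrupt request is dropped right away above.
 */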

static int invlpga_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
			  vcpu->arch.regs[VCPU_REGS_RAX]);

	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
	kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int skinit_interception(struct vcpu_svm *svm)
{
	trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);

	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int xsetbv_interception(struct vcpu_svm *svm)
{
	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
	u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);

	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
		skip_emulated_instruction(&svm->vcpu);
	}

	return 1;
}
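
/*
 * XSETBV takes the XCR index in %ecx and the new 64-bit value in
 * %edx:%eax, e.g. index 0 (XCR0) with bit 0 set to keep x87 state
 * enabled. The instruction is only skipped when kvm_set_xcr() accepts
 * the value; on failure that helper is expected to queue a #GP, which
 * then reaches the guest with the original rip.
 */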

static int invalid_op_interception(struct vcpu_svm *svm)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int task_switch_interception(struct vcpu_svm *svm)
{
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
	bool has_error_code = false;
	u32 error_code = 0;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			svm->vcpu.arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			if (svm->vmcb->control.exit_info_2 &
			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
				has_error_code = true;
				error_code =
					(u32)svm->vmcb->control.exit_info_2;
			}
			kvm_clear_exception_queue(&svm->vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
			kvm_clear_interrupt_queue(&svm->vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
		skip_emulated_instruction(&svm->vcpu);

	if (kvm_task_switch(&svm->vcpu, tss_selector, reason,
			    has_error_code, error_code) == EMULATE_FAIL) {
		svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		svm->vcpu.run->internal.ndata = 0;
		return 0;
	}
	return 1;
}
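
/*
 * Decoding summary for the task-switch exit above: exit_info_1 holds
 * the target TSS selector, bits in exit_info_2 flag a switch caused by
 * IRET or far JMP, an exit during event delivery (idt_v) means a task
 * gate, and everything else is treated as a CALL.
 */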

static int cpuid_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int iret_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.nmi_window_exits;
	clr_intercept(svm, INTERCEPT_IRET);
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
	return 1;
}
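
/*
 * The IRET intercept only tells us that the guest has *started* an
 * IRET, i.e. the NMI window is about to open; the instruction is not
 * skipped here. HF_IRET_MASK together with the rip saved in
 * nmi_iret_rip lets svm_complete_interrupts() detect that the IRET
 * actually retired before NMIs are allowed in again.
 */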

static int invlpg_interception(struct vcpu_svm *svm)
{
	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;

	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
	skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm)
{
	return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}

static int rdpmc_interception(struct vcpu_svm *svm)
{
	int err;

	if (!static_cpu_has(X86_FEATURE_NRIPS))
		return emulate_on_interception(svm);

	err = kvm_rdpmc(&svm->vcpu);
	kvm_complete_insn_gp(&svm->vcpu, err);

	return 1;
}

bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
{
	unsigned long cr0 = svm->vcpu.arch.cr0;
	bool ret = false;
	u64 intercept;

	intercept = svm->nested.intercept;

	if (!is_guest_mode(&svm->vcpu) ||
	    (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
		return false;

	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
	val &= ~SVM_CR0_SELECTIVE_MASK;

	if (cr0 ^ val) {
		svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
		ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
	}

	return ret;
}
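
/*
 * Worked example, assuming SVM_CR0_SELECTIVE_MASK covers TS and MP: an
 * L2 write that only flips CR0.TS is masked out above and handled by
 * L0 directly, while a write that changes e.g. CR0.PG differs in the
 * remaining bits and is reflected to L1 as SVM_EXIT_CR0_SEL_WRITE.
 */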

#define CR_VALID (1ULL << 63)

static int cr_interception(struct vcpu_svm *svm)
{
	int reg, cr;
	unsigned long val;
	int err;

	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

	err = 0;
	if (cr >= 16) { /* mov to cr */
		cr -= 16;
		val = kvm_register_read(&svm->vcpu, reg);
		switch (cr) {
		case 0:
			if (!check_selective_cr0_intercepted(svm, val))
				err = kvm_set_cr0(&svm->vcpu, val);
			else
				return 1;

			break;
		case 3:
			err = kvm_set_cr3(&svm->vcpu, val);
			break;
		case 4:
			err = kvm_set_cr4(&svm->vcpu, val);
			break;
		case 8:
			err = kvm_set_cr8(&svm->vcpu, val);
			break;
		default:
			WARN(1, "unhandled write to CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
	} else { /* mov from cr */
		switch (cr) {
		case 0:
			val = kvm_read_cr0(&svm->vcpu);
			break;
		case 2:
			val = svm->vcpu.arch.cr2;
			break;
		case 3:
			val = kvm_read_cr3(&svm->vcpu);
			break;
		case 4:
			val = kvm_read_cr4(&svm->vcpu);
			break;
		case 8:
			val = kvm_get_cr8(&svm->vcpu);
			break;
		default:
			WARN(1, "unhandled read from CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
		kvm_register_write(&svm->vcpu, reg, val);
	}
	kvm_complete_insn_gp(&svm->vcpu, err);

	return 1;
}
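
/*
 * With decode assists the hardware pre-decodes the mov: bit 63 of
 * exit_info_1 (CR_VALID) says the info is usable and the low bits name
 * the GPR involved. The CR number comes from the exit code itself,
 * with the write exits sitting 16 slots above the read exits - hence
 * the "cr >= 16" test above.
 */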

static int dr_interception(struct vcpu_svm *svm)
{
	int reg, dr;
	unsigned long val;
	int err;

	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;

	if (dr >= 16) { /* mov to DRn */
		val = kvm_register_read(&svm->vcpu, reg);
		kvm_set_dr(&svm->vcpu, dr - 16, val);
	} else {
		err = kvm_get_dr(&svm->vcpu, dr, &val);
		if (!err)
			kvm_register_write(&svm->vcpu, reg, val);
	}

	skip_emulated_instruction(&svm->vcpu);

	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;
	int r;

	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
	/* instruction emulation calls kvm_set_cr8() */
	r = cr_interception(svm);
	if (irqchip_in_kernel(svm->vcpu.kvm)) {
		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
		return r;
	}
	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
		return r;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}
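
/*
 * With a userspace irqchip the TPR lives outside the kernel, so a CR8
 * write that lowers the priority (cr8_prev > new value) may unmask a
 * pending interrupt and must be reported via KVM_EXIT_SET_TPR; writes
 * that keep or raise the priority complete in-kernel.
 */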

u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu)
{
	struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
	return vmcb->control.tsc_offset +
		svm_scale_tsc(vcpu, native_read_tsc());
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC: {
		*data = svm->vmcb->control.tsc_offset +
			svm_scale_tsc(vcpu, native_read_tsc());

		break;
	}
	case MSR_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->sysenter_esp;
		break;
	/*
	 * Nobody will change the following 5 values in the VMCB so we can
	 * safely return them on rdmsr. They will always be 0 until LBRV is
	 * implemented.
	 */
	case MSR_IA32_DEBUGCTLMSR:
		*data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		*data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		*data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		*data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		*data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		*data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		*data = svm->nested.vm_cr_msr;
		break;
	case MSR_IA32_UCODE_REV:
		*data = 0x01000065;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
		trace_kvm_msr_read_ex(ecx);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_read(ecx, data);

		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}
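
/*
 * rdmsr returns through %edx:%eax, hence the split of 'data' above;
 * e.g. for data = 0x0000000123456789ULL:
 *
 *	regs[VCPU_REGS_RAX] = 0x23456789;
 *	regs[VCPU_REGS_RDX] = 0x00000001;
 */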

static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	chg_mask = SVM_VM_CR_VALID_MASK;

	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	svm->nested.vm_cr_msr &= ~chg_mask;
	svm->nested.vm_cr_msr |= (data & chg_mask);

	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

	/* check for svm_disable while efer.svme is set */
	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}
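
/*
 * Once SVM_VM_CR_SVM_DIS_MASK is set, both the DIS and LOCK bits drop
 * out of chg_mask and become read-only, and disabling SVM while
 * EFER.SVME is still set is refused - mirroring the lockout behaviour
 * of the real VM_CR MSR.
 */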

static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, data);
		break;
	case MSR_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->sysenter_eip = data;
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->sysenter_esp = data;
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				  __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		mark_dirty(svm->vmcb, VMCB_LBR);
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_VM_HSAVE_PA:
		svm->nested.hsave_msr = data;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data)) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_write(ecx, data);
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

static int msr_interception(struct vcpu_svm *svm)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm);
	else
		return rdmsr_interception(svm);
}
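
/* For the MSR vmexit, exit_info_1 is 1 for WRMSR and 0 for RDMSR. */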

static int interrupt_window_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	mark_dirty(svm->vmcb, VMCB_INTR);
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (!irqchip_in_kernel(svm->vcpu.kvm) &&
	    kvm_run->request_interrupt_window &&
	    !kvm_cpu_has_interrupt(&svm->vcpu)) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

static int pause_interception(struct vcpu_svm *svm)
{
	kvm_vcpu_on_spin(&(svm->vcpu));
	return 1;
}

static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
	[SVM_EXIT_READ_CR0]			= cr_interception,
	[SVM_EXIT_READ_CR3]			= cr_interception,
	[SVM_EXIT_READ_CR4]			= cr_interception,
	[SVM_EXIT_READ_CR8]			= cr_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR0]			= cr_interception,
	[SVM_EXIT_WRITE_CR3]			= cr_interception,
	[SVM_EXIT_WRITE_CR4]			= cr_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= dr_interception,
	[SVM_EXIT_READ_DR1]			= dr_interception,
	[SVM_EXIT_READ_DR2]			= dr_interception,
	[SVM_EXIT_READ_DR3]			= dr_interception,
	[SVM_EXIT_READ_DR4]			= dr_interception,
	[SVM_EXIT_READ_DR5]			= dr_interception,
	[SVM_EXIT_READ_DR6]			= dr_interception,
	[SVM_EXIT_READ_DR7]			= dr_interception,
	[SVM_EXIT_WRITE_DR0]			= dr_interception,
	[SVM_EXIT_WRITE_DR1]			= dr_interception,
	[SVM_EXIT_WRITE_DR2]			= dr_interception,
	[SVM_EXIT_WRITE_DR3]			= dr_interception,
	[SVM_EXIT_WRITE_DR4]			= dr_interception,
	[SVM_EXIT_WRITE_DR5]			= dr_interception,
	[SVM_EXIT_WRITE_DR6]			= dr_interception,
	[SVM_EXIT_WRITE_DR7]			= dr_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	[SVM_EXIT_RDPMC]			= rdpmc_interception,
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_IRET]				= iret_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_PAUSE]			= pause_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= skinit_interception,
	[SVM_EXIT_WBINVD]			= emulate_on_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
	[SVM_EXIT_XSETBV]			= xsetbv_interception,
	[SVM_EXIT_NPF]				= pf_interception,
};
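
/*
 * handle_exit() below dispatches through this table once the nested
 * and error cases have been filtered out; exit codes without a handler
 * are reported to userspace as KVM_EXIT_UNKNOWN instead of bringing
 * down the host.
 */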

static void dump_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	pr_err("VMCB Control Area:\n");
	pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
	pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
	pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
	pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
	pr_err("%-20s%d\n", "asid:", control->asid);
	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
	pr_err("%-20s%08x\n", "int_state:", control->int_state);
	pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
	pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
	pr_err("VMCB State Save Area:\n");
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "es:",
	       save->es.selector, save->es.attrib,
	       save->es.limit, save->es.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "cs:",
	       save->cs.selector, save->cs.attrib,
	       save->cs.limit, save->cs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ss:",
	       save->ss.selector, save->ss.attrib,
	       save->ss.limit, save->ss.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ds:",
	       save->ds.selector, save->ds.attrib,
	       save->ds.limit, save->ds.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "fs:",
	       save->fs.selector, save->fs.attrib,
	       save->fs.limit, save->fs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gs:",
	       save->gs.selector, save->gs.attrib,
	       save->gs.limit, save->gs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gdtr:",
	       save->gdtr.selector, save->gdtr.attrib,
	       save->gdtr.limit, save->gdtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ldtr:",
	       save->ldtr.selector, save->ldtr.attrib,
	       save->ldtr.limit, save->ldtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "idtr:",
	       save->idtr.selector, save->idtr.attrib,
	       save->idtr.limit, save->idtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "tr:",
	       save->tr.selector, save->tr.attrib,
	       save->tr.limit, save->tr.base);
	pr_err("cpl: %d efer: %016llx\n",
	       save->cpl, save->efer);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr0:", save->cr0, "cr2:", save->cr2);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr3:", save->cr3, "cr4:", save->cr4);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "dr6:", save->dr6, "dr7:", save->dr7);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rip:", save->rip, "rflags:", save->rflags);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rsp:", save->rsp, "rax:", save->rax);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "star:", save->star, "lstar:", save->lstar);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cstar:", save->cstar, "sfmask:", save->sfmask);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "kernel_gs_base:", save->kernel_gs_base,
	       "sysenter_cs:", save->sysenter_cs);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "sysenter_esp:", save->sysenter_esp,
	       "sysenter_eip:", save->sysenter_eip);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "br_from:", save->br_from, "br_to:", save->br_to);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "excp_from:", save->last_excp_from,
	       "excp_to:", save->last_excp_to);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
}

static int handle_exit(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
	if (npt_enabled)
		vcpu->arch.cr3 = svm->vmcb->save.cr3;

	if (unlikely(svm->nested.exit_required)) {
		nested_svm_vmexit(svm);
		svm->nested.exit_required = false;

		return 1;
	}

	if (is_guest_mode(vcpu)) {
		int vmexit;

		trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
					svm->vmcb->control.exit_info_1,
					svm->vmcb->control.exit_info_2,
					svm->vmcb->control.exit_int_info,
					svm->vmcb->control.exit_int_info_err,
					KVM_ISA_SVM);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	svm_complete_interrupts(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
		dump_vmcb(vcpu);
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
	    exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	set_intercept(svm, INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
	mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	if (irr == -1)
		return;

	if (tpr >= irr)
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}

static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;
	ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
	ret = ret && gif_set(svm) && nested_svm_nmi(svm);

	return ret;
}

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (masked) {
		svm->vcpu.arch.hflags |= HF_NMI_MASK;
		set_intercept(svm, INTERCEPT_IRET);
	} else {
		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
		clr_intercept(svm, INTERCEPT_IRET);
	}
}

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;

	if (!gif_set(svm) ||
	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
		return 0;

	ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);

	if (is_guest_mode(vcpu))
		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);

	return ret;
}
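
/*
 * Summary of the checks above: an interrupt is deliverable only when
 * GIF is set, no interrupt shadow is active and EFLAGS.IF is set; when
 * running nested, HF_VINTR_MASK additionally blocks injection because
 * physical interrupts are then destined for the host, not the guest.
 */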

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept. The next time we
	 * get that intercept, this function will be called again though and
	 * we'll get the vintr intercept.
	 */
	if (gif_set(svm) && nested_svm_intr(svm)) {
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
	}
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* IRET will cause a vm exit */

	/*
	 * Something prevents NMI from being injected. Single step over the
	 * possible problem (IRET or exception injection or interrupt shadow)
	 */
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
	update_db_intercept(vcpu);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->asid_generation--;
}
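
/*
 * Two ways to flush guest TLB entries: CPUs with FLUSHBYASID flush
 * just this guest's ASID on the next VMRUN, while older CPUs get a
 * fresh ASID instead - decrementing asid_generation forces new_asid()
 * in pre_svm_run(), which has the same effect of dropping stale
 * guest translations.
 */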

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	unsigned int3_injected = svm->int3_injected;

	svm->int3_injected = 0;

	/*
	 * If we've made progress since setting HF_IRET_MASK, we've
	 * executed an IRET and can allow NMI injection.
	 */
	if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
	    && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	}

	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/*
		 * In case of software exceptions, do not reinject the vector,
		 * but re-execute the instruction instead. Rewind RIP first
		 * if we emulated INT3 before.
		 */
		if (kvm_exception_is_soft(vector)) {
			if (vector == BP_VECTOR && int3_injected &&
			    kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
				kvm_rip_write(&svm->vcpu,
					      kvm_rip_read(&svm->vcpu) -
					      int3_injected);
			break;
		}
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_requeue_exception_e(&svm->vcpu, vector, err);

		} else
			kvm_requeue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}
}

static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	control->exit_int_info = control->event_inj;
	control->exit_int_info_err = control->event_inj_err;
	control->event_inj = 0;
	svm_complete_interrupts(svm);
}

#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif
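
/*
 * The R macro expands to the "r"/"e" register-name prefix so the asm
 * template below reads %rax/%rbx on 64-bit and %eax/%ebx on 32-bit
 * builds.
 */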
3749
Avi Kivity851ba692009-08-24 11:10:17 +03003750static void svm_vcpu_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003751{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003752 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivityd9e368d2007-06-07 19:18:30 +03003753
Joerg Roedel2041a062010-04-22 12:33:08 +02003754 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3755 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3756 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3757
Joerg Roedelcd3ff652009-10-09 16:08:26 +02003758 /*
3759 * A vmexit emulation is required before the vcpu can be executed
3760 * again.
3761 */
3762 if (unlikely(svm->nested.exit_required))
3763 return;
3764
Rusty Russelle756fc62007-07-30 20:07:08 +10003765 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003766
Joerg Roedel649d6862008-04-16 16:51:15 +02003767 sync_lapic_to_cr8(vcpu);
3768
Joerg Roedelcda0ffd2009-08-07 11:49:45 +02003769 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003770
Avi Kivity04d2cc72007-09-10 18:10:54 +03003771 clgi();
3772
3773 local_irq_enable();
Avi Kivity36241b82006-12-22 01:05:20 -08003774
	asm volatile (
		"push %%"R"bp; \n\t"
		"mov %c[rbx](%[svm]), %%"R"bx \n\t"
		"mov %c[rcx](%[svm]), %%"R"cx \n\t"
		"mov %c[rdx](%[svm]), %%"R"dx \n\t"
		"mov %c[rsi](%[svm]), %%"R"si \n\t"
		"mov %c[rdi](%[svm]), %%"R"di \n\t"
		"mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]), %%r8 \n\t"
		"mov %c[r9](%[svm]), %%r9 \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%"R"ax \n\t"
		"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%"R"ax \n\t"

		/* Save guest registers, load host registers */
		"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
		"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
		"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
		"mov %%"R"si, %c[rsi](%[svm]) \n\t"
		"mov %%"R"di, %c[rdi](%[svm]) \n\t"
		"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8, %c[r8](%[svm]) \n\t"
		"mov %%r9, %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		"pop %%"R"bp"
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
		, R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#endif
		);

#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
	loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif

	reload_tss(vcpu);

	local_irq_disable();

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_before_handle_nmi(&svm->vcpu);

	stgi();

	/* Any pending NMI will be delivered here */

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_after_handle_nmi(&svm->vcpu);

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* If the exit was due to a #PF, check for an async page fault reason */
	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
		svm->apf_reason = kvm_read_and_reset_pf_reason();

	if (npt_enabled) {
		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
	}

	/*
	 * We need to handle MC intercepts here before the vcpu has a chance
	 * to change the physical cpu.
	 */
	if (unlikely(svm->vmcb->control.exit_code ==
		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
		svm_handle_mce(svm);

	mark_all_clean(svm->vmcb);
}

#undef R

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = root;
	mark_dirty(svm->vmcb, VMCB_CR);
	svm_flush_tlb(vcpu);
}

static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);

	/* Also sync guest cr3 here in case we live migrate */
	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
	mark_dirty(svm->vmcb, VMCB_CR);

	svm_flush_tlb(vcpu);
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction (opcode 0f 01 d9):
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	return 0;
}

static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}

static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
	switch (func) {
	case 0x80000001:
		if (nested)
			entry->ecx |= (1 << 2); /* Set SVM bit */
		break;
	case 0x8000000A:
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add
				   proper ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		entry->edx = 0; /* By default, do not support any
				   additional features */

		/* Support next_rip if the host supports it */
		if (boot_cpu_has(X86_FEATURE_NRIPS))
			entry->edx |= SVM_FEATURE_NRIP;

		/* Support NPT for the guest if enabled */
		if (npt_enabled)
			entry->edx |= SVM_FEATURE_NPT;

		break;
	}
}

static int svm_get_lpage_level(void)
{
	return PT_PDPE_LEVEL;
}

static bool svm_rdtscp_supported(void)
{
	return false;
}

static bool svm_has_wbinvd_exit(void)
{
	return true;
}

static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	set_exception_intercept(svm, NM_VECTOR);
	update_cr0_intercept(svm);
}

#define PRE_EX(exit)  { .exit_code = (exit), \
			.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_MEMACCESS, }

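/*
 * Map each instruction intercept the x86 emulator can report to the SVM
 * exit code that a nested hypervisor registers for it, together with the
 * emulation stage at which the intercept check has to run.
 */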
static struct __x86_intercept {
	u32 exit_code;
	enum x86_intercept_stage stage;
} x86_intercept_map[] = {
	[x86_intercept_cr_read]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_cr_write]	= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_clts]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_lmsw]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_smsw]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_dr_read]		= POST_EX(SVM_EXIT_READ_DR0),
	[x86_intercept_dr_write]	= POST_EX(SVM_EXIT_WRITE_DR0),
	[x86_intercept_sldt]		= POST_EX(SVM_EXIT_LDTR_READ),
	[x86_intercept_str]		= POST_EX(SVM_EXIT_TR_READ),
	[x86_intercept_lldt]		= POST_EX(SVM_EXIT_LDTR_WRITE),
	[x86_intercept_ltr]		= POST_EX(SVM_EXIT_TR_WRITE),
	[x86_intercept_sgdt]		= POST_EX(SVM_EXIT_GDTR_READ),
	[x86_intercept_sidt]		= POST_EX(SVM_EXIT_IDTR_READ),
	[x86_intercept_lgdt]		= POST_EX(SVM_EXIT_GDTR_WRITE),
	[x86_intercept_lidt]		= POST_EX(SVM_EXIT_IDTR_WRITE),
	[x86_intercept_vmrun]		= POST_EX(SVM_EXIT_VMRUN),
	[x86_intercept_vmmcall]		= POST_EX(SVM_EXIT_VMMCALL),
	[x86_intercept_vmload]		= POST_EX(SVM_EXIT_VMLOAD),
	[x86_intercept_vmsave]		= POST_EX(SVM_EXIT_VMSAVE),
	[x86_intercept_stgi]		= POST_EX(SVM_EXIT_STGI),
	[x86_intercept_clgi]		= POST_EX(SVM_EXIT_CLGI),
	[x86_intercept_skinit]		= POST_EX(SVM_EXIT_SKINIT),
	[x86_intercept_invlpga]		= POST_EX(SVM_EXIT_INVLPGA),
	[x86_intercept_rdtscp]		= POST_EX(SVM_EXIT_RDTSCP),
	[x86_intercept_monitor]		= POST_MEM(SVM_EXIT_MONITOR),
	[x86_intercept_mwait]		= POST_EX(SVM_EXIT_MWAIT),
	[x86_intercept_invlpg]		= POST_EX(SVM_EXIT_INVLPG),
	[x86_intercept_invd]		= POST_EX(SVM_EXIT_INVD),
	[x86_intercept_wbinvd]		= POST_EX(SVM_EXIT_WBINVD),
	[x86_intercept_wrmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdtsc]		= POST_EX(SVM_EXIT_RDTSC),
	[x86_intercept_rdmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdpmc]		= POST_EX(SVM_EXIT_RDPMC),
	[x86_intercept_cpuid]		= PRE_EX(SVM_EXIT_CPUID),
	[x86_intercept_rsm]		= PRE_EX(SVM_EXIT_RSM),
	[x86_intercept_pause]		= PRE_EX(SVM_EXIT_PAUSE),
	[x86_intercept_pushf]		= PRE_EX(SVM_EXIT_PUSHF),
	[x86_intercept_popf]		= PRE_EX(SVM_EXIT_POPF),
	[x86_intercept_intn]		= PRE_EX(SVM_EXIT_SWINT),
	[x86_intercept_iret]		= PRE_EX(SVM_EXIT_IRET),
	[x86_intercept_icebp]		= PRE_EX(SVM_EXIT_ICEBP),
	[x86_intercept_hlt]		= POST_EX(SVM_EXIT_HLT),
	[x86_intercept_in]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_ins]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_out]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_outs]		= POST_EX(SVM_EXIT_IOIO),
};

#undef PRE_EX
#undef POST_EX
#undef POST_MEM

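/*
 * Called by the instruction emulator when emulating on behalf of an L2
 * guest: translate the emulator's intercept report into the matching SVM
 * exit code and ask nested_svm_exit_handled() whether L1 wants a #VMEXIT
 * for it.
 */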
static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (stage != icpt_info.stage)
		goto out;

	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;
		u64 intercept;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
			break;

		intercept = svm->nested.intercept;

		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:
		icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_MSR:
		if (info->intercept == x86_intercept_wrmsr)
			vmcb->control.exit_info_1 = 1;
		else
			vmcb->control.exit_info_1 = 0;
		break;
	case SVM_EXIT_PAUSE:
		/*
		 * The emulator reports this intercept for NOP as well; a
		 * real PAUSE is REP NOP, so only treat it as a PAUSE
		 * intercept when the REP prefix is present.
		 */
		if (info->rep_prefix != REPE_PREFIX)
			goto out;
		break;
	case SVM_EXIT_IOIO: {
		u64 exit_info;
		u32 bytes;

		exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info |= SVM_IOIO_TYPE_MASK;
			/*
			 * The data size is that of the non-port operand,
			 * i.e. the destination for IN/INS.
			 */
			bytes = info->dst_bytes;
		} else {
			bytes = info->src_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
	default:
		break;
	}

	vmcb->control.next_rip  = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}

static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
	.decache_cr3 = svm_decache_cr3,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.set_dr7 = svm_set_dr7,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,
	.fpu_activate = svm_fpu_activate,
	.fpu_deactivate = svm_fpu_deactivate,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.get_lpage_level = svm_get_lpage_level,

	.cpuid_update = svm_cpuid_update,

	.rdtscp_supported = svm_rdtscp_supported,

	.set_supported_cpuid = svm_set_supported_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.set_tsc_khz = svm_set_tsc_khz,
	.write_tsc_offset = svm_write_tsc_offset,
	.adjust_tsc_offset = svm_adjust_tsc_offset,
	.compute_tsc_offset = svm_compute_tsc_offset,
	.read_l1_tsc = svm_read_l1_tsc,

	.set_tdp_cr3 = set_tdp_cr3,

	.check_intercept = svm_check_intercept,
};

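/*
 * Module entry point: registers the svm_x86_ops table above with the
 * generic KVM code, which dispatches all architecture-specific operations
 * through it.
 */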
static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)