/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/desc.h>

#include <asm/virtext.h>

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

/* Turn on to get debugging output */
/* #define NESTED_DEBUG */

#ifdef NESTED_DEBUG
#define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args)
#else
#define nsvm_printk(fmt, args...) do {} while (0)
#endif

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static int nested = 0;
module_param(nested, int, S_IRUGO);

static void kvm_reput_irq(struct vcpu_svm *svm);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);

static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
			     void *arg2, void *opaque);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline bool is_nested(struct vcpu_svm *svm)
{
	return svm->nested_vmcb;
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}

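/*
 * Pending interrupts are kept in a two-level bitmap: irq_pending[] has
 * one bit per vector and irq_summary one bit per irq_pending word, so
 * the lowest pending vector is found with two __ffs() scans.  On a
 * 64-bit host, for example, vector 70 lives in irq_pending[1] bit 6,
 * with bit 1 set in irq_summary.
 */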
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->arch.irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
}

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

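/*
 * Note that EFER.SVME has to stay set in the guest's EFER image while
 * the vcpu runs under SVM (VMRUN requires it), so svm_set_efer() ORs
 * EFER_SVME back in regardless of what the guest wrote.
 */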
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	vcpu->arch.shadow_efer = efer;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* If we are within a nested VM we'd better #VMEXIT and let the
	   guest handle the exception */
	if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static bool svm_exception_injected(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

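/*
 * This code does not use a hardware next-RIP hint: the individual exit
 * handlers fill in svm->next_rip before asking for the skip.  The
 * distance from the current RIP is sanity-checked against the 15-byte
 * x86 instruction length limit (MAX_INST_SIZE).
 */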
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->arch.interrupt_window_open = (svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	cpu_svm_disable();
}

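/*
 * Per-CPU SVM enable: setting EFER.SVME makes the SVM instructions
 * usable, and MSR_VM_HSAVE_PA points at the page where the CPU stashes
 * host state across VMRUN/#VMEXIT.  CPUID 0x8000000a (SVM_CPUID_FUNC)
 * reports the number of available ASIDs in EBX.
 */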
static void svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *svm_data;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (!svm_data)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(svm_data->save_area);
	kfree(svm_data);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}

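/*
 * The MSR permission map dedicates two bits to every MSR in three
 * ranges of 0x2000 MSRs each: the even bit intercepts reads, the odd
 * bit intercepts writes.  As a worked example, MSR_LSTAR (0xc0000082)
 * lies in the second range, so msr_offset below is
 * (1 * 8192 + 0x82) * 2 = 16644 bits, i.e. u32 word 520, shift 4.
 */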
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return;
		}
	}
	BUG();
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME);
	}

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	if (!svm_has(SVM_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	control->intercept_cr_read = INTERCEPT_CR0_MASK |
				     INTERCEPT_CR3_MASK |
				     INTERCEPT_CR4_MASK;

	control->intercept_cr_write = INTERCEPT_CR0_MASK |
				      INTERCEPT_CR3_MASK |
				      INTERCEPT_CR4_MASK |
				      INTERCEPT_CR8_MASK;

	control->intercept_dr_read = INTERCEPT_DR0_MASK |
				     INTERCEPT_DR1_MASK |
				     INTERCEPT_DR2_MASK |
				     INTERCEPT_DR3_MASK;

	control->intercept_dr_write = INTERCEPT_DR0_MASK |
				      INTERCEPT_DR1_MASK |
				      INTERCEPT_DR2_MASK |
				      INTERCEPT_DR3_MASK |
				      INTERCEPT_DR5_MASK |
				      INTERCEPT_DR7_MASK;

	control->intercept_exceptions = (1 << PF_VECTOR) |
					(1 << UD_VECTOR) |
					(1 << MC_VECTOR);

	control->intercept = (1ULL << INTERCEPT_INTR) |
			     (1ULL << INTERCEPT_NMI) |
			     (1ULL << INTERCEPT_SMI) |
			     (1ULL << INTERCEPT_CPUID) |
			     (1ULL << INTERCEPT_INVD) |
			     (1ULL << INTERCEPT_HLT) |
			     (1ULL << INTERCEPT_INVLPG) |
			     (1ULL << INTERCEPT_INVLPGA) |
			     (1ULL << INTERCEPT_IOIO_PROT) |
			     (1ULL << INTERCEPT_MSR_PROT) |
			     (1ULL << INTERCEPT_TASK_SWITCH) |
			     (1ULL << INTERCEPT_SHUTDOWN) |
			     (1ULL << INTERCEPT_VMRUN) |
			     (1ULL << INTERCEPT_VMMCALL) |
			     (1ULL << INTERCEPT_VMLOAD) |
			     (1ULL << INTERCEPT_VMSAVE) |
			     (1ULL << INTERCEPT_STGI) |
			     (1ULL << INTERCEPT_CLGI) |
			     (1ULL << INTERCEPT_SKINIT) |
			     (1ULL << INTERCEPT_WBINVD) |
			     (1ULL << INTERCEPT_MONITOR) |
			     (1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = EFER_SVME;
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
					(1ULL << INTERCEPT_INVLPG));
		control->intercept_exceptions &= ~(1 << PF_VECTOR);
		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
						INTERCEPT_CR3_MASK);
		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
						 INTERCEPT_CR3_MASK);
		save->g_pat = 0x0007040600070406ULL;
		/* enable caching because the QEMU Bios doesn't enable it */
		save->cr0 = X86_CR0_ET;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	force_new_asid(&svm->vcpu);

	svm->nested_vmcb = 0;
	svm->vcpu.arch.hflags = HF_GIF_MASK;
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (vcpu->vcpu_id != 0) {
		kvm_rip_write(vcpu, 0);
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	err = -ENOMEM;
	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto uninit;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto uninit;

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto uninit;
	svm->hsave = page_address(hsave_page);

	svm->nested_msrpm = page_address(nested_msrpm_pages);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->hsave));
	__free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->arch.host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_timers(vcpu);
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->arch.host_tsc);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

	/*
	 * SVM always stores 0 for the 'G' bit in the CS selector in
	 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
	 * Intel's VMENTRY has a check on the 'G' bit.
	 */
	if (seg == VCPU_SREG_CS)
		var->g = s->limit > 0xfffff;

	/*
	 * Work around a bug where the busy flag in the tr selector
	 * isn't exposed
	 */
	if (seg == VCPU_SREG_TR)
		var->type |= 0x2;

	var->unusable = !var->present;
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	if (npt_enabled)
		goto set;

	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->arch.cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	if (!vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		cr0 |= X86_CR0_TS;
	}
set:
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		force_new_asid(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
	int old_debug = vcpu->guest_debug;
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->guest_debug = dbg->control;

	svm->vmcb->control.intercept_exceptions &=
		~((1 << DB_VECTOR) | (1 << BP_VECTOR));
	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug &
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			svm->vmcb->control.intercept_exceptions |=
				1 << DB_VECTOR;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			svm->vmcb->control.intercept_exceptions |=
				1 << BP_VECTOR;
	} else
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
	else
		svm->vmcb->save.dr7 = vcpu->arch.dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
	else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
		svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	return 0;
}

static int svm_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_int_info = svm->vmcb->control.exit_int_info;

	if (is_external_interrupt(exit_int_info))
		return exit_int_info & SVM_EVTINJ_VEC_MASK;
	return -1;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

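/*
 * ASIDs tag the TLB entries of each guest, avoiding a full flush on
 * every world switch.  Each physical CPU hands out ASIDs from its own
 * pool; when the pool runs dry, the generation counter is bumped and a
 * flush of all ASIDs is requested, invalidating every stale tag at
 * once.
 */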
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->vcpu.cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long val;

	switch (dr) {
	case 0 ... 3:
		val = vcpu->arch.db[dr];
		break;
	case 6:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			val = vcpu->arch.dr6;
		else
			val = svm->vmcb->save.dr6;
		break;
	case 7:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			val = vcpu->arch.dr7;
		else
			val = svm->vmcb->save.dr7;
		break;
	default:
		val = 0;
	}

	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
	return val;
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);

	*exception = 0;

	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = value;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->arch.cr4 & X86_CR4_DE)
			*exception = UD_VECTOR;
		return;
	case 6:
		if (value & 0xffffffff00000000ULL) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
		return;
	case 7:
		if (value & 0xffffffff00000000ULL) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
			svm->vmcb->save.dr7 = vcpu->arch.dr7;
			vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
		}
		return;
	default:
		/* FIXME: Possible case? */
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __func__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

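/*
 * #PF intercept (or #NPF with nested paging): hardware reports the
 * page-fault error code in exit_info_1 and the faulting address in
 * exit_info_2; both are handed to the KVM MMU.
 */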
static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 exit_int_info = svm->vmcb->control.exit_int_info;
	struct kvm *kvm = svm->vcpu.kvm;
	u64 fault_address;
	u32 error_code;
	bool event_injection = false;

	if (!irqchip_in_kernel(kvm) &&
	    is_external_interrupt(exit_int_info)) {
		event_injection = true;
		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
	}

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;

	if (!npt_enabled)
		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	else
		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	/*
	 * FIXME: This shouldn't be necessary here, but there is a flush
	 * missing in the MMU code. Until we find this bug, flush the
	 * complete TLB here on an NPF
	 */
	if (npt_enabled)
		svm_flush_tlb(&svm->vcpu);

	if (!npt_enabled && event_injection)
		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
		return 1;
	}
	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = DB_VECTOR;
	return 0;
}

static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	int er;

	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}

static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++svm->vcpu.stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	string = (io_info & SVM_IOIO_STR_MASK) != 0;

	if (string) {
		if (emulate_instruction(&svm->vcpu,
					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;

	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}

static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	KVMTRACE_0D(NMI, &svm->vcpu, handler);
	return 1;
}

static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	++svm->vcpu.stat.irq_exits;
	KVMTRACE_0D(INTR, &svm->vcpu, handler);
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

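/*
 * The SVM instructions (VMRUN, VMLOAD, VMSAVE, ...) are only allowed
 * for a guest that has EFER.SVME set and is paging, and only at CPL 0;
 * otherwise #UD respectively #GP is injected, mirroring what real
 * hardware would do.
 */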
static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
	    || !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code)
{
	if (is_nested(svm)) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = error_code;
		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
		if (nested_svm_exit_handled(svm, false)) {
			nsvm_printk("VMexit -> EXCP 0x%x\n", nr);

			nested_svm_vmexit(svm);
			return 1;
		}
	}

	return 0;
}

static inline int nested_svm_intr(struct vcpu_svm *svm)
{
	if (is_nested(svm)) {
		if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
			return 0;

		if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
			return 0;

		svm->vmcb->control.exit_code = SVM_EXIT_INTR;

		if (nested_svm_exit_handled(svm, false)) {
			nsvm_printk("VMexit -> INTR\n");
			nested_svm_vmexit(svm);
			return 1;
		}
	}

	return 0;
}

static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	if (is_error_page(page)) {
		printk(KERN_INFO "%s: could not find page at 0x%llx\n",
		       __func__, gpa);
		kvm_release_page_clean(page);
		kvm_inject_gp(&svm->vcpu, 0);
		return NULL;
	}
	return page;
}

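/*
 * Map one or two guest-physical pages (typically the nested VMCB and
 * an MSR/IO bitmap), run @handler on the mapped addresses and clean up
 * afterwards.  The mappings are atomic kmaps, so handlers must not
 * sleep.
 */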
static int nested_svm_do(struct vcpu_svm *svm,
			 u64 arg1_gpa, u64 arg2_gpa, void *opaque,
			 int (*handler)(struct vcpu_svm *svm,
					void *arg1,
					void *arg2,
					void *opaque))
{
	struct page *arg1_page;
	struct page *arg2_page = NULL;
	void *arg1;
	void *arg2 = NULL;
	int retval;

	arg1_page = nested_svm_get_page(svm, arg1_gpa);
	if (arg1_page == NULL)
		return 1;

	if (arg2_gpa) {
		arg2_page = nested_svm_get_page(svm, arg2_gpa);
		if (arg2_page == NULL) {
			kvm_release_page_clean(arg1_page);
			return 1;
		}
	}

	arg1 = kmap_atomic(arg1_page, KM_USER0);
	if (arg2_gpa)
		arg2 = kmap_atomic(arg2_page, KM_USER1);

	retval = handler(svm, arg1, arg2, opaque);

	kunmap_atomic(arg1, KM_USER0);
	if (arg2_gpa)
		kunmap_atomic(arg2, KM_USER1);

	kvm_release_page_dirty(arg1_page);
	if (arg2_gpa)
		kvm_release_page_dirty(arg2_page);

	return retval;
}

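/*
 * Decide whether an intercepted event is handled by the host or
 * reflected to the nested hypervisor.  When @opaque points to a true
 * kvm_overrides flag, exits that KVM always handles itself (physical
 * INTR/NMI, plus the page faults belonging to the paging mode in use)
 * are claimed for the host before the nested guest's intercept bits
 * are consulted.
 */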
static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
					void *arg1,
					void *arg2,
					void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	bool kvm_overrides = *(bool *)opaque;
	u32 exit_code = svm->vmcb->control.exit_code;

	if (kvm_overrides) {
		switch (exit_code) {
		case SVM_EXIT_INTR:
		case SVM_EXIT_NMI:
			return 0;
		/* For now we are always handling NPFs when using them */
		case SVM_EXIT_NPF:
			if (npt_enabled)
				return 0;
			break;
		/* When we're shadowing, trap PFs */
		case SVM_EXIT_EXCP_BASE + PF_VECTOR:
			if (!npt_enabled)
				return 0;
			break;
		default:
			break;
		}
	}

	switch (exit_code) {
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
		u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
		if (nested_vmcb->control.intercept_cr_read & cr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
		if (nested_vmcb->control.intercept_cr_write & cr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
		u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
		if (nested_vmcb->control.intercept_dr_read & dr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
		if (nested_vmcb->control.intercept_dr_write & dr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
		if (nested_vmcb->control.intercept_exceptions & excp_bits)
			return 1;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		nsvm_printk("exit code: 0x%x\n", exit_code);
		if (nested_vmcb->control.intercept & exit_bits)
			return 1;
	}
	}

	return 0;
}

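/*
 * MSR permission bitmap layout: each MSR owns two consecutive bits (read,
 * then write), and the map is split into three 2K regions covering the MSR
 * ranges 0x0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.
 * t1 is the byte offset into the bitmap, t0 the bit offset within that
 * byte; param selects the write (1) or read (0) permission bit.
 */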
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
				       void *arg1, void *arg2,
				       void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	u8 *msrpm = (u8 *)arg2;
	u32 t0, t1;
	u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u32 param = svm->vmcb->control.exit_info_1 & 1;

	if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return 0;

	switch (msr) {
	case 0 ... 0x1fff:
		t0 = msr * 2;
		t1 = t0 / 8;
		t0 %= 8;
		break;
	case 0xc0000000 ... 0xc0001fff:
		t0 = (8192 + msr - 0xc0000000) * 2;
		t1 = t0 / 8;
		t0 %= 8;
		break;
	case 0xc0010000 ... 0xc0011fff:
		t0 = (16384 + msr - 0xc0010000) * 2;
		t1 = t0 / 8;
		t0 %= 8;
		break;
	default:
		/* MSRs outside the mapped ranges always exit to L1 */
		return 1;
	}
	if (msrpm[t1] & ((1 << param) << t0))
		return 1;

	return 0;
}

static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
{
	bool k = kvm_override;

	switch (svm->vmcb->control.exit_code) {
	case SVM_EXIT_MSR:
		return nested_svm_do(svm, svm->nested_vmcb,
				     svm->nested_vmcb_msrpm, NULL,
				     nested_svm_exit_handled_msr);
	default:
		break;
	}

	return nested_svm_do(svm, svm->nested_vmcb, 0, &k,
			     nested_svm_exit_handled_real);
}

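/*
 * Emulate #VMEXIT: copy the current VMCB state back into the guest's
 * nested VMCB, restore the host (L1) state saved at VMRUN time and leave
 * nested mode. Only the fields listed in nested_save[] keep the values the
 * guest originally wrote; everything else reflects the exit state.
 */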
static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
				  void *arg2, void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	struct vmcb *hsave = svm->hsave;
	u64 nested_save[] = { nested_vmcb->save.cr0,
			      nested_vmcb->save.cr3,
			      nested_vmcb->save.cr4,
			      nested_vmcb->save.efer,
			      nested_vmcb->control.intercept_cr_read,
			      nested_vmcb->control.intercept_cr_write,
			      nested_vmcb->control.intercept_dr_read,
			      nested_vmcb->control.intercept_dr_write,
			      nested_vmcb->control.intercept_exceptions,
			      nested_vmcb->control.intercept,
			      nested_vmcb->control.msrpm_base_pa,
			      nested_vmcb->control.iopm_base_pa,
			      nested_vmcb->control.tsc_offset };

	/* Give the current vmcb to the guest */
	memcpy(nested_vmcb, svm->vmcb, sizeof(struct vmcb));
	nested_vmcb->save.cr0 = nested_save[0];
	if (!npt_enabled)
		nested_vmcb->save.cr3 = nested_save[1];
	nested_vmcb->save.cr4 = nested_save[2];
	nested_vmcb->save.efer = nested_save[3];
	nested_vmcb->control.intercept_cr_read = nested_save[4];
	nested_vmcb->control.intercept_cr_write = nested_save[5];
	nested_vmcb->control.intercept_dr_read = nested_save[6];
	nested_vmcb->control.intercept_dr_write = nested_save[7];
	nested_vmcb->control.intercept_exceptions = nested_save[8];
	nested_vmcb->control.intercept = nested_save[9];
	nested_vmcb->control.msrpm_base_pa = nested_save[10];
	nested_vmcb->control.iopm_base_pa = nested_save[11];
	nested_vmcb->control.tsc_offset = nested_save[12];

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	if ((nested_vmcb->control.int_ctl & V_IRQ_MASK) &&
	    (nested_vmcb->control.int_vector)) {
		nsvm_printk("WARNING: IRQ 0x%x still enabled on #VMEXIT\n",
			    nested_vmcb->control.int_vector);
	}

	/* Restore the original control entries */
	svm->vmcb->control = hsave->control;

	/* Kill any pending exceptions */
	if (svm->vcpu.arch.exception.pending)
		nsvm_printk("WARNING: Pending Exception\n");
	svm->vcpu.arch.exception.pending = false;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	svm->vmcb->save.rflags = hsave->save.rflags;
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
	/* Exit nested SVM mode */
	svm->nested_vmcb = 0;

	return 0;
}

static int nested_svm_vmexit(struct vcpu_svm *svm)
{
	nsvm_printk("VMexit\n");
	if (nested_svm_do(svm, svm->nested_vmcb, 0,
			  NULL, nested_svm_vmexit_real))
		return 1;

	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	return 0;
}

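/*
 * Merge the nested guest's MSR permission bitmap with our own: a bit set
 * in either map causes an intercept, so the nested guest never gets more
 * direct MSR access than the L1 guest itself has.
 */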
static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
				  void *arg2, void *opaque)
{
	int i;
	u32 *nested_msrpm = (u32 *)arg1;

	for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
		svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);

	return 0;
}

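/*
 * Emulate VMRUN: save the current (L1) state into the host-save area, then
 * load the nested VMCB's save state and OR its intercepts into ours. The
 * intercept merge keeps a nested guest from ever bypassing an intercept
 * the outer guest relies on.
 */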
static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
			    void *arg2, void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	struct vmcb *hsave = svm->hsave;

	/* nested_vmcb is our indicator if nested SVM is activated */
	svm->nested_vmcb = svm->vmcb->save.rax;

	/* Clear internal status */
	svm->vcpu.arch.exception.pending = false;

	/* Save the old vmcb, so we don't need to pick what we save, but
	   can restore everything when a VMEXIT occurs */
	memcpy(hsave, svm->vmcb, sizeof(struct vmcb));
	/* We need to remember the original CR3 in the SPT case */
	if (!npt_enabled)
		hsave->save.cr3 = svm->vcpu.arch.cr3;
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rip = svm->next_rip;

	if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	svm->vmcb->save.rflags = nested_vmcb->save.rflags;
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
	} else {
		kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
		kvm_mmu_reset_context(&svm->vcpu);
	}
	svm->vmcb->save.cr2 = nested_vmcb->save.cr2;
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;

	/* We don't want a nested guest to be more powerful than the guest,
	   so all intercepts are ORed */
	svm->vmcb->control.intercept_cr_read |=
		nested_vmcb->control.intercept_cr_read;
	svm->vmcb->control.intercept_cr_write |=
		nested_vmcb->control.intercept_cr_write;
	svm->vmcb->control.intercept_dr_read |=
		nested_vmcb->control.intercept_dr_read;
	svm->vmcb->control.intercept_dr_write |=
		nested_vmcb->control.intercept_dr_write;
	svm->vmcb->control.intercept_exceptions |=
		nested_vmcb->control.intercept_exceptions;

	svm->vmcb->control.intercept |= nested_vmcb->control.intercept;

	svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;

	force_new_asid(&svm->vcpu);
	svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info;
	svm->vmcb->control.exit_int_info_err = nested_vmcb->control.exit_int_info_err;
	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	if (nested_vmcb->control.int_ctl & V_IRQ_MASK) {
		nsvm_printk("nSVM Injecting Interrupt: 0x%x\n",
			    nested_vmcb->control.int_ctl);
	}
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	nsvm_printk("nSVM exit_int_info: 0x%x | int_state: 0x%x\n",
		    nested_vmcb->control.exit_int_info,
		    nested_vmcb->control.int_state);

	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
	if (nested_vmcb->control.event_inj & SVM_EVTINJ_VALID)
		nsvm_printk("Injecting Event: 0x%x\n",
			    nested_vmcb->control.event_inj);
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	svm->vcpu.arch.hflags |= HF_GIF_MASK;

	return 0;
}

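/*
 * VMLOAD/VMSAVE only transfer the handful of state items below between a
 * VMCB in guest memory and the current VMCB; the direction of the copy is
 * the only difference between the two instructions.
 */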
static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;

	return 1;
}

static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb,
			     void *arg2, void *opaque)
{
	return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb);
}

static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
			     void *arg2, void *opaque)
{
	return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb);
}

static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload);

	return 1;
}

static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave);

	return 1;
}

static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	nsvm_printk("VMrun\n");
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	if (nested_svm_do(svm, svm->vmcb->save.rax, 0,
			  NULL, nested_svm_vmrun))
		return 1;

	if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0,
			  NULL, nested_svm_vmrun_msrpm))
		return 1;

	return 1;
}

static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	svm->vcpu.arch.hflags |= HF_GIF_MASK;

	return 1;
}

static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;

	/* After a CLGI no interrupts should come */
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;

	return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

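/*
 * exit_info_2 encodes why the task switch happened; IRET and direct jumps
 * need a different emulation path than the default CALL/gate case.
 */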
static int task_switch_interception(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	u16 tss_selector;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_IRET);
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_JMP);
	return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
}

static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
	if (irqchip_in_kernel(svm->vcpu.kvm))
		return 1;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->vmcb->save.sysenter_esp;
		break;
	/* Nobody will change the following 5 values in the VMCB so
	   we can safely return them on rdmsr. They will always be 0
	   until LBRV is implemented. */
	case MSR_IA32_DEBUGCTLMSR:
		*data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		*data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		*data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		*data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		*data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		*data = svm->hsave_msr;
		break;
	case MSR_VM_CR:
		*data = 0;
		break;
	case MSR_IA32_UCODE_REV:
		*data = 0x01000065;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

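/*
 * RDMSR exits report the MSR index in rcx; the result goes back to the
 * guest split across eax (low 32 bits) and edx (high 32 bits).
 */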
static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data))
		kvm_inject_gp(&svm->vcpu, 0);
	else {
		KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
			    (u32)(data >> 32), handler);

		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!svm_has(SVM_FEATURE_LBRV)) {
			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				  __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		if (data & (1ULL << 0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		/*
		 * Just discard all writes to the performance counters; this
		 * should keep both older linux and windows 64-bit guests
		 * happy
		 */
		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);

		break;
	case MSR_VM_HSAVE_PA:
		svm->hsave_msr = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
		    handler);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data))
		kvm_inject_gp(&svm->vcpu, 0);
	else
		skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm, kvm_run);
	else
		return rdmsr_interception(svm, kvm_run);
}

static int interrupt_window_interception(struct vcpu_svm *svm,
					 struct kvm_run *kvm_run)
{
	KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);

	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (kvm_run->request_interrupt_window &&
	    !svm->vcpu.arch.irq_summary) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

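/*
 * Exit handler dispatch table, indexed by the SVM exit code. Exit codes
 * without an entry (or beyond the array) are reported to user space as
 * KVM_EXIT_UNKNOWN by handle_exit() below.
 */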
static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0] = emulate_on_interception,
	[SVM_EXIT_READ_CR3] = emulate_on_interception,
	[SVM_EXIT_READ_CR4] = emulate_on_interception,
	[SVM_EXIT_READ_CR8] = emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0] = emulate_on_interception,
	[SVM_EXIT_WRITE_CR3] = emulate_on_interception,
	[SVM_EXIT_WRITE_CR4] = emulate_on_interception,
	[SVM_EXIT_WRITE_CR8] = cr8_write_interception,
	[SVM_EXIT_READ_DR0] = emulate_on_interception,
	[SVM_EXIT_READ_DR1] = emulate_on_interception,
	[SVM_EXIT_READ_DR2] = emulate_on_interception,
	[SVM_EXIT_READ_DR3] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR0] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR1] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR2] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR3] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR5] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR7] = emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
	[SVM_EXIT_INTR] = intr_interception,
	[SVM_EXIT_NMI] = nmi_interception,
	[SVM_EXIT_SMI] = nop_on_interception,
	[SVM_EXIT_INIT] = nop_on_interception,
	[SVM_EXIT_VINTR] = interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
	[SVM_EXIT_CPUID] = cpuid_interception,
	[SVM_EXIT_INVD] = emulate_on_interception,
	[SVM_EXIT_HLT] = halt_interception,
	[SVM_EXIT_INVLPG] = invlpg_interception,
	[SVM_EXIT_INVLPGA] = invalid_op_interception,
	[SVM_EXIT_IOIO] = io_interception,
	[SVM_EXIT_MSR] = msr_interception,
	[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
	[SVM_EXIT_SHUTDOWN] = shutdown_interception,
	[SVM_EXIT_VMRUN] = vmrun_interception,
	[SVM_EXIT_VMMCALL] = vmmcall_interception,
	[SVM_EXIT_VMLOAD] = vmload_interception,
	[SVM_EXIT_VMSAVE] = vmsave_interception,
	[SVM_EXIT_STGI] = stgi_interception,
	[SVM_EXIT_CLGI] = clgi_interception,
	[SVM_EXIT_SKINIT] = invalid_op_interception,
	[SVM_EXIT_WBINVD] = emulate_on_interception,
	[SVM_EXIT_MONITOR] = invalid_op_interception,
	[SVM_EXIT_MWAIT] = invalid_op_interception,
	[SVM_EXIT_NPF] = pf_interception,
};

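/*
 * Top-level exit handler. For a nested guest the exit is first offered to
 * the L1 hypervisor; only if L1 does not intercept it is it handled here.
 * With NPT enabled, CR0/CR3 accesses are not intercepted, so the shadow
 * copies in vcpu->arch have to be synced back from the VMCB on every exit.
 */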
static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_code = svm->vmcb->control.exit_code;

	KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
		    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);

	if (is_nested(svm)) {
		nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
			    exit_code, svm->vmcb->control.exit_info_1,
			    svm->vmcb->control.exit_info_2, svm->vmcb->save.rip);
		if (nested_svm_exit_handled(svm, true)) {
			nested_svm_vmexit(svm);
			nsvm_printk("-> #VMEXIT\n");
			return 1;
		}
	}

	if (npt_enabled) {
		int mmu_reload = 0;
		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
			mmu_reload = 1;
		}
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
		vcpu->arch.cr3 = svm->vmcb->save.cr3;
		if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
			if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
				kvm_inject_gp(vcpu, 0);
				return 1;
			}
		}
		if (mmu_reload) {
			kvm_mmu_reset_context(vcpu);
			kvm_mmu_load(vcpu);
		}
	}

	kvm_reput_irq(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (svm->vcpu.cpu != cpu ||
	    svm->asid_generation != svm_data->asid_generation)
		new_asid(svm, svm_data);
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);

	++svm->vcpu.stat.irq_injections;
	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	nested_svm_intr(svm);

	svm_inject_irq(svm, irq);
}

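/*
 * Only intercept CR8 writes while a pending interrupt could be masked by
 * the current TPR; this keeps the frequent TPR updates of TPR-polling
 * guests out of the hypervisor whenever they cannot change anything.
 */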
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int max_irr, tpr;

	if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
		return;

	vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;

	max_irr = kvm_lapic_find_highest_irr(vcpu);
	if (max_irr == -1)
		return;

	tpr = kvm_lapic_get_cr8(vcpu) << 4;

	if (tpr >= (max_irr & 0xf0))
		vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
}

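/*
 * Interrupt injection: first reinject any event that was cut short by the
 * exit (exit_int_info), then try to deliver a pending interrupt. If the
 * guest is not currently interruptible, request a VINTR exit so we get
 * notified as soon as the interrupt window opens.
 */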
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int intr_vector = -1;

	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
		intr_vector = vmcb->control.exit_int_info &
			      SVM_EVTINJ_VEC_MASK;
		vmcb->control.exit_int_info = 0;
		svm_inject_irq(svm, intr_vector);
		goto out;
	}

	if (vmcb->control.int_ctl & V_IRQ_MASK)
		goto out;

	if (!kvm_cpu_has_interrupt(vcpu))
		goto out;

	if (nested_svm_intr(svm))
		goto out;

	if (!(svm->vcpu.arch.hflags & HF_GIF_MASK))
		goto out;

	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
		/* unable to deliver irq, set pending irq */
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
		goto out;
	}
	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
	intr_vector = kvm_cpu_get_interrupt(vcpu);
	svm_inject_irq(svm, intr_vector);
out:
	update_cr8_intercept(vcpu);
}

static void kvm_reput_irq(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;

	if ((control->int_ctl & V_IRQ_MASK)
	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(&svm->vcpu, control->int_vector);
	}

	svm->vcpu.arch.interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static void svm_do_inject_vector(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	svm_inject_irq(svm, irq);
}

static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	if (nested_svm_intr(svm))
		return;

	svm->vcpu.arch.interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (svm->vmcb->save.rflags & X86_EFLAGS_IF) &&
		 (svm->vcpu.arch.hflags & HF_GIF_MASK));

	if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		svm_do_inject_vector(svm);

	/*
	 * Interrupts blocked. Wait for unblock.
	 */
	if (!svm->vcpu.arch.interrupt_window_open &&
	    (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
		svm_set_vintr(svm);
	else
		svm_clear_vintr(svm);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_lapic_set_tpr(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

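/*
 * The world switch itself. The R macro abstracts over 32/64-bit register
 * names so one asm block serves both builds: guest GPRs are loaded from
 * the vcpu, VMLOAD/VMRUN/VMSAVE execute with rax holding the VMCB's
 * physical address, and the guest registers are written back afterwards.
 * rax, rsp and rip travel through the VMCB save area instead.
 */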
#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif

static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	pre_svm_run(svm);

	sync_lapic_to_cr8(vcpu);

	save_host_msrs(vcpu);
	fs_selector = kvm_read_fs();
	gs_selector = kvm_read_gs();
	ldt_selector = kvm_read_ldt();
	svm->host_cr2 = kvm_read_cr2();
	if (!is_nested(svm))
		svm->vmcb->save.cr2 = vcpu->arch.cr2;
	/* required for live migration with NPT */
	if (npt_enabled)
		svm->vmcb->save.cr3 = vcpu->arch.cr3;

	clgi();

	local_irq_enable();

	asm volatile (
		"push %%"R"bp; \n\t"
		"mov %c[rbx](%[svm]), %%"R"bx \n\t"
		"mov %c[rcx](%[svm]), %%"R"cx \n\t"
		"mov %c[rdx](%[svm]), %%"R"dx \n\t"
		"mov %c[rsi](%[svm]), %%"R"si \n\t"
		"mov %c[rdi](%[svm]), %%"R"di \n\t"
		"mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]), %%r8 \n\t"
		"mov %c[r9](%[svm]), %%r9 \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%"R"ax \n\t"
		"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%"R"ax \n\t"

		/* Save guest registers, load host registers */
		"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
		"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
		"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
		"mov %%"R"si, %c[rsi](%[svm]) \n\t"
		"mov %%"R"di, %c[rdi](%[svm]) \n\t"
		"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8, %c[r8](%[svm]) \n\t"
		"mov %%r9, %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		"pop %%"R"bp"
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
		, R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#endif
		);

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	kvm_write_cr2(svm->host_cr2);

	kvm_load_fs(fs_selector);
	kvm_load_gs(gs_selector);
	kvm_load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	local_irq_disable();

	stgi();

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;
}

#undef R

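/*
 * With NPT the guest manages its own CR3 and we only update the nested
 * page table root; without it, this loads the shadow page table root. A
 * new ASID is forced in both cases so stale translations are dropped.
 */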
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (npt_enabled) {
		svm->vmcb->control.nested_cr3 = root;
		force_new_asid(vcpu);
		return;
	}

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static int svm_get_mt_mask_shift(void)
{
	return 0;
}

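/*
 * kvm_x86_ops wires the architecture-neutral KVM core to the SVM-specific
 * implementations above; the VMX backend fills in the same table with its
 * own handlers.
 */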
static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = svm_patch_hypercall,
	.get_irq = svm_get_irq,
	.set_irq = svm_set_irq,
	.queue_exception = svm_queue_exception,
	.exception_injected = svm_exception_injected,
	.inject_pending_irq = svm_intr_assist,
	.inject_pending_vectors = do_interrupt_requests,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask_shift = svm_get_mt_mask_shift,
};

static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)