Avi Kivity6aa8b732006-12-10 02:21:36 -08001/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * AMD SVM support
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
7 *
8 * Authors:
9 * Yaniv Kamay <yaniv@qumranet.com>
10 * Avi Kivity <avi@qumranet.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2. See
13 * the COPYING file in the top-level directory.
14 *
15 */
Avi Kivityedf88412007-12-16 11:02:48 +020016#include <linux/kvm_host.h>
17
Avi Kivitye4956062007-06-28 14:15:57 -040018#include "kvm_svm.h"
Eddie Dong85f455f2007-07-06 12:20:49 +030019#include "irq.h"
Zhang Xiantao1d737c82007-12-14 09:35:10 +080020#include "mmu.h"
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -030021#include "kvm_cache_regs.h"
Avi Kivitye4956062007-06-28 14:15:57 -040022
Avi Kivity6aa8b732006-12-10 02:21:36 -080023#include <linux/module.h>
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +020024#include <linux/kernel.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080025#include <linux/vmalloc.h>
26#include <linux/highmem.h>
Alexey Dobriyane8edc6e2007-05-21 01:22:52 +040027#include <linux/sched.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080028
Avi Kivitye4956062007-06-28 14:15:57 -040029#include <asm/desc.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080030
Avi Kivity4ecac3f2008-05-13 13:23:38 +030031#define __ex(x) __kvm_handle_fault_on_reboot(x)
32
Avi Kivity6aa8b732006-12-10 02:21:36 -080033MODULE_AUTHOR("Qumranet");
34MODULE_LICENSE("GPL");
35
36#define IOPM_ALLOC_ORDER 2
37#define MSRPM_ALLOC_ORDER 1
38
Avi Kivity6aa8b732006-12-10 02:21:36 -080039#define DR7_GD_MASK (1 << 13)
40#define DR6_BD_MASK (1 << 13)
Avi Kivity6aa8b732006-12-10 02:21:36 -080041
42#define SEG_TYPE_LDT 2
43#define SEG_TYPE_BUSY_TSS16 3
44
Joerg Roedel80b77062007-03-30 17:02:14 +030045#define SVM_FEATURE_NPT (1 << 0)
46#define SVM_FEATURE_LBRV (1 << 1)
Amit Shah94c935a12008-08-18 13:11:46 +030047#define SVM_FEATURE_SVML (1 << 2)
Joerg Roedel80b77062007-03-30 17:02:14 +030048
Joerg Roedel24e09cb2008-02-13 18:58:47 +010049#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
50
Joerg Roedel709ddeb2008-02-07 13:47:45 +010051/* enable NPT for AMD64 and X86 with PAE */
52#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
53static bool npt_enabled = true;
54#else
Joerg Roedele3da3ac2008-02-07 13:47:39 +010055static bool npt_enabled = false;
Joerg Roedel709ddeb2008-02-07 13:47:45 +010056#endif
Joerg Roedel6c7dac72008-02-07 13:47:40 +010057static int npt = 1;
58
59module_param(npt, int, S_IRUGO);
Joerg Roedele3da3ac2008-02-07 13:47:39 +010060
Avi Kivity04d2cc72007-09-10 18:10:54 +030061static void kvm_reput_irq(struct vcpu_svm *svm);
Joerg Roedel44874f82008-08-27 14:18:43 +020062static void svm_flush_tlb(struct kvm_vcpu *vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +030063
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -040064static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
65{
Rusty Russellfb3f0f52007-07-27 17:16:56 +100066 return container_of(vcpu, struct vcpu_svm, vcpu);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -040067}
68
Harvey Harrison4866d5e2008-02-19 10:32:02 -080069static unsigned long iopm_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -080070
71struct kvm_ldttss_desc {
72 u16 limit0;
73 u16 base0;
74 unsigned base1 : 8, type : 5, dpl : 2, p : 1;
75 unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
76 u32 base3;
77 u32 zero1;
78} __attribute__((packed));
79
80struct svm_cpu_data {
81 int cpu;
82
Avi Kivity5008fdf2007-04-02 13:05:50 +030083 u64 asid_generation;
84 u32 max_asid;
85 u32 next_asid;
Avi Kivity6aa8b732006-12-10 02:21:36 -080086 struct kvm_ldttss_desc *tss_desc;
87
88 struct page *save_area;
89};
90
91static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
Joerg Roedel80b77062007-03-30 17:02:14 +030092static uint32_t svm_features;
Avi Kivity6aa8b732006-12-10 02:21:36 -080093
94struct svm_init_data {
95 int cpu;
96 int r;
97};
98
99static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
100
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +0200101#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800102#define MSRS_RANGE_SIZE 2048
103#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
104
105#define MAX_INST_SIZE 15
106
Joerg Roedel80b77062007-03-30 17:02:14 +0300107static inline u32 svm_has(u32 feat)
108{
109 return svm_features & feat;
110}
111
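/*
 * Remove and return the lowest pending interrupt vector.  irq_summary
 * carries one bit per irq_pending word; clear it once that word runs
 * out of pending bits.
 */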
Avi Kivity6aa8b732006-12-10 02:21:36 -0800112static inline u8 pop_irq(struct kvm_vcpu *vcpu)
113{
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800114 int word_index = __ffs(vcpu->arch.irq_summary);
115 int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800116 int irq = word_index * BITS_PER_LONG + bit_index;
117
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800118 clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
119 if (!vcpu->arch.irq_pending[word_index])
120 clear_bit(word_index, &vcpu->arch.irq_summary);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800121 return irq;
122}
123
124static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
125{
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800126 set_bit(irq, vcpu->arch.irq_pending);
127 set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800128}
129
130static inline void clgi(void)
131{
Avi Kivity4ecac3f2008-05-13 13:23:38 +0300132 asm volatile (__ex(SVM_CLGI));
Avi Kivity6aa8b732006-12-10 02:21:36 -0800133}
134
135static inline void stgi(void)
136{
Avi Kivity4ecac3f2008-05-13 13:23:38 +0300137 asm volatile (__ex(SVM_STGI));
Avi Kivity6aa8b732006-12-10 02:21:36 -0800138}
139
140static inline void invlpga(unsigned long addr, u32 asid)
141{
Avi Kivity4ecac3f2008-05-13 13:23:38 +0300142 asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
Avi Kivity6aa8b732006-12-10 02:21:36 -0800143}
144
145static inline unsigned long kvm_read_cr2(void)
146{
147 unsigned long cr2;
148
149 asm volatile ("mov %%cr2, %0" : "=r" (cr2));
150 return cr2;
151}
152
153static inline void kvm_write_cr2(unsigned long val)
154{
155 asm volatile ("mov %0, %%cr2" :: "r" (val));
156}
157
158static inline unsigned long read_dr6(void)
159{
160 unsigned long dr6;
161
162 asm volatile ("mov %%dr6, %0" : "=r" (dr6));
163 return dr6;
164}
165
166static inline void write_dr6(unsigned long val)
167{
168 asm volatile ("mov %0, %%dr6" :: "r" (val));
169}
170
171static inline unsigned long read_dr7(void)
172{
173 unsigned long dr7;
174
175 asm volatile ("mov %%dr7, %0" : "=r" (dr7));
176 return dr7;
177}
178
179static inline void write_dr7(unsigned long val)
180{
181 asm volatile ("mov %0, %%dr7" :: "r" (val));
182}
183
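/*
 * Invalidating the vcpu's ASID generation forces a fresh ASID (and with
 * it a guest TLB flush) to be picked up before the next VMRUN.
 */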
Avi Kivity6aa8b732006-12-10 02:21:36 -0800184static inline void force_new_asid(struct kvm_vcpu *vcpu)
185{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400186 to_svm(vcpu)->asid_generation--;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800187}
188
189static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
190{
191 force_new_asid(vcpu);
192}
193
194static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
195{
Joerg Roedel709ddeb2008-02-07 13:47:45 +0100196 if (!npt_enabled && !(efer & EFER_LMA))
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -0600197 efer &= ~EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800198
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400199 to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800200 vcpu->arch.shadow_efer = efer;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800201}
202
Avi Kivity298101d2007-11-25 13:41:11 +0200203static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
204 bool has_error_code, u32 error_code)
205{
206 struct vcpu_svm *svm = to_svm(vcpu);
207
208 svm->vmcb->control.event_inj = nr
209 | SVM_EVTINJ_VALID
210 | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
211 | SVM_EVTINJ_TYPE_EXEPT;
212 svm->vmcb->control.event_inj_err = error_code;
213}
214
215static bool svm_exception_injected(struct kvm_vcpu *vcpu)
216{
217 struct vcpu_svm *svm = to_svm(vcpu);
218
219 return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
220}
221
Avi Kivity6aa8b732006-12-10 02:21:36 -0800222static int is_external_interrupt(u32 info)
223{
224 info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
225 return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
226}
227
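/*
 * Advance the guest RIP past the intercepted instruction (next_rip is
 * filled in by the exit handler), drop the interrupt shadow and reopen
 * the interrupt window.
 */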
228static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
229{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400230 struct vcpu_svm *svm = to_svm(vcpu);
231
232 if (!svm->next_rip) {
Harvey Harrisonb8688d52008-03-03 12:59:56 -0800233 printk(KERN_DEBUG "%s: NOP\n", __func__);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800234 return;
235 }
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -0300236 if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
237 printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
238 __func__, kvm_rip_read(vcpu), svm->next_rip);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800239
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -0300240 kvm_rip_write(vcpu, svm->next_rip);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400241 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
Dor Laorc1150d82007-01-05 16:36:24 -0800242
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800243 vcpu->arch.interrupt_window_open = 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800244}
245
246static int has_svm(void)
247{
248 uint32_t eax, ebx, ecx, edx;
249
Avi Kivity1e885462006-12-29 16:49:34 -0800250 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
Avi Kivity6aa8b732006-12-10 02:21:36 -0800251 printk(KERN_INFO "has_svm: not amd\n");
252 return 0;
253 }
254
255 cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
256 if (eax < SVM_CPUID_FUNC) {
257 printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
258 return 0;
259 }
260
261 cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
262 if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
263 printk(KERN_DEBUG "has_svm: svm not available\n");
264 return 0;
265 }
266 return 1;
267}
268
269static void svm_hardware_disable(void *garbage)
270{
Joerg Roedel0da1db752008-07-02 16:02:11 +0200271 uint64_t efer;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800272
Joerg Roedel0da1db752008-07-02 16:02:11 +0200273 wrmsrl(MSR_VM_HSAVE_PA, 0);
274 rdmsrl(MSR_EFER, efer);
275 wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800276}
277
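/*
 * Per-cpu SVM enable: initialize the ASID pool from CPUID 0x8000000a,
 * set EFER.SVME and point MSR_VM_HSAVE_PA at this cpu's host save area.
 */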
278static void svm_hardware_enable(void *garbage)
279{
280
281 struct svm_cpu_data *svm_data;
282 uint64_t efer;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800283 struct desc_ptr gdt_descr;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800284 struct desc_struct *gdt;
285 int me = raw_smp_processor_id();
286
287 if (!has_svm()) {
288 printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
289 return;
290 }
291 svm_data = per_cpu(svm_data, me);
292
293 if (!svm_data) {
294 printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
295 me);
296 return;
297 }
298
299 svm_data->asid_generation = 1;
300 svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
301 svm_data->next_asid = svm_data->max_asid + 1;
302
Mike Dayd77c26f2007-10-08 09:02:08 -0400303 asm volatile ("sgdt %0" : "=m"(gdt_descr));
Avi Kivity6aa8b732006-12-10 02:21:36 -0800304 gdt = (struct desc_struct *)gdt_descr.address;
305 svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
306
307 rdmsrl(MSR_EFER, efer);
308 wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);
309
310 wrmsrl(MSR_VM_HSAVE_PA,
311 page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
312}
313
Joerg Roedel0da1db752008-07-02 16:02:11 +0200314static void svm_cpu_uninit(int cpu)
315{
316 struct svm_cpu_data *svm_data
317 = per_cpu(svm_data, raw_smp_processor_id());
318
319 if (!svm_data)
320 return;
321
322 per_cpu(svm_data, raw_smp_processor_id()) = NULL;
323 __free_page(svm_data->save_area);
324 kfree(svm_data);
325}
326
Avi Kivity6aa8b732006-12-10 02:21:36 -0800327static int svm_cpu_init(int cpu)
328{
329 struct svm_cpu_data *svm_data;
330 int r;
331
332 svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
333 if (!svm_data)
334 return -ENOMEM;
335 svm_data->cpu = cpu;
336 svm_data->save_area = alloc_page(GFP_KERNEL);
337 r = -ENOMEM;
338 if (!svm_data->save_area)
339 goto err_1;
340
341 per_cpu(svm_data, cpu) = svm_data;
342
343 return 0;
344
345err_1:
346 kfree(svm_data);
347 return r;
348
349}
350
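/*
 * Each MSR owns two bits in the permission map: bit 0 intercepts reads,
 * bit 1 intercepts writes (a set bit means "intercept").  Only MSRs
 * inside the three ranges in msrpm_ranges[] can be configured here;
 * anything else hits the BUG() below.
 */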
Rusty Russellbfc733a2007-07-31 20:42:42 +1000351static void set_msr_interception(u32 *msrpm, unsigned msr,
352 int read, int write)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800353{
354 int i;
355
356 for (i = 0; i < NUM_MSR_MAPS; i++) {
357 if (msr >= msrpm_ranges[i] &&
358 msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
359 u32 msr_offset = (i * MSRS_IN_RANGE + msr -
360 msrpm_ranges[i]) * 2;
361
362 u32 *base = msrpm + (msr_offset / 32);
363 u32 msr_shift = msr_offset % 32;
364 u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
365 *base = (*base & ~(0x3 << msr_shift)) |
366 (mask << msr_shift);
Rusty Russellbfc733a2007-07-31 20:42:42 +1000367 return;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800368 }
369 }
Rusty Russellbfc733a2007-07-31 20:42:42 +1000370 BUG();
Avi Kivity6aa8b732006-12-10 02:21:36 -0800371}
372
Joerg Roedelf65c2292008-02-13 18:58:46 +0100373static void svm_vcpu_init_msrpm(u32 *msrpm)
374{
375 memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
376
377#ifdef CONFIG_X86_64
378 set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
379 set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
380 set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
381 set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
382 set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
383 set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
384#endif
385 set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
386 set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
387 set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
388 set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
389}
390
Joerg Roedel24e09cb2008-02-13 18:58:47 +0100391static void svm_enable_lbrv(struct vcpu_svm *svm)
392{
393 u32 *msrpm = svm->msrpm;
394
395 svm->vmcb->control.lbr_ctl = 1;
396 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
397 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
398 set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
399 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
400}
401
402static void svm_disable_lbrv(struct vcpu_svm *svm)
403{
404 u32 *msrpm = svm->msrpm;
405
406 svm->vmcb->control.lbr_ctl = 0;
407 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
408 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
409 set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
410 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
411}
412
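/*
 * Module-wide setup: allocate the I/O permission map, enable EFER.NX
 * handling if the host supports it, set up the per-cpu data and decide
 * whether nested paging can be used.
 */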
Avi Kivity6aa8b732006-12-10 02:21:36 -0800413static __init int svm_hardware_setup(void)
414{
415 int cpu;
416 struct page *iopm_pages;
Joerg Roedelf65c2292008-02-13 18:58:46 +0100417 void *iopm_va;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800418 int r;
419
Avi Kivity6aa8b732006-12-10 02:21:36 -0800420 iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
421
422 if (!iopm_pages)
423 return -ENOMEM;
Anthony Liguoric8681332007-04-30 09:48:11 +0300424
425 iopm_va = page_address(iopm_pages);
426 memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
427 clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
Avi Kivity6aa8b732006-12-10 02:21:36 -0800428 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
429
Joerg Roedel50a37eb2008-01-31 14:57:38 +0100430 if (boot_cpu_has(X86_FEATURE_NX))
431 kvm_enable_efer_bits(EFER_NX);
432
Avi Kivity6aa8b732006-12-10 02:21:36 -0800433 for_each_online_cpu(cpu) {
434 r = svm_cpu_init(cpu);
435 if (r)
Joerg Roedelf65c2292008-02-13 18:58:46 +0100436 goto err;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800437 }
Joerg Roedel33bd6a02008-02-07 13:47:38 +0100438
439 svm_features = cpuid_edx(SVM_CPUID_FUNC);
440
Joerg Roedele3da3ac2008-02-07 13:47:39 +0100441 if (!svm_has(SVM_FEATURE_NPT))
442 npt_enabled = false;
443
Joerg Roedel6c7dac72008-02-07 13:47:40 +0100444 if (npt_enabled && !npt) {
445 printk(KERN_INFO "kvm: Nested Paging disabled\n");
446 npt_enabled = false;
447 }
448
Joerg Roedel18552672008-02-07 13:47:41 +0100449 if (npt_enabled) {
Joerg Roedele3da3ac2008-02-07 13:47:39 +0100450 printk(KERN_INFO "kvm: Nested Paging enabled\n");
Joerg Roedel18552672008-02-07 13:47:41 +0100451 kvm_enable_tdp();
Joerg Roedel5f4cb662008-07-14 20:36:36 +0200452 } else
453 kvm_disable_tdp();
Joerg Roedele3da3ac2008-02-07 13:47:39 +0100454
Avi Kivity6aa8b732006-12-10 02:21:36 -0800455 return 0;
456
Joerg Roedelf65c2292008-02-13 18:58:46 +0100457err:
Avi Kivity6aa8b732006-12-10 02:21:36 -0800458 __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
459 iopm_base = 0;
460 return r;
461}
462
463static __exit void svm_hardware_unsetup(void)
464{
Joerg Roedel0da1db752008-07-02 16:02:11 +0200465 int cpu;
466
467 for_each_online_cpu(cpu)
468 svm_cpu_uninit(cpu);
469
Avi Kivity6aa8b732006-12-10 02:21:36 -0800470 __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
Joerg Roedelf65c2292008-02-13 18:58:46 +0100471 iopm_base = 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800472}
473
474static void init_seg(struct vmcb_seg *seg)
475{
476 seg->selector = 0;
477 seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
478 SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
479 seg->limit = 0xffff;
480 seg->base = 0;
481}
482
483static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
484{
485 seg->selector = 0;
486 seg->attrib = SVM_SELECTOR_P_MASK | type;
487 seg->limit = 0xffff;
488 seg->base = 0;
489}
490
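/*
 * Set up the intercept bitmaps and load the guest state with the
 * architectural reset values (CS 0xf000, RIP 0xfff0, etc.).  With
 * nested paging the #PF and cr0/cr3 intercepts are dropped again below.
 */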
Joerg Roedele6101a92008-02-13 18:58:45 +0100491static void init_vmcb(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800492{
Joerg Roedele6101a92008-02-13 18:58:45 +0100493 struct vmcb_control_area *control = &svm->vmcb->control;
494 struct vmcb_save_area *save = &svm->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800495
496 control->intercept_cr_read = INTERCEPT_CR0_MASK |
497 INTERCEPT_CR3_MASK |
Joerg Roedel649d6862008-04-16 16:51:15 +0200498 INTERCEPT_CR4_MASK;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800499
500 control->intercept_cr_write = INTERCEPT_CR0_MASK |
501 INTERCEPT_CR3_MASK |
Avi Kivity80a81192007-12-06 19:50:00 +0200502 INTERCEPT_CR4_MASK |
503 INTERCEPT_CR8_MASK;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800504
505 control->intercept_dr_read = INTERCEPT_DR0_MASK |
506 INTERCEPT_DR1_MASK |
507 INTERCEPT_DR2_MASK |
508 INTERCEPT_DR3_MASK;
509
510 control->intercept_dr_write = INTERCEPT_DR0_MASK |
511 INTERCEPT_DR1_MASK |
512 INTERCEPT_DR2_MASK |
513 INTERCEPT_DR3_MASK |
514 INTERCEPT_DR5_MASK |
515 INTERCEPT_DR7_MASK;
516
Anthony Liguori7aa81cc2007-09-17 14:57:50 -0500517 control->intercept_exceptions = (1 << PF_VECTOR) |
Joerg Roedel53371b52008-04-09 14:15:30 +0200518 (1 << UD_VECTOR) |
519 (1 << MC_VECTOR);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800520
521
522 control->intercept = (1ULL << INTERCEPT_INTR) |
523 (1ULL << INTERCEPT_NMI) |
Joerg Roedel01525272007-02-19 14:37:47 +0200524 (1ULL << INTERCEPT_SMI) |
Avi Kivity6aa8b732006-12-10 02:21:36 -0800525 (1ULL << INTERCEPT_CPUID) |
Avi Kivitycf5a94d2007-10-28 16:11:58 +0200526 (1ULL << INTERCEPT_INVD) |
Avi Kivity6aa8b732006-12-10 02:21:36 -0800527 (1ULL << INTERCEPT_HLT) |
Marcelo Tosattia7052892008-09-23 13:18:35 -0300528 (1ULL << INTERCEPT_INVLPG) |
Avi Kivity6aa8b732006-12-10 02:21:36 -0800529 (1ULL << INTERCEPT_INVLPGA) |
530 (1ULL << INTERCEPT_IOIO_PROT) |
531 (1ULL << INTERCEPT_MSR_PROT) |
532 (1ULL << INTERCEPT_TASK_SWITCH) |
Joerg Roedel46fe4dd2007-01-26 00:56:42 -0800533 (1ULL << INTERCEPT_SHUTDOWN) |
Avi Kivity6aa8b732006-12-10 02:21:36 -0800534 (1ULL << INTERCEPT_VMRUN) |
535 (1ULL << INTERCEPT_VMMCALL) |
536 (1ULL << INTERCEPT_VMLOAD) |
537 (1ULL << INTERCEPT_VMSAVE) |
538 (1ULL << INTERCEPT_STGI) |
539 (1ULL << INTERCEPT_CLGI) |
Joerg Roedel916ce232007-03-21 19:47:00 +0100540 (1ULL << INTERCEPT_SKINIT) |
Avi Kivitycf5a94d2007-10-28 16:11:58 +0200541 (1ULL << INTERCEPT_WBINVD) |
Joerg Roedel916ce232007-03-21 19:47:00 +0100542 (1ULL << INTERCEPT_MONITOR) |
543 (1ULL << INTERCEPT_MWAIT);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800544
545 control->iopm_base_pa = iopm_base;
Joerg Roedelf65c2292008-02-13 18:58:46 +0100546 control->msrpm_base_pa = __pa(svm->msrpm);
Avi Kivity0cc50642007-03-25 12:07:27 +0200547 control->tsc_offset = 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800548 control->int_ctl = V_INTR_MASKING_MASK;
549
550 init_seg(&save->es);
551 init_seg(&save->ss);
552 init_seg(&save->ds);
553 init_seg(&save->fs);
554 init_seg(&save->gs);
555
556 save->cs.selector = 0xf000;
557 /* Executable/Readable Code Segment */
558 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
559 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
560 save->cs.limit = 0xffff;
Avi Kivityd92899a2007-02-12 00:54:38 -0800561 /*
562 * cs.base should really be 0xffff0000, but vmx can't handle that, so
563 * be consistent with it.
564 *
565 * Replace when we have real mode working for vmx.
566 */
567 save->cs.base = 0xf0000;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800568
569 save->gdtr.limit = 0xffff;
570 save->idtr.limit = 0xffff;
571
572 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
573 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
574
575 save->efer = MSR_EFER_SVME_MASK;
Mike Dayd77c26f2007-10-08 09:02:08 -0400576 save->dr6 = 0xffff0ff0;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800577 save->dr7 = 0x400;
578 save->rflags = 2;
579 save->rip = 0x0000fff0;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -0300580 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800581
582 /*
583 * cr0 val on cpu init should be 0x60000010; we enable the cpu
584 * cache by default. The orderly way is to enable the cache in the BIOS.
585 */
Rusty Russell707d92fa2007-07-17 23:19:08 +1000586 save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
Rusty Russell66aee912007-07-17 23:34:16 +1000587 save->cr4 = X86_CR4_PAE;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800588 /* rdx = ?? */
Joerg Roedel709ddeb2008-02-07 13:47:45 +0100589
590 if (npt_enabled) {
591 /* Setup VMCB for Nested Paging */
592 control->nested_ctl = 1;
Marcelo Tosattia7052892008-09-23 13:18:35 -0300593 control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
594 (1ULL << INTERCEPT_INVLPG));
Joerg Roedel709ddeb2008-02-07 13:47:45 +0100595 control->intercept_exceptions &= ~(1 << PF_VECTOR);
596 control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
597 INTERCEPT_CR3_MASK);
598 control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
599 INTERCEPT_CR3_MASK);
600 save->g_pat = 0x0007040600070406ULL;
601 /* enable caching because the QEMU Bios doesn't enable it */
602 save->cr0 = X86_CR0_ET;
603 save->cr3 = 0;
604 save->cr4 = 0;
605 }
Avi Kivitya79d2f12008-04-14 13:10:21 +0300606 force_new_asid(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800607}
608
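/*
 * Reset the vcpu: reinitialize the VMCB and, for application processors,
 * start execution at the CS:IP derived from the SIPI vector.
 */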
Avi Kivitye00c8cf2007-10-21 11:00:39 +0200609static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
Avi Kivity04d2cc72007-09-10 18:10:54 +0300610{
611 struct vcpu_svm *svm = to_svm(vcpu);
612
Joerg Roedele6101a92008-02-13 18:58:45 +0100613 init_vmcb(svm);
Avi Kivity70433382007-11-07 12:57:23 +0200614
615 if (vcpu->vcpu_id != 0) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -0300616 kvm_rip_write(vcpu, 0);
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800617 svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
618 svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
Avi Kivity70433382007-11-07 12:57:23 +0200619 }
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -0300620 vcpu->arch.regs_avail = ~0;
621 vcpu->arch.regs_dirty = ~0;
Avi Kivitye00c8cf2007-10-21 11:00:39 +0200622
623 return 0;
Avi Kivity04d2cc72007-09-10 18:10:54 +0300624}
625
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000626static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800627{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400628 struct vcpu_svm *svm;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800629 struct page *page;
Joerg Roedelf65c2292008-02-13 18:58:46 +0100630 struct page *msrpm_pages;
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000631 int err;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800632
Rusty Russellc16f8622007-07-30 21:12:19 +1000633 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000634 if (!svm) {
635 err = -ENOMEM;
636 goto out;
637 }
638
639 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
640 if (err)
641 goto free_svm;
642
Avi Kivity6aa8b732006-12-10 02:21:36 -0800643 page = alloc_page(GFP_KERNEL);
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000644 if (!page) {
645 err = -ENOMEM;
646 goto uninit;
647 }
Avi Kivity6aa8b732006-12-10 02:21:36 -0800648
Joerg Roedelf65c2292008-02-13 18:58:46 +0100649 err = -ENOMEM;
650 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
651 if (!msrpm_pages)
652 goto uninit;
653 svm->msrpm = page_address(msrpm_pages);
654 svm_vcpu_init_msrpm(svm->msrpm);
655
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400656 svm->vmcb = page_address(page);
657 clear_page(svm->vmcb);
658 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
659 svm->asid_generation = 0;
660 memset(svm->db_regs, 0, sizeof(svm->db_regs));
Joerg Roedele6101a92008-02-13 18:58:45 +0100661 init_vmcb(svm);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400662
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000663 fx_init(&svm->vcpu);
664 svm->vcpu.fpu_active = 1;
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800665 svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000666 if (svm->vcpu.vcpu_id == 0)
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800667 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800668
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000669 return &svm->vcpu;
Avi Kivity36241b82006-12-22 01:05:20 -0800670
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000671uninit:
672 kvm_vcpu_uninit(&svm->vcpu);
673free_svm:
Rusty Russella4770342007-08-01 14:46:11 +1000674 kmem_cache_free(kvm_vcpu_cache, svm);
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000675out:
676 return ERR_PTR(err);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800677}
678
679static void svm_free_vcpu(struct kvm_vcpu *vcpu)
680{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400681 struct vcpu_svm *svm = to_svm(vcpu);
682
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000683 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
Joerg Roedelf65c2292008-02-13 18:58:46 +0100684 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000685 kvm_vcpu_uninit(vcpu);
Rusty Russella4770342007-08-01 14:46:11 +1000686 kmem_cache_free(kvm_vcpu_cache, svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800687}
688
Avi Kivity15ad7142007-07-11 18:17:21 +0300689static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800690{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400691 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +0300692 int i;
Avi Kivity0cc50642007-03-25 12:07:27 +0200693
Avi Kivity0cc50642007-03-25 12:07:27 +0200694 if (unlikely(cpu != vcpu->cpu)) {
695 u64 tsc_this, delta;
696
697 /*
698 * Make sure that the guest sees a monotonically
699 * increasing TSC.
700 */
701 rdtscll(tsc_this);
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800702 delta = vcpu->arch.host_tsc - tsc_this;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400703 svm->vmcb->control.tsc_offset += delta;
Avi Kivity0cc50642007-03-25 12:07:27 +0200704 vcpu->cpu = cpu;
Marcelo Tosatti2f599712008-05-27 12:10:20 -0300705 kvm_migrate_timers(vcpu);
Avi Kivity0cc50642007-03-25 12:07:27 +0200706 }
Anthony Liguori94dfbdb2007-04-29 11:56:06 +0300707
708 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400709 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800710}
711
712static void svm_vcpu_put(struct kvm_vcpu *vcpu)
713{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400714 struct vcpu_svm *svm = to_svm(vcpu);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +0300715 int i;
716
Avi Kivitye1beb1d2007-11-18 13:50:24 +0200717 ++vcpu->stat.host_state_reload;
Anthony Liguori94dfbdb2007-04-29 11:56:06 +0300718 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400719 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +0300720
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800721 rdtscll(vcpu->arch.host_tsc);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800722}
723
Avi Kivity6aa8b732006-12-10 02:21:36 -0800724static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
725{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400726 return to_svm(vcpu)->vmcb->save.rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800727}
728
729static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
730{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400731 to_svm(vcpu)->vmcb->save.rflags = rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800732}
733
734static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
735{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400736 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800737
738 switch (seg) {
739 case VCPU_SREG_CS: return &save->cs;
740 case VCPU_SREG_DS: return &save->ds;
741 case VCPU_SREG_ES: return &save->es;
742 case VCPU_SREG_FS: return &save->fs;
743 case VCPU_SREG_GS: return &save->gs;
744 case VCPU_SREG_SS: return &save->ss;
745 case VCPU_SREG_TR: return &save->tr;
746 case VCPU_SREG_LDTR: return &save->ldtr;
747 }
748 BUG();
Al Viro8b6d44c2007-02-09 16:38:40 +0000749 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800750}
751
752static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
753{
754 struct vmcb_seg *s = svm_seg(vcpu, seg);
755
756 return s->base;
757}
758
759static void svm_get_segment(struct kvm_vcpu *vcpu,
760 struct kvm_segment *var, int seg)
761{
762 struct vmcb_seg *s = svm_seg(vcpu, seg);
763
764 var->base = s->base;
765 var->limit = s->limit;
766 var->selector = s->selector;
767 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
768 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
769 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
770 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
771 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
772 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
773 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
774 var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
Amit Shah25022ac2008-10-27 09:04:17 +0000775
776 /*
777 * SVM always stores 0 for the 'G' bit in the CS selector in
778 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
779 * Intel's VMENTRY has a check on the 'G' bit.
780 */
781 if (seg == VCPU_SREG_CS)
782 var->g = s->limit > 0xfffff;
783
Amit Shahc0d09822008-10-27 09:04:18 +0000784 /*
785 * Work around a bug where the busy flag in the tr selector
786 * isn't exposed
787 */
788 if (seg == VCPU_SREG_TR)
789 var->type |= 0x2;
790
Avi Kivity6aa8b732006-12-10 02:21:36 -0800791 var->unusable = !var->present;
792}
793
Izik Eidus2e4d2652008-03-24 19:38:34 +0200794static int svm_get_cpl(struct kvm_vcpu *vcpu)
795{
796 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
797
798 return save->cpl;
799}
800
Avi Kivity6aa8b732006-12-10 02:21:36 -0800801static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
802{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400803 struct vcpu_svm *svm = to_svm(vcpu);
804
805 dt->limit = svm->vmcb->save.idtr.limit;
806 dt->base = svm->vmcb->save.idtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800807}
808
809static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
810{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400811 struct vcpu_svm *svm = to_svm(vcpu);
812
813 svm->vmcb->save.idtr.limit = dt->limit;
814 svm->vmcb->save.idtr.base = dt->base ;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800815}
816
817static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
818{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400819 struct vcpu_svm *svm = to_svm(vcpu);
820
821 dt->limit = svm->vmcb->save.gdtr.limit;
822 dt->base = svm->vmcb->save.gdtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800823}
824
825static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
826{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400827 struct vcpu_svm *svm = to_svm(vcpu);
828
829 svm->vmcb->save.gdtr.limit = dt->limit;
830 svm->vmcb->save.gdtr.base = dt->base ;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800831}
832
Anthony Liguori25c4c272007-04-27 09:29:21 +0300833static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -0800834{
835}
836
Avi Kivity6aa8b732006-12-10 02:21:36 -0800837static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
838{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400839 struct vcpu_svm *svm = to_svm(vcpu);
840
Avi Kivity05b3e0c2006-12-13 00:33:45 -0800841#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800842 if (vcpu->arch.shadow_efer & EFER_LME) {
Rusty Russell707d92fa2007-07-17 23:19:08 +1000843 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800844 vcpu->arch.shadow_efer |= EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -0600845 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800846 }
847
Mike Dayd77c26f2007-10-08 09:02:08 -0400848 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800849 vcpu->arch.shadow_efer &= ~EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -0600850 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800851 }
852 }
853#endif
Joerg Roedel709ddeb2008-02-07 13:47:45 +0100854 if (npt_enabled)
855 goto set;
856
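	/*
	 * Lazy FPU switching: intercept #NM while the guest's TS bit is
	 * forced on, and drop the intercept once the guest clears TS and
	 * owns the FPU again.
	 */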
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800857 if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400858 svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
Anthony Liguori7807fa62007-04-23 09:17:21 -0500859 vcpu->fpu_active = 1;
860 }
861
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800862 vcpu->arch.cr0 = cr0;
Rusty Russell707d92fa2007-07-17 23:19:08 +1000863 cr0 |= X86_CR0_PG | X86_CR0_WP;
Joerg Roedel6b390b62008-01-29 13:01:27 +0100864 if (!vcpu->fpu_active) {
865 svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
Joerg Roedel334df502008-01-21 13:09:33 +0100866 cr0 |= X86_CR0_TS;
Joerg Roedel6b390b62008-01-29 13:01:27 +0100867 }
Joerg Roedel709ddeb2008-02-07 13:47:45 +0100868set:
869 /*
870 * re-enable caching here because the QEMU bios
871 * does not do it - this results in some delay at
872 * reboot
873 */
874 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400875 svm->vmcb->save.cr0 = cr0;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800876}
877
878static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
879{
Joerg Roedel6394b642008-04-09 14:15:29 +0200880 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
Joerg Roedele5eab0c2008-09-09 19:11:51 +0200881 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
882
883 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
884 force_new_asid(vcpu);
Joerg Roedel6394b642008-04-09 14:15:29 +0200885
Joerg Roedelec077262008-04-09 14:15:28 +0200886 vcpu->arch.cr4 = cr4;
887 if (!npt_enabled)
888 cr4 |= X86_CR4_PAE;
Joerg Roedel6394b642008-04-09 14:15:29 +0200889 cr4 |= host_cr4_mce;
Joerg Roedelec077262008-04-09 14:15:28 +0200890 to_svm(vcpu)->vmcb->save.cr4 = cr4;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800891}
892
893static void svm_set_segment(struct kvm_vcpu *vcpu,
894 struct kvm_segment *var, int seg)
895{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400896 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800897 struct vmcb_seg *s = svm_seg(vcpu, seg);
898
899 s->base = var->base;
900 s->limit = var->limit;
901 s->selector = var->selector;
902 if (var->unusable)
903 s->attrib = 0;
904 else {
905 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
906 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
907 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
908 s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
909 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
910 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
911 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
912 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
913 }
914 if (seg == VCPU_SREG_CS)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400915 svm->vmcb->save.cpl
916 = (svm->vmcb->save.cs.attrib
Avi Kivity6aa8b732006-12-10 02:21:36 -0800917 >> SVM_SELECTOR_DPL_SHIFT) & 3;
918
919}
920
Avi Kivity6aa8b732006-12-10 02:21:36 -0800921static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
922{
923 return -EOPNOTSUPP;
924}
925
Eddie Dong2a8067f2007-08-06 16:29:07 +0300926static int svm_get_irq(struct kvm_vcpu *vcpu)
927{
928 struct vcpu_svm *svm = to_svm(vcpu);
929 u32 exit_int_info = svm->vmcb->control.exit_int_info;
930
931 if (is_external_interrupt(exit_int_info))
932 return exit_int_info & SVM_EVTINJ_VEC_MASK;
933 return -1;
934}
935
Avi Kivity6aa8b732006-12-10 02:21:36 -0800936static void load_host_msrs(struct kvm_vcpu *vcpu)
937{
Anthony Liguori94dfbdb2007-04-29 11:56:06 +0300938#ifdef CONFIG_X86_64
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400939 wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +0300940#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -0800941}
942
943static void save_host_msrs(struct kvm_vcpu *vcpu)
944{
Anthony Liguori94dfbdb2007-04-29 11:56:06 +0300945#ifdef CONFIG_X86_64
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400946 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +0300947#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -0800948}
949
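/*
 * Allocate the next free ASID on this cpu; when the pool wraps around,
 * start a new generation and have the hardware flush all ASIDs on the
 * next VMRUN.
 */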
Rusty Russelle756fc62007-07-30 20:07:08 +1000950static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800951{
952 if (svm_data->next_asid > svm_data->max_asid) {
953 ++svm_data->asid_generation;
954 svm_data->next_asid = 1;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400955 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800956 }
957
Rusty Russelle756fc62007-07-30 20:07:08 +1000958 svm->vcpu.cpu = svm_data->cpu;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400959 svm->asid_generation = svm_data->asid_generation;
960 svm->vmcb->control.asid = svm_data->next_asid++;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800961}
962
Avi Kivity6aa8b732006-12-10 02:21:36 -0800963static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
964{
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +0200965 unsigned long val = to_svm(vcpu)->db_regs[dr];
966 KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
967 return val;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800968}
969
970static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
971 int *exception)
972{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400973 struct vcpu_svm *svm = to_svm(vcpu);
974
Avi Kivity6aa8b732006-12-10 02:21:36 -0800975 *exception = 0;
976
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400977 if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
978 svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
979 svm->vmcb->save.dr6 |= DR6_BD_MASK;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800980 *exception = DB_VECTOR;
981 return;
982 }
983
984 switch (dr) {
985 case 0 ... 3:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400986 svm->db_regs[dr] = value;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800987 return;
988 case 4 ... 5:
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800989 if (vcpu->arch.cr4 & X86_CR4_DE) {
Avi Kivity6aa8b732006-12-10 02:21:36 -0800990 *exception = UD_VECTOR;
991 return;
992 }
993 case 7: {
994 if (value & ~((1ULL << 32) - 1)) {
995 *exception = GP_VECTOR;
996 return;
997 }
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -0400998 svm->vmcb->save.dr7 = value;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800999 return;
1000 }
1001 default:
1002 printk(KERN_DEBUG "%s: unexpected dr %u\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001003 __func__, dr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001004 *exception = UD_VECTOR;
1005 return;
1006 }
1007}
1008
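/*
 * #PF / nested page fault intercept: the faulting address and error code
 * arrive in exit_info_2/exit_info_1 and are handed to the MMU.
 */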
Rusty Russelle756fc62007-07-30 20:07:08 +10001009static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001010{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001011 u32 exit_int_info = svm->vmcb->control.exit_int_info;
Rusty Russelle756fc62007-07-30 20:07:08 +10001012 struct kvm *kvm = svm->vcpu.kvm;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001013 u64 fault_address;
1014 u32 error_code;
Avi Kivity577bdc42008-07-19 08:57:05 +03001015 bool event_injection = false;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001016
Eddie Dong85f455f2007-07-06 12:20:49 +03001017 if (!irqchip_in_kernel(kvm) &&
Avi Kivity577bdc42008-07-19 08:57:05 +03001018 is_external_interrupt(exit_int_info)) {
1019 event_injection = true;
Rusty Russelle756fc62007-07-30 20:07:08 +10001020 push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
Avi Kivity577bdc42008-07-19 08:57:05 +03001021 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001022
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001023 fault_address = svm->vmcb->control.exit_info_2;
1024 error_code = svm->vmcb->control.exit_info_1;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001025
1026 if (!npt_enabled)
1027 KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
1028 (u32)fault_address, (u32)(fault_address >> 32),
1029 handler);
Joerg Roedeld2ebb412008-04-30 17:56:04 +02001030 else
1031 KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
1032 (u32)fault_address, (u32)(fault_address >> 32),
1033 handler);
Joerg Roedel44874f82008-08-27 14:18:43 +02001034 /*
1035 * FIXME: This shouldn't be necessary here, but there is a flush
1036 * missing in the MMU code. Until we find this bug, flush the
1037 * complete TLB here on an NPF
1038 */
1039 if (npt_enabled)
1040 svm_flush_tlb(&svm->vcpu);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001041
Avi Kivity48d15032008-08-28 18:27:15 +03001042 if (!npt_enabled && event_injection)
Avi Kivity577bdc42008-07-19 08:57:05 +03001043 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
Avi Kivity30677142007-10-28 18:48:59 +02001044 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001045}
1046
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001047static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1048{
1049 int er;
1050
Sheng Yang571008d2008-01-02 14:49:22 +08001051 er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001052 if (er != EMULATE_DONE)
Avi Kivity7ee5d9402007-11-25 15:22:50 +02001053 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001054 return 1;
1055}
1056
Rusty Russelle756fc62007-07-30 20:07:08 +10001057static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Anthony Liguori7807fa62007-04-23 09:17:21 -05001058{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001059 svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001060 if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001061 svm->vmcb->save.cr0 &= ~X86_CR0_TS;
Rusty Russelle756fc62007-07-30 20:07:08 +10001062 svm->vcpu.fpu_active = 1;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001063
1064 return 1;
Anthony Liguori7807fa62007-04-23 09:17:21 -05001065}
1066
Joerg Roedel53371b52008-04-09 14:15:30 +02001067static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1068{
1069 /*
1070 * On an #MC intercept the MCE handler is not called automatically in
1071 * the host. So do it by hand here.
1072 */
1073 asm volatile (
1074 "int $0x12\n");
1075 /* not sure if we ever come back to this point */
1076
1077 return 1;
1078}
1079
Rusty Russelle756fc62007-07-30 20:07:08 +10001080static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001081{
1082 /*
1083 * VMCB is undefined after a SHUTDOWN intercept
1084 * so reinitialize it.
1085 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001086 clear_page(svm->vmcb);
Joerg Roedele6101a92008-02-13 18:58:45 +01001087 init_vmcb(svm);
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001088
1089 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1090 return 0;
1091}
1092
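/*
 * IOIO intercept: exit_info_1 encodes port, size, direction and the
 * string/rep flags; string ops go through the emulator, plain ins/outs
 * through kvm_emulate_pio().
 */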
Rusty Russelle756fc62007-07-30 20:07:08 +10001093static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001094{
Mike Dayd77c26f2007-10-08 09:02:08 -04001095 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
Avi Kivity039576c2007-03-20 12:46:50 +02001096 int size, down, in, string, rep;
1097 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001098
Rusty Russelle756fc62007-07-30 20:07:08 +10001099 ++svm->vcpu.stat.io_exits;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001100
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001101 svm->next_rip = svm->vmcb->control.exit_info_2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001102
Laurent Viviere70669a2007-08-05 10:36:40 +03001103 string = (io_info & SVM_IOIO_STR_MASK) != 0;
1104
1105 if (string) {
Laurent Vivier34273182007-09-18 11:27:37 +02001106 if (emulate_instruction(&svm->vcpu,
1107 kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
Laurent Viviere70669a2007-08-05 10:36:40 +03001108 return 0;
1109 return 1;
1110 }
1111
Avi Kivity039576c2007-03-20 12:46:50 +02001112 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
1113 port = io_info >> 16;
1114 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
Avi Kivity039576c2007-03-20 12:46:50 +02001115 rep = (io_info & SVM_IOIO_REP_MASK) != 0;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001116 down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001117
Laurent Vivier3090dd72007-08-05 10:43:32 +03001118 return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001119}
1120
Joerg Roedelc47f0982008-04-30 17:56:00 +02001121static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1122{
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001123 KVMTRACE_0D(NMI, &svm->vcpu, handler);
Joerg Roedelc47f0982008-04-30 17:56:00 +02001124 return 1;
1125}
1126
Joerg Roedela0698052008-04-30 17:56:01 +02001127static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1128{
1129 ++svm->vcpu.stat.irq_exits;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001130 KVMTRACE_0D(INTR, &svm->vcpu, handler);
Joerg Roedela0698052008-04-30 17:56:01 +02001131 return 1;
1132}
1133
Rusty Russelle756fc62007-07-30 20:07:08 +10001134static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001135{
1136 return 1;
1137}
1138
Rusty Russelle756fc62007-07-30 20:07:08 +10001139static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001140{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001141 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
Rusty Russelle756fc62007-07-30 20:07:08 +10001142 skip_emulated_instruction(&svm->vcpu);
1143 return kvm_emulate_halt(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001144}
1145
Rusty Russelle756fc62007-07-30 20:07:08 +10001146static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity02e235b2007-02-19 14:37:47 +02001147{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001148 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Rusty Russelle756fc62007-07-30 20:07:08 +10001149 skip_emulated_instruction(&svm->vcpu);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001150 kvm_emulate_hypercall(&svm->vcpu);
1151 return 1;
Avi Kivity02e235b2007-02-19 14:37:47 +02001152}
1153
Rusty Russelle756fc62007-07-30 20:07:08 +10001154static int invalid_op_interception(struct vcpu_svm *svm,
1155 struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001156{
Avi Kivity7ee5d9402007-11-25 15:22:50 +02001157 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001158 return 1;
1159}
1160
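/*
 * Task switch intercept: exit_info_1 holds the target TSS selector and
 * exit_info_2 tells us whether the switch came from an IRET or a JMP.
 */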
Rusty Russelle756fc62007-07-30 20:07:08 +10001161static int task_switch_interception(struct vcpu_svm *svm,
1162 struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001163{
Izik Eidus37817f22008-03-24 23:14:53 +02001164 u16 tss_selector;
1165
1166 tss_selector = (u16)svm->vmcb->control.exit_info_1;
1167 if (svm->vmcb->control.exit_info_2 &
1168 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
1169 return kvm_task_switch(&svm->vcpu, tss_selector,
1170 TASK_SWITCH_IRET);
1171 if (svm->vmcb->control.exit_info_2 &
1172 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
1173 return kvm_task_switch(&svm->vcpu, tss_selector,
1174 TASK_SWITCH_JMP);
1175 return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001176}
1177
Rusty Russelle756fc62007-07-30 20:07:08 +10001178static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001179{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001180 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10001181 kvm_emulate_cpuid(&svm->vcpu);
Avi Kivity06465c52007-02-28 20:46:53 +02001182 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001183}
1184
Marcelo Tosattia7052892008-09-23 13:18:35 -03001185static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1186{
1187 if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
1188 pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
1189 return 1;
1190}
1191
Rusty Russelle756fc62007-07-30 20:07:08 +10001192static int emulate_on_interception(struct vcpu_svm *svm,
1193 struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001194{
Laurent Vivier34273182007-09-18 11:27:37 +02001195 if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001196 pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001197 return 1;
1198}
1199
Joerg Roedel1d075432007-12-06 21:02:25 +01001200static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1201{
1202 emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
1203 if (irqchip_in_kernel(svm->vcpu.kvm))
1204 return 1;
1205 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
1206 return 0;
1207}
1208
Avi Kivity6aa8b732006-12-10 02:21:36 -08001209static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
1210{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001211 struct vcpu_svm *svm = to_svm(vcpu);
1212
Avi Kivity6aa8b732006-12-10 02:21:36 -08001213 switch (ecx) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001214 case MSR_IA32_TIME_STAMP_COUNTER: {
1215 u64 tsc;
1216
1217 rdtscll(tsc);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001218 *data = svm->vmcb->control.tsc_offset + tsc;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001219 break;
1220 }
Avi Kivity0e859ca2006-12-22 01:05:08 -08001221 case MSR_K6_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001222 *data = svm->vmcb->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001223 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08001224#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001225 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001226 *data = svm->vmcb->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001227 break;
1228 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001229 *data = svm->vmcb->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001230 break;
1231 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001232 *data = svm->vmcb->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001233 break;
1234 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001235 *data = svm->vmcb->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001236 break;
1237#endif
1238 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001239 *data = svm->vmcb->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001240 break;
1241 case MSR_IA32_SYSENTER_EIP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001242 *data = svm->vmcb->save.sysenter_eip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001243 break;
1244 case MSR_IA32_SYSENTER_ESP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001245 *data = svm->vmcb->save.sysenter_esp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001246 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01001247 /* Nobody will change the following 5 values in the VMCB so
1248 we can safely return them on rdmsr. They will always be 0
1249 until LBRV is implemented. */
1250 case MSR_IA32_DEBUGCTLMSR:
1251 *data = svm->vmcb->save.dbgctl;
1252 break;
1253 case MSR_IA32_LASTBRANCHFROMIP:
1254 *data = svm->vmcb->save.br_from;
1255 break;
1256 case MSR_IA32_LASTBRANCHTOIP:
1257 *data = svm->vmcb->save.br_to;
1258 break;
1259 case MSR_IA32_LASTINTFROMIP:
1260 *data = svm->vmcb->save.last_excp_from;
1261 break;
1262 case MSR_IA32_LASTINTTOIP:
1263 *data = svm->vmcb->save.last_excp_to;
1264 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001265 default:
Avi Kivity3bab1f52006-12-29 16:49:48 -08001266 return kvm_get_msr_common(vcpu, ecx, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001267 }
1268 return 0;
1269}
1270
Rusty Russelle756fc62007-07-30 20:07:08 +10001271static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001272{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001273 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
Avi Kivity6aa8b732006-12-10 02:21:36 -08001274 u64 data;
1275
Rusty Russelle756fc62007-07-30 20:07:08 +10001276 if (svm_get_msr(&svm->vcpu, ecx, &data))
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02001277 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001278 else {
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001279 KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
1280 (u32)(data >> 32), handler);
1281
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001282 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001283 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001284 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10001285 skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001286 }
1287 return 1;
1288}
1289
1290static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
1291{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001292 struct vcpu_svm *svm = to_svm(vcpu);
1293
Avi Kivity6aa8b732006-12-10 02:21:36 -08001294 switch (ecx) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001295 case MSR_IA32_TIME_STAMP_COUNTER: {
1296 u64 tsc;
1297
1298 rdtscll(tsc);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001299 svm->vmcb->control.tsc_offset = data - tsc;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001300 break;
1301 }
Avi Kivity0e859ca2006-12-22 01:05:08 -08001302 case MSR_K6_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001303 svm->vmcb->save.star = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001304 break;
Robert P. J. Day49b14f22007-01-29 13:19:50 -08001305#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001306 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001307 svm->vmcb->save.lstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001308 break;
1309 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001310 svm->vmcb->save.cstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001311 break;
1312 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001313 svm->vmcb->save.kernel_gs_base = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001314 break;
1315 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001316 svm->vmcb->save.sfmask = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001317 break;
1318#endif
1319 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001320 svm->vmcb->save.sysenter_cs = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001321 break;
1322 case MSR_IA32_SYSENTER_EIP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001323 svm->vmcb->save.sysenter_eip = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001324 break;
1325 case MSR_IA32_SYSENTER_ESP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001326 svm->vmcb->save.sysenter_esp = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001327 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01001328 case MSR_IA32_DEBUGCTLMSR:
Joerg Roedel24e09cb2008-02-13 18:58:47 +01001329 if (!svm_has(SVM_FEATURE_LBRV)) {
1330 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001331 __func__, data);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01001332 break;
1333 }
1334 if (data & DEBUGCTL_RESERVED_BITS)
1335 return 1;
1336
1337 svm->vmcb->save.dbgctl = data;
1338 if (data & (1ULL<<0))
1339 svm_enable_lbrv(svm);
1340 else
1341 svm_disable_lbrv(svm);
Joerg Roedela2938c82008-02-13 16:30:28 +01001342 break;
Joerg Roedel62b9aba2007-12-11 15:36:57 +01001343 case MSR_K7_EVNTSEL0:
1344 case MSR_K7_EVNTSEL1:
1345 case MSR_K7_EVNTSEL2:
1346 case MSR_K7_EVNTSEL3:
Chris Lalancette14ae51b2008-05-05 13:05:16 -04001347 case MSR_K7_PERFCTR0:
1348 case MSR_K7_PERFCTR1:
1349 case MSR_K7_PERFCTR2:
1350 case MSR_K7_PERFCTR3:
Joerg Roedel62b9aba2007-12-11 15:36:57 +01001351 /*
Chris Lalancette14ae51b2008-05-05 13:05:16 -04001352 * Just discard all writes to the performance counters; this
1353	 * should keep both older Linux and Windows 64-bit guests
1354	 * happy
Joerg Roedel62b9aba2007-12-11 15:36:57 +01001355 */
Chris Lalancette14ae51b2008-05-05 13:05:16 -04001356 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
1357
Joerg Roedel62b9aba2007-12-11 15:36:57 +01001358 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001359 default:
Avi Kivity3bab1f52006-12-29 16:49:48 -08001360 return kvm_set_msr_common(vcpu, ecx, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001361 }
1362 return 0;
1363}
1364
Rusty Russelle756fc62007-07-30 20:07:08 +10001365static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001366{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001367 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001368 u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001369 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001370
1371 KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
1372 handler);
1373
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001374 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10001375 if (svm_set_msr(&svm->vcpu, ecx, data))
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02001376 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001377 else
Rusty Russelle756fc62007-07-30 20:07:08 +10001378 skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001379 return 1;
1380}
1381
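/*
 * For SVM_EXIT_MSR, exit_info_1 gives the direction of the access:
 * 0 means the guest executed RDMSR, 1 means WRMSR.
 */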
Rusty Russelle756fc62007-07-30 20:07:08 +10001382static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001383{
Rusty Russelle756fc62007-07-30 20:07:08 +10001384 if (svm->vmcb->control.exit_info_1)
1385 return wrmsr_interception(svm, kvm_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001386 else
Rusty Russelle756fc62007-07-30 20:07:08 +10001387 return rdmsr_interception(svm, kvm_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001388}
1389
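/*
 * A VINTR exit means the virtual interrupt window requested earlier (by
 * svm_intr_assist() or do_interrupt_requests()) has opened.  Drop the
 * VINTR intercept and the dummy V_IRQ, and return to user space if it
 * asked to be notified when interrupts become injectable.
 */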
Rusty Russelle756fc62007-07-30 20:07:08 +10001390static int interrupt_window_interception(struct vcpu_svm *svm,
Dor Laorc1150d82007-01-05 16:36:24 -08001391 struct kvm_run *kvm_run)
1392{
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001393 KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
1394
Eddie Dong85f455f2007-07-06 12:20:49 +03001395 svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
1396 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
Dor Laorc1150d82007-01-05 16:36:24 -08001397 /*
1398	 * If user space is waiting to inject interrupts, exit as soon as
1399	 * possible
1400 */
1401 if (kvm_run->request_interrupt_window &&
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001402 !svm->vcpu.arch.irq_summary) {
Rusty Russelle756fc62007-07-30 20:07:08 +10001403 ++svm->vcpu.stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08001404 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1405 return 0;
1406 }
1407
1408 return 1;
1409}
1410
Rusty Russelle756fc62007-07-30 20:07:08 +10001411static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001412 struct kvm_run *kvm_run) = {
1413 [SVM_EXIT_READ_CR0] = emulate_on_interception,
1414 [SVM_EXIT_READ_CR3] = emulate_on_interception,
1415 [SVM_EXIT_READ_CR4] = emulate_on_interception,
Avi Kivity80a81192007-12-06 19:50:00 +02001416 [SVM_EXIT_READ_CR8] = emulate_on_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001417 /* for now: */
1418 [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
1419 [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
1420 [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
Joerg Roedel1d075432007-12-06 21:02:25 +01001421 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001422 [SVM_EXIT_READ_DR0] = emulate_on_interception,
1423 [SVM_EXIT_READ_DR1] = emulate_on_interception,
1424 [SVM_EXIT_READ_DR2] = emulate_on_interception,
1425 [SVM_EXIT_READ_DR3] = emulate_on_interception,
1426 [SVM_EXIT_WRITE_DR0] = emulate_on_interception,
1427 [SVM_EXIT_WRITE_DR1] = emulate_on_interception,
1428 [SVM_EXIT_WRITE_DR2] = emulate_on_interception,
1429 [SVM_EXIT_WRITE_DR3] = emulate_on_interception,
1430 [SVM_EXIT_WRITE_DR5] = emulate_on_interception,
1431 [SVM_EXIT_WRITE_DR7] = emulate_on_interception,
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001432 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001433 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
Anthony Liguori7807fa62007-04-23 09:17:21 -05001434 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
Joerg Roedel53371b52008-04-09 14:15:30 +02001435 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
Joerg Roedela0698052008-04-30 17:56:01 +02001436 [SVM_EXIT_INTR] = intr_interception,
Joerg Roedelc47f0982008-04-30 17:56:00 +02001437 [SVM_EXIT_NMI] = nmi_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001438 [SVM_EXIT_SMI] = nop_on_interception,
1439 [SVM_EXIT_INIT] = nop_on_interception,
Dor Laorc1150d82007-01-05 16:36:24 -08001440 [SVM_EXIT_VINTR] = interrupt_window_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001441 /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
1442 [SVM_EXIT_CPUID] = cpuid_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02001443 [SVM_EXIT_INVD] = emulate_on_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001444 [SVM_EXIT_HLT] = halt_interception,
Marcelo Tosattia7052892008-09-23 13:18:35 -03001445 [SVM_EXIT_INVLPG] = invlpg_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001446 [SVM_EXIT_INVLPGA] = invalid_op_interception,
1447 [SVM_EXIT_IOIO] = io_interception,
1448 [SVM_EXIT_MSR] = msr_interception,
1449 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001450 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001451 [SVM_EXIT_VMRUN] = invalid_op_interception,
Avi Kivity02e235b2007-02-19 14:37:47 +02001452 [SVM_EXIT_VMMCALL] = vmmcall_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001453 [SVM_EXIT_VMLOAD] = invalid_op_interception,
1454 [SVM_EXIT_VMSAVE] = invalid_op_interception,
1455 [SVM_EXIT_STGI] = invalid_op_interception,
1456 [SVM_EXIT_CLGI] = invalid_op_interception,
1457 [SVM_EXIT_SKINIT] = invalid_op_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02001458 [SVM_EXIT_WBINVD] = emulate_on_interception,
Joerg Roedel916ce232007-03-21 19:47:00 +01001459 [SVM_EXIT_MONITOR] = invalid_op_interception,
1460 [SVM_EXIT_MWAIT] = invalid_op_interception,
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001461 [SVM_EXIT_NPF] = pf_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001462};
1463
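/*
 * Top level exit handler.  With nested paging the guest's CR0 and CR3 are
 * not intercepted, so they are copied back from the VMCB here (reloading
 * the MMU and PDPTRs when needed) before the exit code is dispatched
 * through svm_exit_handlers[].
 */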
Avi Kivity04d2cc72007-09-10 18:10:54 +03001464static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001465{
Avi Kivity04d2cc72007-09-10 18:10:54 +03001466 struct vcpu_svm *svm = to_svm(vcpu);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001467 u32 exit_code = svm->vmcb->control.exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001468
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001469 KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
1470 (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
1471
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001472 if (npt_enabled) {
1473 int mmu_reload = 0;
1474 if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
1475 svm_set_cr0(vcpu, svm->vmcb->save.cr0);
1476 mmu_reload = 1;
1477 }
1478 vcpu->arch.cr0 = svm->vmcb->save.cr0;
1479 vcpu->arch.cr3 = svm->vmcb->save.cr3;
1480 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1481 if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
1482 kvm_inject_gp(vcpu, 0);
1483 return 1;
1484 }
1485 }
1486 if (mmu_reload) {
1487 kvm_mmu_reset_context(vcpu);
1488 kvm_mmu_load(vcpu);
1489 }
1490 }
1491
Avi Kivity04d2cc72007-09-10 18:10:54 +03001492 kvm_reput_irq(svm);
1493
1494 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
1495 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1496 kvm_run->fail_entry.hardware_entry_failure_reason
1497 = svm->vmcb->control.exit_code;
1498 return 0;
1499 }
1500
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001501 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001502 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1503 exit_code != SVM_EXIT_NPF)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001504		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
1505 "exit_code 0x%x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001506 __func__, svm->vmcb->control.exit_int_info,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001507 exit_code);
1508
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +02001509 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
Joe Perches56919c52007-11-12 20:06:51 -08001510 || !svm_exit_handlers[exit_code]) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001511 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
Avi Kivity364b6252007-04-16 14:28:40 +03001512 kvm_run->hw.hardware_exit_reason = exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001513 return 0;
1514 }
1515
Rusty Russelle756fc62007-07-30 20:07:08 +10001516 return svm_exit_handlers[exit_code](svm, kvm_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001517}
1518
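/*
 * Reload TR after a vmexit.  ltr only accepts an available TSS, and the
 * descriptor in the GDT is still marked busy from the last load, so reset
 * its type to "available 32/64-bit TSS" before reloading.
 */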
1519static void reload_tss(struct kvm_vcpu *vcpu)
1520{
1521 int cpu = raw_smp_processor_id();
1522
1523 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
Mike Dayd77c26f2007-10-08 09:02:08 -04001524 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
Avi Kivity6aa8b732006-12-10 02:21:36 -08001525 load_TR_desc();
1526}
1527
Rusty Russelle756fc62007-07-30 20:07:08 +10001528static void pre_svm_run(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001529{
1530 int cpu = raw_smp_processor_id();
1531
1532 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
1533
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001534 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
Rusty Russelle756fc62007-07-30 20:07:08 +10001535 if (svm->vcpu.cpu != cpu ||
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001536 svm->asid_generation != svm_data->asid_generation)
Rusty Russelle756fc62007-07-30 20:07:08 +10001537 new_asid(svm, svm_data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001538}
1539
1540
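/*
 * Inject an interrupt through the VMCB's virtual interrupt mechanism: the
 * vector goes into int_vector, V_IRQ is set, and the priority is forced to
 * the maximum (0xf) so V_TPR can never mask it.
 */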
Eddie Dong85f455f2007-07-06 12:20:49 +03001541static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001542{
1543 struct vmcb_control_area *control;
1544
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001545 KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);
1546
Avi Kivityfa89a812008-09-01 15:57:51 +03001547 ++svm->vcpu.stat.irq_injections;
Rusty Russelle756fc62007-07-30 20:07:08 +10001548 control = &svm->vmcb->control;
Eddie Dong85f455f2007-07-06 12:20:49 +03001549 control->int_vector = irq;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001550 control->int_ctl &= ~V_INTR_PRIO_MASK;
1551 control->int_ctl |= V_IRQ_MASK |
1552 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1553}
1554
Eddie Dong2a8067f2007-08-06 16:29:07 +03001555static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
1556{
1557 struct vcpu_svm *svm = to_svm(vcpu);
1558
1559 svm_inject_irq(svm, irq);
1560}
1561
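/*
 * CR8 writes are normally left unintercepted.  The intercept is re-armed
 * only when the current TPR already masks the highest pending vector, so
 * that we get an exit when the guest lowers its TPR far enough for the
 * interrupt to be delivered.
 */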
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001562static void update_cr8_intercept(struct kvm_vcpu *vcpu)
1563{
1564 struct vcpu_svm *svm = to_svm(vcpu);
1565 struct vmcb *vmcb = svm->vmcb;
1566 int max_irr, tpr;
1567
1568 if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
1569 return;
1570
1571 vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
1572
1573 max_irr = kvm_lapic_find_highest_irr(vcpu);
1574 if (max_irr == -1)
1575 return;
1576
1577 tpr = kvm_lapic_get_cr8(vcpu) << 4;
1578
1579 if (tpr >= (max_irr & 0xf0))
1580 vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
1581}
1582
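/*
 * Called before every guest entry.  Re-inject an interrupt that the last
 * exit cut short, otherwise inject the highest pending interrupt if the
 * guest can take it right now (IF set, no interrupt shadow, no event
 * already queued); if it cannot, request a VINTR exit so injection can be
 * retried once the window opens.
 */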
Avi Kivity04d2cc72007-09-10 18:10:54 +03001583static void svm_intr_assist(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001584{
Avi Kivity04d2cc72007-09-10 18:10:54 +03001585 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03001586 struct vmcb *vmcb = svm->vmcb;
1587 int intr_vector = -1;
1588
1589 if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
1590 ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
1591 intr_vector = vmcb->control.exit_int_info &
1592 SVM_EVTINJ_VEC_MASK;
1593 vmcb->control.exit_int_info = 0;
1594 svm_inject_irq(svm, intr_vector);
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001595 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001596 }
1597
1598 if (vmcb->control.int_ctl & V_IRQ_MASK)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001599 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001600
Eddie Dong1b9778d2007-09-03 16:56:58 +03001601 if (!kvm_cpu_has_interrupt(vcpu))
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001602 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001603
1604 if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
1605 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
1606 (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
1607 /* unable to deliver irq, set pending irq */
1608 vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
1609 svm_inject_irq(svm, 0x0);
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001610 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001611 }
1612 /* Okay, we can deliver the interrupt: grab it and update PIC state. */
Eddie Dong1b9778d2007-09-03 16:56:58 +03001613 intr_vector = kvm_cpu_get_interrupt(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03001614 svm_inject_irq(svm, intr_vector);
Eddie Dong1b9778d2007-09-03 16:56:58 +03001615 kvm_timer_intr_post(vcpu, intr_vector);
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001616out:
1617 update_cr8_intercept(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03001618}
1619
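/*
 * If a virtual interrupt is still pending in the VMCB (the guest never
 * took it) and the interrupt controller lives in user space, push the
 * vector back so it is not lost, and recompute whether the interrupt
 * window is currently open.
 */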
1620static void kvm_reput_irq(struct vcpu_svm *svm)
1621{
Rusty Russelle756fc62007-07-30 20:07:08 +10001622 struct vmcb_control_area *control = &svm->vmcb->control;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001623
Eddie Dong7017fc32007-07-18 11:34:57 +03001624 if ((control->int_ctl & V_IRQ_MASK)
1625 && !irqchip_in_kernel(svm->vcpu.kvm)) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001626 control->int_ctl &= ~V_IRQ_MASK;
Rusty Russelle756fc62007-07-30 20:07:08 +10001627 push_irq(&svm->vcpu, control->int_vector);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001628 }
Dor Laorc1150d82007-01-05 16:36:24 -08001629
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001630 svm->vcpu.arch.interrupt_window_open =
Dor Laorc1150d82007-01-05 16:36:24 -08001631 !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
1632}
1633
Eddie Dong85f455f2007-07-06 12:20:49 +03001634static void svm_do_inject_vector(struct vcpu_svm *svm)
1635{
1636 struct kvm_vcpu *vcpu = &svm->vcpu;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001637 int word_index = __ffs(vcpu->arch.irq_summary);
1638 int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
Eddie Dong85f455f2007-07-06 12:20:49 +03001639 int irq = word_index * BITS_PER_LONG + bit_index;
1640
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001641 clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
1642 if (!vcpu->arch.irq_pending[word_index])
1643 clear_bit(word_index, &vcpu->arch.irq_summary);
Eddie Dong85f455f2007-07-06 12:20:49 +03001644 svm_inject_irq(svm, irq);
1645}
1646
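/*
 * Userspace-irqchip counterpart of svm_intr_assist(): inject a pending
 * vector if the window is open, otherwise arm (or disarm) the VINTR
 * intercept so we exit as soon as interrupts become deliverable.
 */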
Avi Kivity04d2cc72007-09-10 18:10:54 +03001647static void do_interrupt_requests(struct kvm_vcpu *vcpu,
Dor Laorc1150d82007-01-05 16:36:24 -08001648 struct kvm_run *kvm_run)
1649{
Avi Kivity04d2cc72007-09-10 18:10:54 +03001650 struct vcpu_svm *svm = to_svm(vcpu);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001651 struct vmcb_control_area *control = &svm->vmcb->control;
Dor Laorc1150d82007-01-05 16:36:24 -08001652
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001653 svm->vcpu.arch.interrupt_window_open =
Dor Laorc1150d82007-01-05 16:36:24 -08001654 (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001655 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
Dor Laorc1150d82007-01-05 16:36:24 -08001656
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001657 if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
Dor Laorc1150d82007-01-05 16:36:24 -08001658 /*
1659		 * If interrupts are enabled and not blocked by sti or mov ss,
1660		 * inject the pending vector.
1660 */
Eddie Dong85f455f2007-07-06 12:20:49 +03001661 svm_do_inject_vector(svm);
Dor Laorc1150d82007-01-05 16:36:24 -08001662
1663 /*
1664 * Interrupts blocked. Wait for unblock.
1665 */
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001666 if (!svm->vcpu.arch.interrupt_window_open &&
1667 (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
Dor Laorc1150d82007-01-05 16:36:24 -08001668 control->intercept |= 1ULL << INTERCEPT_VINTR;
Mike Dayd77c26f2007-10-08 09:02:08 -04001669 else
Dor Laorc1150d82007-01-05 16:36:24 -08001670 control->intercept &= ~(1ULL << INTERCEPT_VINTR);
1671}
1672
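/*
 * Nothing to do here: SVM does not need the private real-mode TSS that
 * VMX sets up at this address, so it can be ignored.
 */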
Izik Eiduscbc94022007-10-25 00:29:55 +02001673static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
1674{
1675 return 0;
1676}
1677
Avi Kivity6aa8b732006-12-10 02:21:36 -08001678static void save_db_regs(unsigned long *db_regs)
1679{
Avi Kivity5aff4582006-12-13 00:33:45 -08001680 asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
1681 asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
1682 asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
1683 asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001684}
1685
1686static void load_db_regs(unsigned long *db_regs)
1687{
Avi Kivity5aff4582006-12-13 00:33:45 -08001688 asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
1689 asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
1690 asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
1691 asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001692}
1693
Avi Kivityd9e368d2007-06-07 19:18:30 +03001694static void svm_flush_tlb(struct kvm_vcpu *vcpu)
1695{
1696 force_new_asid(vcpu);
1697}
1698
Avi Kivity04d2cc72007-09-10 18:10:54 +03001699static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
1700{
1701}
1702
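/*
 * The TPR is kept in two places: the in-kernel lapic and V_TPR in the
 * VMCB.  sync_lapic_to_cr8() copies lapic -> VMCB before VMRUN, and
 * sync_cr8_to_lapic() copies unintercepted guest CR8 writes back into the
 * lapic after the exit.
 */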
Joerg Roedeld7bf8222008-04-16 16:51:17 +02001703static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
1704{
1705 struct vcpu_svm *svm = to_svm(vcpu);
1706
1707 if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
1708 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
1709 kvm_lapic_set_tpr(vcpu, cr8);
1710 }
1711}
1712
Joerg Roedel649d6862008-04-16 16:51:15 +02001713static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
1714{
1715 struct vcpu_svm *svm = to_svm(vcpu);
1716 u64 cr8;
1717
1718 if (!irqchip_in_kernel(vcpu->kvm))
1719 return;
1720
1721 cr8 = kvm_get_cr8(vcpu);
1722 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
1723 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
1724}
1725
Avi Kivity80e31d42008-07-14 14:44:59 +03001726#ifdef CONFIG_X86_64
1727#define R "r"
1728#else
1729#define R "e"
1730#endif
1731
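/*
 * The world switch itself.  RAX, RSP and RIP travel through the VMCB; the
 * remaining guest GPRs are moved by hand around VMLOAD/VMRUN/VMSAVE in the
 * asm block below.  Debug registers are swapped only when the guest
 * actually has breakpoints armed in DR7.
 */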
Avi Kivity04d2cc72007-09-10 18:10:54 +03001732static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001733{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001734 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001735 u16 fs_selector;
1736 u16 gs_selector;
1737 u16 ldt_selector;
Avi Kivityd9e368d2007-06-07 19:18:30 +03001738
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001739 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
1740 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
1741 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
1742
Rusty Russelle756fc62007-07-30 20:07:08 +10001743 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001744
Joerg Roedel649d6862008-04-16 16:51:15 +02001745 sync_lapic_to_cr8(vcpu);
1746
Avi Kivity6aa8b732006-12-10 02:21:36 -08001747 save_host_msrs(vcpu);
Avi Kivityd6e88ae2008-07-10 16:53:33 +03001748 fs_selector = kvm_read_fs();
1749 gs_selector = kvm_read_gs();
1750 ldt_selector = kvm_read_ldt();
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001751 svm->host_cr2 = kvm_read_cr2();
1752 svm->host_dr6 = read_dr6();
1753 svm->host_dr7 = read_dr7();
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001754 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001755 /* required for live migration with NPT */
1756 if (npt_enabled)
1757 svm->vmcb->save.cr3 = vcpu->arch.cr3;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001758
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001759 if (svm->vmcb->save.dr7 & 0xff) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001760 write_dr7(0);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001761 save_db_regs(svm->host_db_regs);
1762 load_db_regs(svm->db_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001763 }
Avi Kivity36241b82006-12-22 01:05:20 -08001764
Avi Kivity04d2cc72007-09-10 18:10:54 +03001765 clgi();
1766
1767 local_irq_enable();
Avi Kivity36241b82006-12-22 01:05:20 -08001768
Avi Kivity6aa8b732006-12-10 02:21:36 -08001769 asm volatile (
Avi Kivity80e31d42008-07-14 14:44:59 +03001770 "push %%"R"bp; \n\t"
1771 "mov %c[rbx](%[svm]), %%"R"bx \n\t"
1772 "mov %c[rcx](%[svm]), %%"R"cx \n\t"
1773 "mov %c[rdx](%[svm]), %%"R"dx \n\t"
1774 "mov %c[rsi](%[svm]), %%"R"si \n\t"
1775 "mov %c[rdi](%[svm]), %%"R"di \n\t"
1776 "mov %c[rbp](%[svm]), %%"R"bp \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001777#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001778 "mov %c[r8](%[svm]), %%r8 \n\t"
1779 "mov %c[r9](%[svm]), %%r9 \n\t"
1780 "mov %c[r10](%[svm]), %%r10 \n\t"
1781 "mov %c[r11](%[svm]), %%r11 \n\t"
1782 "mov %c[r12](%[svm]), %%r12 \n\t"
1783 "mov %c[r13](%[svm]), %%r13 \n\t"
1784 "mov %c[r14](%[svm]), %%r14 \n\t"
1785 "mov %c[r15](%[svm]), %%r15 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001786#endif
1787
Avi Kivity6aa8b732006-12-10 02:21:36 -08001788 /* Enter guest mode */
Avi Kivity80e31d42008-07-14 14:44:59 +03001789 "push %%"R"ax \n\t"
1790 "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
Avi Kivity4ecac3f2008-05-13 13:23:38 +03001791 __ex(SVM_VMLOAD) "\n\t"
1792 __ex(SVM_VMRUN) "\n\t"
1793 __ex(SVM_VMSAVE) "\n\t"
Avi Kivity80e31d42008-07-14 14:44:59 +03001794 "pop %%"R"ax \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001795
1796 /* Save guest registers, load host registers */
Avi Kivity80e31d42008-07-14 14:44:59 +03001797 "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
1798 "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
1799 "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
1800 "mov %%"R"si, %c[rsi](%[svm]) \n\t"
1801 "mov %%"R"di, %c[rdi](%[svm]) \n\t"
1802 "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001803#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001804 "mov %%r8, %c[r8](%[svm]) \n\t"
1805 "mov %%r9, %c[r9](%[svm]) \n\t"
1806 "mov %%r10, %c[r10](%[svm]) \n\t"
1807 "mov %%r11, %c[r11](%[svm]) \n\t"
1808 "mov %%r12, %c[r12](%[svm]) \n\t"
1809 "mov %%r13, %c[r13](%[svm]) \n\t"
1810 "mov %%r14, %c[r14](%[svm]) \n\t"
1811 "mov %%r15, %c[r15](%[svm]) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001812#endif
Avi Kivity80e31d42008-07-14 14:44:59 +03001813 "pop %%"R"bp"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001814 :
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001815 : [svm]"a"(svm),
Avi Kivity6aa8b732006-12-10 02:21:36 -08001816 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001817 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
1818 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
1819 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
1820 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
1821 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
1822 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001823#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001824 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
1825 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
1826 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
1827 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
1828 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
1829 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
1830 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
1831 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001832#endif
Laurent Vivier54a08c02007-10-25 14:18:53 +02001833 : "cc", "memory"
Avi Kivity80e31d42008-07-14 14:44:59 +03001834 , R"bx", R"cx", R"dx", R"si", R"di"
Laurent Vivier54a08c02007-10-25 14:18:53 +02001835#ifdef CONFIG_X86_64
Laurent Vivier54a08c02007-10-25 14:18:53 +02001836 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
1837#endif
1838 );
Avi Kivity6aa8b732006-12-10 02:21:36 -08001839
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001840 if ((svm->vmcb->save.dr7 & 0xff))
1841 load_db_regs(svm->host_db_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001842
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001843 vcpu->arch.cr2 = svm->vmcb->save.cr2;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001844 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
1845 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
1846 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001847
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001848 write_dr6(svm->host_dr6);
1849 write_dr7(svm->host_dr7);
1850 kvm_write_cr2(svm->host_cr2);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001851
Avi Kivityd6e88ae2008-07-10 16:53:33 +03001852 kvm_load_fs(fs_selector);
1853 kvm_load_gs(gs_selector);
1854 kvm_load_ldt(ldt_selector);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001855 load_host_msrs(vcpu);
1856
1857 reload_tss(vcpu);
1858
Avi Kivity56ba47d2007-11-07 17:14:18 +02001859 local_irq_disable();
1860
1861 stgi();
1862
Joerg Roedeld7bf8222008-04-16 16:51:17 +02001863 sync_cr8_to_lapic(vcpu);
1864
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001865 svm->next_rip = 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001866}
1867
Avi Kivity80e31d42008-07-14 14:44:59 +03001868#undef R
1869
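/*
 * With nested paging 'root' is the nested page table root and goes into
 * nested_cr3 (the guest keeps full control of its own CR3); without it,
 * 'root' is the shadow page table root and is written to save.cr3
 * directly.
 */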
Avi Kivity6aa8b732006-12-10 02:21:36 -08001870static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
1871{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001872 struct vcpu_svm *svm = to_svm(vcpu);
1873
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001874 if (npt_enabled) {
1875 svm->vmcb->control.nested_cr3 = root;
1876 force_new_asid(vcpu);
1877 return;
1878 }
1879
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001880 svm->vmcb->save.cr3 = root;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001881 force_new_asid(vcpu);
Anthony Liguori7807fa62007-04-23 09:17:21 -05001882
1883 if (vcpu->fpu_active) {
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001884 svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
1885 svm->vmcb->save.cr0 |= X86_CR0_TS;
Anthony Liguori7807fa62007-04-23 09:17:21 -05001886 vcpu->fpu_active = 0;
1887 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001888}
1889
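/*
 * The BIOS can lock SVM off via the SVMDIS bit in the VM_CR MSR; if it
 * has, report the feature as disabled.
 */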
Avi Kivity6aa8b732006-12-10 02:21:36 -08001890static int is_disabled(void)
1891{
Joerg Roedel6031a612007-06-22 12:29:50 +03001892 u64 vm_cr;
1893
1894 rdmsrl(MSR_VM_CR, vm_cr);
1895 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
1896 return 1;
1897
Avi Kivity6aa8b732006-12-10 02:21:36 -08001898 return 0;
1899}
1900
Ingo Molnar102d8322007-02-19 14:37:47 +02001901static void
1902svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1903{
1904 /*
1905 * Patch in the VMMCALL instruction:
1906 */
1907 hypercall[0] = 0x0f;
1908 hypercall[1] = 0x01;
1909 hypercall[2] = 0xd9;
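	/* 0f 01 d9 is the vmmcall encoding */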
Ingo Molnar102d8322007-02-19 14:37:47 +02001910}
1911
Yang, Sheng002c7f72007-07-31 14:23:01 +03001912static void svm_check_processor_compat(void *rtn)
1913{
1914 *(int *)rtn = 0;
1915}
1916
Avi Kivity774ead32007-12-26 13:57:04 +02001917static bool svm_cpu_has_accelerated_tpr(void)
1918{
1919 return false;
1920}
1921
Sheng Yang67253af2008-04-25 10:20:22 +08001922static int get_npt_level(void)
1923{
1924#ifdef CONFIG_X86_64
1925 return PT64_ROOT_LEVEL;
1926#else
1927 return PT32E_ROOT_LEVEL;
1928#endif
1929}
1930
Sheng Yang64d4d522008-10-09 16:01:57 +08001931static int svm_get_mt_mask_shift(void)
1932{
1933 return 0;
1934}
1935
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001936static struct kvm_x86_ops svm_x86_ops = {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001937 .cpu_has_kvm_support = has_svm,
1938 .disabled_by_bios = is_disabled,
1939 .hardware_setup = svm_hardware_setup,
1940 .hardware_unsetup = svm_hardware_unsetup,
Yang, Sheng002c7f72007-07-31 14:23:01 +03001941 .check_processor_compatibility = svm_check_processor_compat,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001942 .hardware_enable = svm_hardware_enable,
1943 .hardware_disable = svm_hardware_disable,
Avi Kivity774ead32007-12-26 13:57:04 +02001944 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001945
1946 .vcpu_create = svm_create_vcpu,
1947 .vcpu_free = svm_free_vcpu,
Avi Kivity04d2cc72007-09-10 18:10:54 +03001948 .vcpu_reset = svm_vcpu_reset,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001949
Avi Kivity04d2cc72007-09-10 18:10:54 +03001950 .prepare_guest_switch = svm_prepare_guest_switch,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001951 .vcpu_load = svm_vcpu_load,
1952 .vcpu_put = svm_vcpu_put,
1953
1954 .set_guest_debug = svm_guest_debug,
1955 .get_msr = svm_get_msr,
1956 .set_msr = svm_set_msr,
1957 .get_segment_base = svm_get_segment_base,
1958 .get_segment = svm_get_segment,
1959 .set_segment = svm_set_segment,
Izik Eidus2e4d2652008-03-24 19:38:34 +02001960 .get_cpl = svm_get_cpl,
Rusty Russell1747fb72007-09-06 01:21:32 +10001961 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
Anthony Liguori25c4c272007-04-27 09:29:21 +03001962 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001963 .set_cr0 = svm_set_cr0,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001964 .set_cr3 = svm_set_cr3,
1965 .set_cr4 = svm_set_cr4,
1966 .set_efer = svm_set_efer,
1967 .get_idt = svm_get_idt,
1968 .set_idt = svm_set_idt,
1969 .get_gdt = svm_get_gdt,
1970 .set_gdt = svm_set_gdt,
1971 .get_dr = svm_get_dr,
1972 .set_dr = svm_set_dr,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001973 .get_rflags = svm_get_rflags,
1974 .set_rflags = svm_set_rflags,
1975
Avi Kivity6aa8b732006-12-10 02:21:36 -08001976 .tlb_flush = svm_flush_tlb,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001977
Avi Kivity6aa8b732006-12-10 02:21:36 -08001978 .run = svm_vcpu_run,
Avi Kivity04d2cc72007-09-10 18:10:54 +03001979 .handle_exit = handle_exit,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001980 .skip_emulated_instruction = skip_emulated_instruction,
Ingo Molnar102d8322007-02-19 14:37:47 +02001981 .patch_hypercall = svm_patch_hypercall,
Eddie Dong2a8067f2007-08-06 16:29:07 +03001982 .get_irq = svm_get_irq,
1983 .set_irq = svm_set_irq,
Avi Kivity298101d2007-11-25 13:41:11 +02001984 .queue_exception = svm_queue_exception,
1985 .exception_injected = svm_exception_injected,
Avi Kivity04d2cc72007-09-10 18:10:54 +03001986 .inject_pending_irq = svm_intr_assist,
1987 .inject_pending_vectors = do_interrupt_requests,
Izik Eiduscbc94022007-10-25 00:29:55 +02001988
1989 .set_tss_addr = svm_set_tss_addr,
Sheng Yang67253af2008-04-25 10:20:22 +08001990 .get_tdp_level = get_npt_level,
Sheng Yang64d4d522008-10-09 16:01:57 +08001991 .get_mt_mask_shift = svm_get_mt_mask_shift,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001992};
1993
1994static int __init svm_init(void)
1995{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08001996 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
Rusty Russellc16f8622007-07-30 21:12:19 +10001997 THIS_MODULE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001998}
1999
2000static void __exit svm_exit(void)
2001{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08002002 kvm_exit();
Avi Kivity6aa8b732006-12-10 02:21:36 -08002003}
2004
2005module_init(svm_init)
2006module_exit(svm_exit)