/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DB_VECTOR 1
#define UD_VECTOR 6
#define GP_VECTOR 13

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static void kvm_reput_irq(struct vcpu_svm *svm);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}

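/*
 * pop_irq()/push_irq() below manipulate the per-vcpu pending-interrupt
 * bitmap (arch.irq_pending, summarised by arch.irq_summary); this path is
 * only used when the interrupt controller is emulated in userspace rather
 * than in the kernel (see the !irqchip_in_kernel() callers).
 */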
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->arch.irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
}

static inline void clgi(void)
{
	asm volatile (SVM_CLGI);
}

static inline void stgi(void)
{
	asm volatile (SVM_STGI);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}

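/*
 * Pulling the vcpu's ASID generation behind the per-cpu generation forces a
 * fresh ASID to be allocated before the next VMRUN (see new_asid() below),
 * which is how guest TLB entries are effectively flushed.
 */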
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->arch.shadow_efer = efer;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static bool svm_exception_injected(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
		       __func__, svm->vmcb->save.rip, svm->next_rip);

	vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->arch.interrupt_window_open = 1;
}

static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}
	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (svm_data) {
		uint64_t efer;

		wrmsrl(MSR_VM_HSAVE_PA, 0);
		rdmsrl(MSR_EFER, efer);
		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
		__free_page(svm_data->save_area);
		kfree(svm_data);
	}
}

static void svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *svm_data;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}

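/*
 * The MSR permission map uses two bits per MSR within the ranges listed in
 * msrpm_ranges[]: the low bit intercepts reads, the high bit intercepts
 * writes.  A read/write argument of 1 therefore clears the corresponding
 * bit and lets the guest access the MSR directly; MSRs outside the covered
 * ranges cannot be passed through here (the function BUG()s).
 */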
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return;
		}
	}
	BUG();
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	if (!svm_has(SVM_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	}

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	control->intercept_cr_read = INTERCEPT_CR0_MASK |
				     INTERCEPT_CR3_MASK |
				     INTERCEPT_CR4_MASK;

	control->intercept_cr_write = INTERCEPT_CR0_MASK |
				      INTERCEPT_CR3_MASK |
				      INTERCEPT_CR4_MASK |
				      INTERCEPT_CR8_MASK;

	control->intercept_dr_read = INTERCEPT_DR0_MASK |
				     INTERCEPT_DR1_MASK |
				     INTERCEPT_DR2_MASK |
				     INTERCEPT_DR3_MASK;

	control->intercept_dr_write = INTERCEPT_DR0_MASK |
				      INTERCEPT_DR1_MASK |
				      INTERCEPT_DR2_MASK |
				      INTERCEPT_DR3_MASK |
				      INTERCEPT_DR5_MASK |
				      INTERCEPT_DR7_MASK;

	control->intercept_exceptions = (1 << PF_VECTOR) |
					(1 << UD_VECTOR) |
					(1 << MC_VECTOR);

	control->intercept = (1ULL << INTERCEPT_INTR) |
			     (1ULL << INTERCEPT_NMI) |
			     (1ULL << INTERCEPT_SMI) |
			     (1ULL << INTERCEPT_CPUID) |
			     (1ULL << INTERCEPT_INVD) |
			     (1ULL << INTERCEPT_HLT) |
			     (1ULL << INTERCEPT_INVLPGA) |
			     (1ULL << INTERCEPT_IOIO_PROT) |
			     (1ULL << INTERCEPT_MSR_PROT) |
			     (1ULL << INTERCEPT_TASK_SWITCH) |
			     (1ULL << INTERCEPT_SHUTDOWN) |
			     (1ULL << INTERCEPT_VMRUN) |
			     (1ULL << INTERCEPT_VMMCALL) |
			     (1ULL << INTERCEPT_VMLOAD) |
			     (1ULL << INTERCEPT_VMSAVE) |
			     (1ULL << INTERCEPT_STGI) |
			     (1ULL << INTERCEPT_CLGI) |
			     (1ULL << INTERCEPT_SKINIT) |
			     (1ULL << INTERCEPT_WBINVD) |
			     (1ULL << INTERCEPT_MONITOR) |
			     (1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		control->intercept &= ~(1ULL << INTERCEPT_TASK_SWITCH);
		control->intercept_exceptions &= ~(1 << PF_VECTOR);
		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
						INTERCEPT_CR3_MASK);
		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
						 INTERCEPT_CR3_MASK);
		save->g_pat = 0x0007040600070406ULL;
		/* enable caching because the QEMU Bios doesn't enable it */
		save->cr0 = X86_CR0_ET;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	force_new_asid(&svm->vcpu);
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (vcpu->vcpu_id != 0) {
		svm->vmcb->save.rip = 0;
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}

	return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	err = -ENOMEM;
	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto uninit;
	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	memset(svm->db_regs, 0, sizeof(svm->db_regs));
	init_vmcb(svm);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->arch.host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_timers(vcpu);
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->arch.host_tsc);
}

static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}

static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.rip = svm->vmcb->save.rip;
}

static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.rip;
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	if (npt_enabled)
		goto set;

	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->arch.cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	if (!vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		cr0 |= X86_CR0_TS;
	}
set:
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static int svm_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_int_info = svm->vmcb->control.exit_int_info;

	if (is_external_interrupt(exit_int_info))
		return exit_int_info & SVM_EVTINJ_VEC_MASK;
	return -1;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->vcpu.cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	unsigned long val = to_svm(vcpu)->db_regs[dr];

	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
	return val;
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	*exception = 0;

	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->arch.cr4 & X86_CR4_DE) {
			*exception = UD_VECTOR;
			return;
		}
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __func__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

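/*
 * #PF intercept: exit_info_2 carries the faulting address (with nested
 * paging it is the guest-physical address, hence the TDP_FAULT trace event)
 * and exit_info_1 carries the page-fault error code; both are handed to the
 * common KVM MMU code.
 */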
static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 exit_int_info = svm->vmcb->control.exit_int_info;
	struct kvm *kvm = svm->vcpu.kvm;
	u64 fault_address;
	u32 error_code;

	if (!irqchip_in_kernel(kvm) &&
	    is_external_interrupt(exit_int_info))
		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;

	if (!npt_enabled)
		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	else
		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);

	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	int er;

	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}

static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

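/*
 * IN/OUT intercept: exit_info_1 packs the decoded operands (direction,
 * port, operand size, string/rep prefixes) and exit_info_2 holds the rip of
 * the instruction following the IN/OUT, which becomes next_rip.  String
 * instructions are handed to the instruction emulator.
 */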
static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, down, in, string, rep;
	unsigned port;

	++svm->vcpu.stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	string = (io_info & SVM_IOIO_STR_MASK) != 0;

	if (string) {
		if (emulate_instruction(&svm->vcpu,
					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}

static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	KVMTRACE_0D(NMI, &svm->vcpu, handler);
	return 1;
}

static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	++svm->vcpu.stat.irq_exits;
	KVMTRACE_0D(INTR, &svm->vcpu, handler);
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int task_switch_interception(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	u16 tss_selector;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_IRET);
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_JMP);
	return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
}

static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
	if (irqchip_in_kernel(svm->vcpu.kvm))
		return 1;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

Avi Kivity6aa8b732006-12-10 02:21:36 -08001177static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
1178{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001179 struct vcpu_svm *svm = to_svm(vcpu);
1180
Avi Kivity6aa8b732006-12-10 02:21:36 -08001181 switch (ecx) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001182 case MSR_IA32_TIME_STAMP_COUNTER: {
1183 u64 tsc;
1184
1185 rdtscll(tsc);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001186 *data = svm->vmcb->control.tsc_offset + tsc;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001187 break;
1188 }
Avi Kivity0e859ca2006-12-22 01:05:08 -08001189 case MSR_K6_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001190 *data = svm->vmcb->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001191 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08001192#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001193 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001194 *data = svm->vmcb->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001195 break;
1196 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001197 *data = svm->vmcb->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001198 break;
1199 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001200 *data = svm->vmcb->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001201 break;
1202 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001203 *data = svm->vmcb->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001204 break;
1205#endif
1206 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001207 *data = svm->vmcb->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001208 break;
1209 case MSR_IA32_SYSENTER_EIP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001210 *data = svm->vmcb->save.sysenter_eip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001211 break;
1212 case MSR_IA32_SYSENTER_ESP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001213 *data = svm->vmcb->save.sysenter_esp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001214 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01001215 /* Nobody will change the following 5 values in the VMCB so
1216 we can safely return them on rdmsr. They will always be 0
1217 until LBRV is implemented. */
1218 case MSR_IA32_DEBUGCTLMSR:
1219 *data = svm->vmcb->save.dbgctl;
1220 break;
1221 case MSR_IA32_LASTBRANCHFROMIP:
1222 *data = svm->vmcb->save.br_from;
1223 break;
1224 case MSR_IA32_LASTBRANCHTOIP:
1225 *data = svm->vmcb->save.br_to;
1226 break;
1227 case MSR_IA32_LASTINTFROMIP:
1228 *data = svm->vmcb->save.last_excp_from;
1229 break;
1230 case MSR_IA32_LASTINTTOIP:
1231 *data = svm->vmcb->save.last_excp_to;
1232 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001233 default:
Avi Kivity3bab1f52006-12-29 16:49:48 -08001234 return kvm_get_msr_common(vcpu, ecx, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001235 }
1236 return 0;
1237}
1238
Rusty Russelle756fc62007-07-30 20:07:08 +10001239static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001240{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001241 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
Avi Kivity6aa8b732006-12-10 02:21:36 -08001242 u64 data;
1243
Rusty Russelle756fc62007-07-30 20:07:08 +10001244 if (svm_get_msr(&svm->vcpu, ecx, &data))
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02001245 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001246 else {
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001247 KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
1248 (u32)(data >> 32), handler);
1249
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001250 svm->vmcb->save.rax = data & 0xffffffff;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001251 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001252 svm->next_rip = svm->vmcb->save.rip + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10001253 skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001254 }
1255 return 1;
1256}
1257
1258static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
1259{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001260 struct vcpu_svm *svm = to_svm(vcpu);
1261
Avi Kivity6aa8b732006-12-10 02:21:36 -08001262 switch (ecx) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001263 case MSR_IA32_TIME_STAMP_COUNTER: {
1264 u64 tsc;
1265
1266 rdtscll(tsc);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001267 svm->vmcb->control.tsc_offset = data - tsc;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001268 break;
1269 }
Avi Kivity0e859ca2006-12-22 01:05:08 -08001270 case MSR_K6_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001271 svm->vmcb->save.star = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001272 break;
Robert P. J. Day49b14f22007-01-29 13:19:50 -08001273#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001274 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001275 svm->vmcb->save.lstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001276 break;
1277 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001278 svm->vmcb->save.cstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001279 break;
1280 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001281 svm->vmcb->save.kernel_gs_base = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001282 break;
1283 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001284 svm->vmcb->save.sfmask = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001285 break;
1286#endif
1287 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001288 svm->vmcb->save.sysenter_cs = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001289 break;
1290 case MSR_IA32_SYSENTER_EIP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001291 svm->vmcb->save.sysenter_eip = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001292 break;
1293 case MSR_IA32_SYSENTER_ESP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001294 svm->vmcb->save.sysenter_esp = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001295 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01001296 case MSR_IA32_DEBUGCTLMSR:
Joerg Roedel24e09cb2008-02-13 18:58:47 +01001297 if (!svm_has(SVM_FEATURE_LBRV)) {
1298 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001299 __func__, data);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01001300 break;
1301 }
1302 if (data & DEBUGCTL_RESERVED_BITS)
1303 return 1;
1304
1305 svm->vmcb->save.dbgctl = data;
1306 if (data & (1ULL<<0))
1307 svm_enable_lbrv(svm);
1308 else
1309 svm_disable_lbrv(svm);
Joerg Roedela2938c82008-02-13 16:30:28 +01001310 break;
Joerg Roedel62b9aba2007-12-11 15:36:57 +01001311 case MSR_K7_EVNTSEL0:
1312 case MSR_K7_EVNTSEL1:
1313 case MSR_K7_EVNTSEL2:
1314 case MSR_K7_EVNTSEL3:
1315 /*
1316 * only support writing 0 to the performance counters for now
1317 * to make Windows happy. Should be replaced by a real
1318 * performance counter emulation later.
1319 */
1320 if (data != 0)
1321 goto unhandled;
1322 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001323 default:
Joerg Roedel62b9aba2007-12-11 15:36:57 +01001324 unhandled:
Avi Kivity3bab1f52006-12-29 16:49:48 -08001325 return kvm_set_msr_common(vcpu, ecx, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001326 }
1327 return 0;
1328}
1329
Rusty Russelle756fc62007-07-30 20:07:08 +10001330static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001331{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001332 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001333 u64 data = (svm->vmcb->save.rax & -1u)
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001334 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001335
1336 KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
1337 handler);
1338
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001339 svm->next_rip = svm->vmcb->save.rip + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10001340 if (svm_set_msr(&svm->vcpu, ecx, data))
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02001341 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001342 else
Rusty Russelle756fc62007-07-30 20:07:08 +10001343 skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001344 return 1;
1345}
1346
Rusty Russelle756fc62007-07-30 20:07:08 +10001347static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001348{
Rusty Russelle756fc62007-07-30 20:07:08 +10001349 if (svm->vmcb->control.exit_info_1)
1350 return wrmsr_interception(svm, kvm_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001351 else
Rusty Russelle756fc62007-07-30 20:07:08 +10001352 return rdmsr_interception(svm, kvm_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001353}
1354
Rusty Russelle756fc62007-07-30 20:07:08 +10001355static int interrupt_window_interception(struct vcpu_svm *svm,
Dor Laorc1150d82007-01-05 16:36:24 -08001356 struct kvm_run *kvm_run)
1357{
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001358 KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
1359
Eddie Dong85f455f2007-07-06 12:20:49 +03001360 svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
1361 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
Dor Laorc1150d82007-01-05 16:36:24 -08001362 /*
1363	 * If user space is waiting to inject interrupts, exit as soon as
1364	 * possible.
1365 */
1366 if (kvm_run->request_interrupt_window &&
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001367 !svm->vcpu.arch.irq_summary) {
Rusty Russelle756fc62007-07-30 20:07:08 +10001368 ++svm->vcpu.stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08001369 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1370 return 0;
1371 }
1372
1373 return 1;
1374}
1375
Rusty Russelle756fc62007-07-30 20:07:08 +10001376static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001377 struct kvm_run *kvm_run) = {
1378 [SVM_EXIT_READ_CR0] = emulate_on_interception,
1379 [SVM_EXIT_READ_CR3] = emulate_on_interception,
1380 [SVM_EXIT_READ_CR4] = emulate_on_interception,
Avi Kivity80a81192007-12-06 19:50:00 +02001381 [SVM_EXIT_READ_CR8] = emulate_on_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001382 /* for now: */
1383 [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
1384 [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
1385 [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
Joerg Roedel1d075432007-12-06 21:02:25 +01001386 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001387 [SVM_EXIT_READ_DR0] = emulate_on_interception,
1388 [SVM_EXIT_READ_DR1] = emulate_on_interception,
1389 [SVM_EXIT_READ_DR2] = emulate_on_interception,
1390 [SVM_EXIT_READ_DR3] = emulate_on_interception,
1391 [SVM_EXIT_WRITE_DR0] = emulate_on_interception,
1392 [SVM_EXIT_WRITE_DR1] = emulate_on_interception,
1393 [SVM_EXIT_WRITE_DR2] = emulate_on_interception,
1394 [SVM_EXIT_WRITE_DR3] = emulate_on_interception,
1395 [SVM_EXIT_WRITE_DR5] = emulate_on_interception,
1396 [SVM_EXIT_WRITE_DR7] = emulate_on_interception,
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001397 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001398 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
Anthony Liguori7807fa62007-04-23 09:17:21 -05001399 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
Joerg Roedel53371b52008-04-09 14:15:30 +02001400 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
Joerg Roedela0698052008-04-30 17:56:01 +02001401 [SVM_EXIT_INTR] = intr_interception,
Joerg Roedelc47f0982008-04-30 17:56:00 +02001402 [SVM_EXIT_NMI] = nmi_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001403 [SVM_EXIT_SMI] = nop_on_interception,
1404 [SVM_EXIT_INIT] = nop_on_interception,
Dor Laorc1150d82007-01-05 16:36:24 -08001405 [SVM_EXIT_VINTR] = interrupt_window_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001406 /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
1407 [SVM_EXIT_CPUID] = cpuid_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02001408 [SVM_EXIT_INVD] = emulate_on_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001409 [SVM_EXIT_HLT] = halt_interception,
1410 [SVM_EXIT_INVLPG] = emulate_on_interception,
1411 [SVM_EXIT_INVLPGA] = invalid_op_interception,
1412 [SVM_EXIT_IOIO] = io_interception,
1413 [SVM_EXIT_MSR] = msr_interception,
1414 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001415 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001416 [SVM_EXIT_VMRUN] = invalid_op_interception,
Avi Kivity02e235b2007-02-19 14:37:47 +02001417 [SVM_EXIT_VMMCALL] = vmmcall_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001418 [SVM_EXIT_VMLOAD] = invalid_op_interception,
1419 [SVM_EXIT_VMSAVE] = invalid_op_interception,
1420 [SVM_EXIT_STGI] = invalid_op_interception,
1421 [SVM_EXIT_CLGI] = invalid_op_interception,
1422 [SVM_EXIT_SKINIT] = invalid_op_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02001423 [SVM_EXIT_WBINVD] = emulate_on_interception,
Joerg Roedel916ce232007-03-21 19:47:00 +01001424 [SVM_EXIT_MONITOR] = invalid_op_interception,
1425 [SVM_EXIT_MWAIT] = invalid_op_interception,
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001426 [SVM_EXIT_NPF] = pf_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001427};
1428
Avi Kivity04d2cc72007-09-10 18:10:54 +03001429static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001430{
Avi Kivity04d2cc72007-09-10 18:10:54 +03001431 struct vcpu_svm *svm = to_svm(vcpu);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001432 u32 exit_code = svm->vmcb->control.exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001433
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001434 KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
1435 (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
1436
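	/*
	 * With nested paging the guest writes CR0/CR3 without exiting, so
	 * sync them back from the VMCB and reload the MMU if the paging
	 * mode changed.
	 */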
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001437 if (npt_enabled) {
1438 int mmu_reload = 0;
1439 if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
1440 svm_set_cr0(vcpu, svm->vmcb->save.cr0);
1441 mmu_reload = 1;
1442 }
1443 vcpu->arch.cr0 = svm->vmcb->save.cr0;
1444 vcpu->arch.cr3 = svm->vmcb->save.cr3;
1445 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1446 if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
1447 kvm_inject_gp(vcpu, 0);
1448 return 1;
1449 }
1450 }
1451 if (mmu_reload) {
1452 kvm_mmu_reset_context(vcpu);
1453 kvm_mmu_load(vcpu);
1454 }
1455 }
1456
Avi Kivity04d2cc72007-09-10 18:10:54 +03001457 kvm_reput_irq(svm);
1458
1459 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
1460 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1461 kvm_run->fail_entry.hardware_entry_failure_reason
1462 = svm->vmcb->control.exit_code;
1463 return 0;
1464 }
1465
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001466 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001467 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1468 exit_code != SVM_EXIT_NPF)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001469	printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
1470 "exit_code 0x%x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001471 __func__, svm->vmcb->control.exit_int_info,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001472 exit_code);
1473
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +02001474 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
Joe Perches56919c52007-11-12 20:06:51 -08001475 || !svm_exit_handlers[exit_code]) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001476 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
Avi Kivity364b6252007-04-16 14:28:40 +03001477 kvm_run->hw.hardware_exit_reason = exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001478 return 0;
1479 }
1480
Rusty Russelle756fc62007-07-30 20:07:08 +10001481 return svm_exit_handlers[exit_code](svm, kvm_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001482}
1483
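/*
 * Reload the host TR after the guest has run.  The TSS descriptor is
 * marked available (type 9) first because ltr faults on a busy TSS.
 */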
1484static void reload_tss(struct kvm_vcpu *vcpu)
1485{
1486 int cpu = raw_smp_processor_id();
1487
1488 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
Mike Dayd77c26f2007-10-08 09:02:08 -04001489 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
Avi Kivity6aa8b732006-12-10 02:21:36 -08001490 load_TR_desc();
1491}
1492
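/*
 * Runs before every VMRUN: allocate a fresh ASID if the vcpu migrated
 * to another physical CPU or this CPU's ASID generation has changed.
 */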
Rusty Russelle756fc62007-07-30 20:07:08 +10001493static void pre_svm_run(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001494{
1495 int cpu = raw_smp_processor_id();
1496
1497 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
1498
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001499 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
Rusty Russelle756fc62007-07-30 20:07:08 +10001500 if (svm->vcpu.cpu != cpu ||
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001501 svm->asid_generation != svm_data->asid_generation)
Rusty Russelle756fc62007-07-30 20:07:08 +10001502 new_asid(svm, svm_data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001503}
1504
1505
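/*
 * Program a virtual interrupt into the VMCB: set the vector, raise
 * V_IRQ and force the priority field to the maximum (0xf).
 */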
Eddie Dong85f455f2007-07-06 12:20:49 +03001506static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001507{
1508 struct vmcb_control_area *control;
1509
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001510 KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);
1511
Rusty Russelle756fc62007-07-30 20:07:08 +10001512 control = &svm->vmcb->control;
Eddie Dong85f455f2007-07-06 12:20:49 +03001513 control->int_vector = irq;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001514 control->int_ctl &= ~V_INTR_PRIO_MASK;
1515 control->int_ctl |= V_IRQ_MASK |
1516 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1517}
1518
Eddie Dong2a8067f2007-08-06 16:29:07 +03001519static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
1520{
1521 struct vcpu_svm *svm = to_svm(vcpu);
1522
1523 svm_inject_irq(svm, irq);
1524}
1525
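/*
 * With the in-kernel APIC and no vapic page, intercept CR8 writes only
 * while the current TPR masks the highest pending interrupt, so the
 * guest can normally update its TPR without exiting.
 */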
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001526static void update_cr8_intercept(struct kvm_vcpu *vcpu)
1527{
1528 struct vcpu_svm *svm = to_svm(vcpu);
1529 struct vmcb *vmcb = svm->vmcb;
1530 int max_irr, tpr;
1531
1532 if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
1533 return;
1534
1535 vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
1536
1537 max_irr = kvm_lapic_find_highest_irr(vcpu);
1538 if (max_irr == -1)
1539 return;
1540
1541 tpr = kvm_lapic_get_cr8(vcpu) << 4;
1542
1543 if (tpr >= (max_irr & 0xf0))
1544 vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
1545}
1546
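/*
 * Interrupt injection: re-inject an interrupt that was in flight at
 * #VMEXIT, otherwise deliver the highest pending interrupt if the guest
 * can take it, or request a VINTR exit (interrupt window) if it cannot.
 */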
Avi Kivity04d2cc72007-09-10 18:10:54 +03001547static void svm_intr_assist(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001548{
Avi Kivity04d2cc72007-09-10 18:10:54 +03001549 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03001550 struct vmcb *vmcb = svm->vmcb;
1551 int intr_vector = -1;
1552
1553 if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
1554 ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
1555 intr_vector = vmcb->control.exit_int_info &
1556 SVM_EVTINJ_VEC_MASK;
1557 vmcb->control.exit_int_info = 0;
1558 svm_inject_irq(svm, intr_vector);
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001559 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001560 }
1561
1562 if (vmcb->control.int_ctl & V_IRQ_MASK)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001563 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001564
Eddie Dong1b9778d2007-09-03 16:56:58 +03001565 if (!kvm_cpu_has_interrupt(vcpu))
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001566 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001567
1568 if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
1569 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
1570 (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
1571 /* unable to deliver irq, set pending irq */
1572 vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
1573 svm_inject_irq(svm, 0x0);
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001574 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001575 }
1576 /* Okay, we can deliver the interrupt: grab it and update PIC state. */
Eddie Dong1b9778d2007-09-03 16:56:58 +03001577 intr_vector = kvm_cpu_get_interrupt(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03001578 svm_inject_irq(svm, intr_vector);
Eddie Dong1b9778d2007-09-03 16:56:58 +03001579 kvm_timer_intr_post(vcpu, intr_vector);
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001580out:
1581 update_cr8_intercept(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03001582}
1583
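/*
 * If a virtual interrupt was still pending at #VMEXIT (userspace
 * irqchip only), push it back so it is not lost, and recompute whether
 * the interrupt window is open.
 */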
1584static void kvm_reput_irq(struct vcpu_svm *svm)
1585{
Rusty Russelle756fc62007-07-30 20:07:08 +10001586 struct vmcb_control_area *control = &svm->vmcb->control;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001587
Eddie Dong7017fc32007-07-18 11:34:57 +03001588 if ((control->int_ctl & V_IRQ_MASK)
1589 && !irqchip_in_kernel(svm->vcpu.kvm)) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001590 control->int_ctl &= ~V_IRQ_MASK;
Rusty Russelle756fc62007-07-30 20:07:08 +10001591 push_irq(&svm->vcpu, control->int_vector);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001592 }
Dor Laorc1150d82007-01-05 16:36:24 -08001593
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001594 svm->vcpu.arch.interrupt_window_open =
Dor Laorc1150d82007-01-05 16:36:24 -08001595 !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
1596}
1597
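/* Pop the lowest pending vector from the irq bitmaps and inject it. */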
Eddie Dong85f455f2007-07-06 12:20:49 +03001598static void svm_do_inject_vector(struct vcpu_svm *svm)
1599{
1600 struct kvm_vcpu *vcpu = &svm->vcpu;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001601 int word_index = __ffs(vcpu->arch.irq_summary);
1602 int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
Eddie Dong85f455f2007-07-06 12:20:49 +03001603 int irq = word_index * BITS_PER_LONG + bit_index;
1604
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001605 clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
1606 if (!vcpu->arch.irq_pending[word_index])
1607 clear_bit(word_index, &vcpu->arch.irq_summary);
Eddie Dong85f455f2007-07-06 12:20:49 +03001608 svm_inject_irq(svm, irq);
1609}
1610
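/*
 * Userspace-irqchip path: inject a pending vector if the interrupt
 * window is open, otherwise intercept VINTR so we exit as soon as it
 * opens (or when userspace asked for an interrupt-window exit).
 */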
Avi Kivity04d2cc72007-09-10 18:10:54 +03001611static void do_interrupt_requests(struct kvm_vcpu *vcpu,
Dor Laorc1150d82007-01-05 16:36:24 -08001612 struct kvm_run *kvm_run)
1613{
Avi Kivity04d2cc72007-09-10 18:10:54 +03001614 struct vcpu_svm *svm = to_svm(vcpu);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001615 struct vmcb_control_area *control = &svm->vmcb->control;
Dor Laorc1150d82007-01-05 16:36:24 -08001616
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001617 svm->vcpu.arch.interrupt_window_open =
Dor Laorc1150d82007-01-05 16:36:24 -08001618 (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001619 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
Dor Laorc1150d82007-01-05 16:36:24 -08001620
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001621 if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
Dor Laorc1150d82007-01-05 16:36:24 -08001622 /*
1623	 * If interrupts are enabled and not blocked by sti or mov ss, inject now.
1624 */
Eddie Dong85f455f2007-07-06 12:20:49 +03001625 svm_do_inject_vector(svm);
Dor Laorc1150d82007-01-05 16:36:24 -08001626
1627 /*
1628 * Interrupts blocked. Wait for unblock.
1629 */
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001630 if (!svm->vcpu.arch.interrupt_window_open &&
1631 (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
Dor Laorc1150d82007-01-05 16:36:24 -08001632 control->intercept |= 1ULL << INTERCEPT_VINTR;
Mike Dayd77c26f2007-10-08 09:02:08 -04001633 else
Dor Laorc1150d82007-01-05 16:36:24 -08001634 control->intercept &= ~(1ULL << INTERCEPT_VINTR);
1635}
1636
Izik Eiduscbc94022007-10-25 00:29:55 +02001637static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
1638{
1639 return 0;
1640}
1641
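/* Save/restore the hardware debug registers DR0-DR3 around a guest run. */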
Avi Kivity6aa8b732006-12-10 02:21:36 -08001642static void save_db_regs(unsigned long *db_regs)
1643{
Avi Kivity5aff4582006-12-13 00:33:45 -08001644 asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
1645 asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
1646 asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
1647 asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001648}
1649
1650static void load_db_regs(unsigned long *db_regs)
1651{
Avi Kivity5aff4582006-12-13 00:33:45 -08001652 asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
1653 asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
1654 asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
1655 asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001656}
1657
Avi Kivityd9e368d2007-06-07 19:18:30 +03001658static void svm_flush_tlb(struct kvm_vcpu *vcpu)
1659{
1660 force_new_asid(vcpu);
1661}
1662
Avi Kivity04d2cc72007-09-10 18:10:54 +03001663static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
1664{
1665}
1666
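/*
 * After a guest run, if CR8 writes were not intercepted the guest may
 * have changed V_TPR; propagate it back into the emulated local APIC.
 */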
Joerg Roedeld7bf8222008-04-16 16:51:17 +02001667static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
1668{
1669 struct vcpu_svm *svm = to_svm(vcpu);
1670
1671 if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
1672 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
1673 kvm_lapic_set_tpr(vcpu, cr8);
1674 }
1675}
1676
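/* Before VMRUN, mirror the local APIC TPR into the VMCB's V_TPR field. */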
Joerg Roedel649d6862008-04-16 16:51:15 +02001677static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
1678{
1679 struct vcpu_svm *svm = to_svm(vcpu);
1680 u64 cr8;
1681
1682 if (!irqchip_in_kernel(vcpu->kvm))
1683 return;
1684
1685 cr8 = kvm_get_cr8(vcpu);
1686 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
1687 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
1688}
1689
Avi Kivity04d2cc72007-09-10 18:10:54 +03001690static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001691{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001692 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001693 u16 fs_selector;
1694 u16 gs_selector;
1695 u16 ldt_selector;
Avi Kivityd9e368d2007-06-07 19:18:30 +03001696
Rusty Russelle756fc62007-07-30 20:07:08 +10001697 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001698
Joerg Roedel649d6862008-04-16 16:51:15 +02001699 sync_lapic_to_cr8(vcpu);
1700
Avi Kivity6aa8b732006-12-10 02:21:36 -08001701 save_host_msrs(vcpu);
1702 fs_selector = read_fs();
1703 gs_selector = read_gs();
1704 ldt_selector = read_ldt();
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001705 svm->host_cr2 = kvm_read_cr2();
1706 svm->host_dr6 = read_dr6();
1707 svm->host_dr7 = read_dr7();
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001708 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001709 /* required for live migration with NPT */
1710 if (npt_enabled)
1711 svm->vmcb->save.cr3 = vcpu->arch.cr3;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001712
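	/* Swap in the guest's DR0-DR3 only if it has breakpoints armed in DR7. */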
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001713 if (svm->vmcb->save.dr7 & 0xff) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001714 write_dr7(0);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001715 save_db_regs(svm->host_db_regs);
1716 load_db_regs(svm->db_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001717 }
Avi Kivity36241b82006-12-22 01:05:20 -08001718
Avi Kivity04d2cc72007-09-10 18:10:54 +03001719 clgi();
1720
1721 local_irq_enable();
Avi Kivity36241b82006-12-22 01:05:20 -08001722
Avi Kivity6aa8b732006-12-10 02:21:36 -08001723 asm volatile (
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001724#ifdef CONFIG_X86_64
Laurent Vivier54a08c02007-10-25 14:18:53 +02001725 "push %%rbp; \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001726#else
Laurent Vivierfe7935d2007-10-25 14:18:54 +02001727 "push %%ebp; \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001728#endif
1729
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001730#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001731 "mov %c[rbx](%[svm]), %%rbx \n\t"
1732 "mov %c[rcx](%[svm]), %%rcx \n\t"
1733 "mov %c[rdx](%[svm]), %%rdx \n\t"
1734 "mov %c[rsi](%[svm]), %%rsi \n\t"
1735 "mov %c[rdi](%[svm]), %%rdi \n\t"
1736 "mov %c[rbp](%[svm]), %%rbp \n\t"
1737 "mov %c[r8](%[svm]), %%r8 \n\t"
1738 "mov %c[r9](%[svm]), %%r9 \n\t"
1739 "mov %c[r10](%[svm]), %%r10 \n\t"
1740 "mov %c[r11](%[svm]), %%r11 \n\t"
1741 "mov %c[r12](%[svm]), %%r12 \n\t"
1742 "mov %c[r13](%[svm]), %%r13 \n\t"
1743 "mov %c[r14](%[svm]), %%r14 \n\t"
1744 "mov %c[r15](%[svm]), %%r15 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001745#else
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001746 "mov %c[rbx](%[svm]), %%ebx \n\t"
1747 "mov %c[rcx](%[svm]), %%ecx \n\t"
1748 "mov %c[rdx](%[svm]), %%edx \n\t"
1749 "mov %c[rsi](%[svm]), %%esi \n\t"
1750 "mov %c[rdi](%[svm]), %%edi \n\t"
1751 "mov %c[rbp](%[svm]), %%ebp \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001752#endif
1753
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001754#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001755 /* Enter guest mode */
1756 "push %%rax \n\t"
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001757 "mov %c[vmcb](%[svm]), %%rax \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001758 SVM_VMLOAD "\n\t"
1759 SVM_VMRUN "\n\t"
1760 SVM_VMSAVE "\n\t"
1761 "pop %%rax \n\t"
1762#else
1763 /* Enter guest mode */
1764 "push %%eax \n\t"
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001765 "mov %c[vmcb](%[svm]), %%eax \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001766 SVM_VMLOAD "\n\t"
1767 SVM_VMRUN "\n\t"
1768 SVM_VMSAVE "\n\t"
1769 "pop %%eax \n\t"
1770#endif
1771
1772 /* Save guest registers, load host registers */
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001773#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001774 "mov %%rbx, %c[rbx](%[svm]) \n\t"
1775 "mov %%rcx, %c[rcx](%[svm]) \n\t"
1776 "mov %%rdx, %c[rdx](%[svm]) \n\t"
1777 "mov %%rsi, %c[rsi](%[svm]) \n\t"
1778 "mov %%rdi, %c[rdi](%[svm]) \n\t"
1779 "mov %%rbp, %c[rbp](%[svm]) \n\t"
1780 "mov %%r8, %c[r8](%[svm]) \n\t"
1781 "mov %%r9, %c[r9](%[svm]) \n\t"
1782 "mov %%r10, %c[r10](%[svm]) \n\t"
1783 "mov %%r11, %c[r11](%[svm]) \n\t"
1784 "mov %%r12, %c[r12](%[svm]) \n\t"
1785 "mov %%r13, %c[r13](%[svm]) \n\t"
1786 "mov %%r14, %c[r14](%[svm]) \n\t"
1787 "mov %%r15, %c[r15](%[svm]) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001788
Laurent Vivier54a08c02007-10-25 14:18:53 +02001789 "pop %%rbp; \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001790#else
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001791 "mov %%ebx, %c[rbx](%[svm]) \n\t"
1792 "mov %%ecx, %c[rcx](%[svm]) \n\t"
1793 "mov %%edx, %c[rdx](%[svm]) \n\t"
1794 "mov %%esi, %c[rsi](%[svm]) \n\t"
1795 "mov %%edi, %c[rdi](%[svm]) \n\t"
1796 "mov %%ebp, %c[rbp](%[svm]) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001797
Laurent Vivierfe7935d2007-10-25 14:18:54 +02001798 "pop %%ebp; \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001799#endif
1800 :
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001801 : [svm]"a"(svm),
Avi Kivity6aa8b732006-12-10 02:21:36 -08001802 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001803 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
1804 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
1805 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
1806 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
1807 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
1808 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001809#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001810 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
1811 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
1812 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
1813 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
1814 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
1815 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
1816 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
1817 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001818#endif
Laurent Vivier54a08c02007-10-25 14:18:53 +02001819 : "cc", "memory"
1820#ifdef CONFIG_X86_64
1821 , "rbx", "rcx", "rdx", "rsi", "rdi"
1822 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
Laurent Vivierfe7935d2007-10-25 14:18:54 +02001823#else
1824 , "ebx", "ecx", "edx" , "esi", "edi"
Laurent Vivier54a08c02007-10-25 14:18:53 +02001825#endif
1826 );
Avi Kivity6aa8b732006-12-10 02:21:36 -08001827
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001828 if ((svm->vmcb->save.dr7 & 0xff))
1829 load_db_regs(svm->host_db_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001830
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001831 vcpu->arch.cr2 = svm->vmcb->save.cr2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001832
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001833 write_dr6(svm->host_dr6);
1834 write_dr7(svm->host_dr7);
1835 kvm_write_cr2(svm->host_cr2);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001836
1837 load_fs(fs_selector);
1838 load_gs(gs_selector);
1839 load_ldt(ldt_selector);
1840 load_host_msrs(vcpu);
1841
1842 reload_tss(vcpu);
1843
Avi Kivity56ba47d2007-11-07 17:14:18 +02001844 local_irq_disable();
1845
1846 stgi();
1847
Joerg Roedeld7bf8222008-04-16 16:51:17 +02001848 sync_cr8_to_lapic(vcpu);
1849
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001850 svm->next_rip = 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001851}
1852
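/*
 * Install a new paging root: with NPT it becomes the nested CR3 (the
 * guest keeps control of its own CR3), without NPT it is the shadow
 * CR3.  Either way a new ASID is assigned to flush stale translations.
 */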
Avi Kivity6aa8b732006-12-10 02:21:36 -08001853static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
1854{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001855 struct vcpu_svm *svm = to_svm(vcpu);
1856
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001857 if (npt_enabled) {
1858 svm->vmcb->control.nested_cr3 = root;
1859 force_new_asid(vcpu);
1860 return;
1861 }
1862
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001863 svm->vmcb->save.cr3 = root;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001864 force_new_asid(vcpu);
Anthony Liguori7807fa62007-04-23 09:17:21 -05001865
1866 if (vcpu->fpu_active) {
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001867 svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
1868 svm->vmcb->save.cr0 |= X86_CR0_TS;
Anthony Liguori7807fa62007-04-23 09:17:21 -05001869 vcpu->fpu_active = 0;
1870 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001871}
1872
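/* SVM can be disabled by the BIOS via the SVM_VM_CR_SVM_DISABLE bit in MSR_VM_CR. */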
Avi Kivity6aa8b732006-12-10 02:21:36 -08001873static int is_disabled(void)
1874{
Joerg Roedel6031a612007-06-22 12:29:50 +03001875 u64 vm_cr;
1876
1877 rdmsrl(MSR_VM_CR, vm_cr);
1878 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
1879 return 1;
1880
Avi Kivity6aa8b732006-12-10 02:21:36 -08001881 return 0;
1882}
1883
Ingo Molnar102d8322007-02-19 14:37:47 +02001884static void
1885svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1886{
1887 /*
1888 * Patch in the VMMCALL instruction:
1889 */
1890 hypercall[0] = 0x0f;
1891 hypercall[1] = 0x01;
1892 hypercall[2] = 0xd9;
Ingo Molnar102d8322007-02-19 14:37:47 +02001893}
1894
Yang, Sheng002c7f72007-07-31 14:23:01 +03001895static void svm_check_processor_compat(void *rtn)
1896{
1897 *(int *)rtn = 0;
1898}
1899
Avi Kivity774ead32007-12-26 13:57:04 +02001900static bool svm_cpu_has_accelerated_tpr(void)
1901{
1902 return false;
1903}
1904
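/* NPT walk depth: 4-level (PT64) on x86-64, 3-level PAE (PT32E) otherwise. */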
Sheng Yang67253af2008-04-25 10:20:22 +08001905static int get_npt_level(void)
1906{
1907#ifdef CONFIG_X86_64
1908 return PT64_ROOT_LEVEL;
1909#else
1910 return PT32E_ROOT_LEVEL;
1911#endif
1912}
1913
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001914static struct kvm_x86_ops svm_x86_ops = {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001915 .cpu_has_kvm_support = has_svm,
1916 .disabled_by_bios = is_disabled,
1917 .hardware_setup = svm_hardware_setup,
1918 .hardware_unsetup = svm_hardware_unsetup,
Yang, Sheng002c7f72007-07-31 14:23:01 +03001919 .check_processor_compatibility = svm_check_processor_compat,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001920 .hardware_enable = svm_hardware_enable,
1921 .hardware_disable = svm_hardware_disable,
Avi Kivity774ead32007-12-26 13:57:04 +02001922 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001923
1924 .vcpu_create = svm_create_vcpu,
1925 .vcpu_free = svm_free_vcpu,
Avi Kivity04d2cc72007-09-10 18:10:54 +03001926 .vcpu_reset = svm_vcpu_reset,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001927
Avi Kivity04d2cc72007-09-10 18:10:54 +03001928 .prepare_guest_switch = svm_prepare_guest_switch,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001929 .vcpu_load = svm_vcpu_load,
1930 .vcpu_put = svm_vcpu_put,
Avi Kivity774c47f2007-02-12 00:54:47 -08001931 .vcpu_decache = svm_vcpu_decache,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001932
1933 .set_guest_debug = svm_guest_debug,
1934 .get_msr = svm_get_msr,
1935 .set_msr = svm_set_msr,
1936 .get_segment_base = svm_get_segment_base,
1937 .get_segment = svm_get_segment,
1938 .set_segment = svm_set_segment,
Izik Eidus2e4d2652008-03-24 19:38:34 +02001939 .get_cpl = svm_get_cpl,
Rusty Russell1747fb72007-09-06 01:21:32 +10001940 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
Anthony Liguori25c4c272007-04-27 09:29:21 +03001941 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001942 .set_cr0 = svm_set_cr0,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001943 .set_cr3 = svm_set_cr3,
1944 .set_cr4 = svm_set_cr4,
1945 .set_efer = svm_set_efer,
1946 .get_idt = svm_get_idt,
1947 .set_idt = svm_set_idt,
1948 .get_gdt = svm_get_gdt,
1949 .set_gdt = svm_set_gdt,
1950 .get_dr = svm_get_dr,
1951 .set_dr = svm_set_dr,
1952 .cache_regs = svm_cache_regs,
1953 .decache_regs = svm_decache_regs,
1954 .get_rflags = svm_get_rflags,
1955 .set_rflags = svm_set_rflags,
1956
Avi Kivity6aa8b732006-12-10 02:21:36 -08001957 .tlb_flush = svm_flush_tlb,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001958
Avi Kivity6aa8b732006-12-10 02:21:36 -08001959 .run = svm_vcpu_run,
Avi Kivity04d2cc72007-09-10 18:10:54 +03001960 .handle_exit = handle_exit,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001961 .skip_emulated_instruction = skip_emulated_instruction,
Ingo Molnar102d8322007-02-19 14:37:47 +02001962 .patch_hypercall = svm_patch_hypercall,
Eddie Dong2a8067f2007-08-06 16:29:07 +03001963 .get_irq = svm_get_irq,
1964 .set_irq = svm_set_irq,
Avi Kivity298101d2007-11-25 13:41:11 +02001965 .queue_exception = svm_queue_exception,
1966 .exception_injected = svm_exception_injected,
Avi Kivity04d2cc72007-09-10 18:10:54 +03001967 .inject_pending_irq = svm_intr_assist,
1968 .inject_pending_vectors = do_interrupt_requests,
Izik Eiduscbc94022007-10-25 00:29:55 +02001969
1970 .set_tss_addr = svm_set_tss_addr,
Sheng Yang67253af2008-04-25 10:20:22 +08001971 .get_tdp_level = get_npt_level,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001972};
1973
1974static int __init svm_init(void)
1975{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08001976 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
Rusty Russellc16f8622007-07-30 21:12:19 +10001977 THIS_MODULE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001978}
1979
1980static void __exit svm_exit(void)
1981{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08001982 kvm_exit();
Avi Kivity6aa8b732006-12-10 02:21:36 -08001983}
1984
1985module_init(svm_init)
1986module_exit(svm_exit)