blob: 90abd3c58c652f1f9f71b7ec976da70d314f4bba [file] [log] [blame]
Avi Kivity6aa8b732006-12-10 02:21:36 -08001/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 *
9 * Authors:
10 * Avi Kivity <avi@qumranet.com>
11 * Yaniv Kamay <yaniv@qumranet.com>
12 *
13 * This work is licensed under the terms of the GNU GPL, version 2. See
14 * the COPYING file in the top-level directory.
15 *
16 */
17
18#include "kvm.h"
19#include "vmx.h"
Avi Kivity6aa8b732006-12-10 02:21:36 -080020#include <linux/module.h>
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +020021#include <linux/kernel.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080022#include <linux/mm.h>
23#include <linux/highmem.h>
Ingo Molnar07031e12007-01-10 23:15:38 -080024#include <linux/profile.h>
Alexey Dobriyane8edc6e2007-05-21 01:22:52 +040025#include <linux/sched.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080026#include <asm/io.h>
Anthony Liguori3b3be0d2006-12-13 00:33:43 -080027#include <asm/desc.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080028
29#include "segment_descriptor.h"
30
Avi Kivity6aa8b732006-12-10 02:21:36 -080031MODULE_AUTHOR("Qumranet");
32MODULE_LICENSE("GPL");
33
34static DEFINE_PER_CPU(struct vmcs *, vmxarea);
35static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
36
He, Qingfdef3ad2007-04-30 09:45:24 +030037static struct page *vmx_io_bitmap_a;
38static struct page *vmx_io_bitmap_b;
39
Avi Kivity05b3e0c2006-12-13 00:33:45 -080040#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -080041#define HOST_IS_64 1
42#else
43#define HOST_IS_64 0
44#endif
Eddie Dong2cc51562007-05-21 07:28:09 +030045#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
Avi Kivity6aa8b732006-12-10 02:21:36 -080046
47static struct vmcs_descriptor {
48 int size;
49 int order;
50 u32 revision_id;
51} vmcs_descriptor;
52
53#define VMX_SEGMENT_FIELD(seg) \
54 [VCPU_SREG_##seg] = { \
55 .selector = GUEST_##seg##_SELECTOR, \
56 .base = GUEST_##seg##_BASE, \
57 .limit = GUEST_##seg##_LIMIT, \
58 .ar_bytes = GUEST_##seg##_AR_BYTES, \
59 }
60
61static struct kvm_vmx_segment_field {
62 unsigned selector;
63 unsigned base;
64 unsigned limit;
65 unsigned ar_bytes;
66} kvm_vmx_segment_fields[] = {
67 VMX_SEGMENT_FIELD(CS),
68 VMX_SEGMENT_FIELD(DS),
69 VMX_SEGMENT_FIELD(ES),
70 VMX_SEGMENT_FIELD(FS),
71 VMX_SEGMENT_FIELD(GS),
72 VMX_SEGMENT_FIELD(SS),
73 VMX_SEGMENT_FIELD(TR),
74 VMX_SEGMENT_FIELD(LDTR),
75};
76
Avi Kivity4d56c8a2007-04-19 14:28:44 +030077/*
78 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
79 * away by decrementing the array size.
80 */
Avi Kivity6aa8b732006-12-10 02:21:36 -080081static const u32 vmx_msr_index[] = {
Avi Kivity05b3e0c2006-12-13 00:33:45 -080082#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -080083 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
84#endif
85 MSR_EFER, MSR_K6_STAR,
86};
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +020087#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
Avi Kivity6aa8b732006-12-10 02:21:36 -080088
Eddie Dong2cc51562007-05-21 07:28:09 +030089static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
90{
91 return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
92}
93
94static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
95{
96 int efer_offset = vcpu->msr_offset_efer;
97 return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
98 msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
99}
100
Avi Kivity6aa8b732006-12-10 02:21:36 -0800101static inline int is_page_fault(u32 intr_info)
102{
103 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
104 INTR_INFO_VALID_MASK)) ==
105 (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
106}
107
Anthony Liguori2ab455c2007-04-27 09:29:49 +0300108static inline int is_no_device(u32 intr_info)
109{
110 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
111 INTR_INFO_VALID_MASK)) ==
112 (INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
113}
114
Avi Kivity6aa8b732006-12-10 02:21:36 -0800115static inline int is_external_interrupt(u32 intr_info)
116{
117 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
118 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
119}
120
Eddie Donga75beee2007-05-17 18:55:15 +0300121static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
Avi Kivity7725f0b2006-12-13 00:34:01 -0800122{
123 int i;
124
125 for (i = 0; i < vcpu->nmsrs; ++i)
126 if (vcpu->guest_msrs[i].index == msr)
Eddie Donga75beee2007-05-17 18:55:15 +0300127 return i;
128 return -1;
129}
130
131static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
132{
133 int i;
134
135 i = __find_msr_index(vcpu, msr);
136 if (i >= 0)
137 return &vcpu->guest_msrs[i];
Al Viro8b6d44c2007-02-09 16:38:40 +0000138 return NULL;
Avi Kivity7725f0b2006-12-13 00:34:01 -0800139}
140
Avi Kivity6aa8b732006-12-10 02:21:36 -0800141static void vmcs_clear(struct vmcs *vmcs)
142{
143 u64 phys_addr = __pa(vmcs);
144 u8 error;
145
146 asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
147 : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
148 : "cc", "memory");
149 if (error)
150 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
151 vmcs, phys_addr);
152}
153
154static void __vcpu_clear(void *arg)
155{
156 struct kvm_vcpu *vcpu = arg;
Ingo Molnard3b2c332007-01-05 16:36:23 -0800157 int cpu = raw_smp_processor_id();
Avi Kivity6aa8b732006-12-10 02:21:36 -0800158
159 if (vcpu->cpu == cpu)
160 vmcs_clear(vcpu->vmcs);
161 if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
162 per_cpu(current_vmcs, cpu) = NULL;
163}
164
Avi Kivity8d0be2b2007-02-12 00:54:46 -0800165static void vcpu_clear(struct kvm_vcpu *vcpu)
166{
167 if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
168 smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
169 else
170 __vcpu_clear(vcpu);
171 vcpu->launched = 0;
172}
173
Avi Kivity6aa8b732006-12-10 02:21:36 -0800174static unsigned long vmcs_readl(unsigned long field)
175{
176 unsigned long value;
177
178 asm volatile (ASM_VMX_VMREAD_RDX_RAX
179 : "=a"(value) : "d"(field) : "cc");
180 return value;
181}
182
183static u16 vmcs_read16(unsigned long field)
184{
185 return vmcs_readl(field);
186}
187
188static u32 vmcs_read32(unsigned long field)
189{
190 return vmcs_readl(field);
191}
192
193static u64 vmcs_read64(unsigned long field)
194{
Avi Kivity05b3e0c2006-12-13 00:33:45 -0800195#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -0800196 return vmcs_readl(field);
197#else
198 return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
199#endif
200}
201
Avi Kivitye52de1b2007-01-05 16:36:56 -0800202static noinline void vmwrite_error(unsigned long field, unsigned long value)
203{
204 printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
205 field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
206 dump_stack();
207}
208
Avi Kivity6aa8b732006-12-10 02:21:36 -0800209static void vmcs_writel(unsigned long field, unsigned long value)
210{
211 u8 error;
212
213 asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
214 : "=q"(error) : "a"(value), "d"(field) : "cc" );
Avi Kivitye52de1b2007-01-05 16:36:56 -0800215 if (unlikely(error))
216 vmwrite_error(field, value);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800217}
218
219static void vmcs_write16(unsigned long field, u16 value)
220{
221 vmcs_writel(field, value);
222}
223
224static void vmcs_write32(unsigned long field, u32 value)
225{
226 vmcs_writel(field, value);
227}
228
229static void vmcs_write64(unsigned long field, u64 value)
230{
Avi Kivity05b3e0c2006-12-13 00:33:45 -0800231#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -0800232 vmcs_writel(field, value);
233#else
234 vmcs_writel(field, value);
235 asm volatile ("");
236 vmcs_writel(field+1, value >> 32);
237#endif
238}
239
Anthony Liguori2ab455c2007-04-27 09:29:49 +0300240static void vmcs_clear_bits(unsigned long field, u32 mask)
241{
242 vmcs_writel(field, vmcs_readl(field) & ~mask);
243}
244
245static void vmcs_set_bits(unsigned long field, u32 mask)
246{
247 vmcs_writel(field, vmcs_readl(field) | mask);
248}
249
Avi Kivityabd3f2d2007-05-02 17:57:40 +0300250static void update_exception_bitmap(struct kvm_vcpu *vcpu)
251{
252 u32 eb;
253
254 eb = 1u << PF_VECTOR;
255 if (!vcpu->fpu_active)
256 eb |= 1u << NM_VECTOR;
257 if (vcpu->guest_debug.enabled)
258 eb |= 1u << 1;
259 if (vcpu->rmode.active)
260 eb = ~0;
261 vmcs_write32(EXCEPTION_BITMAP, eb);
262}
263
Avi Kivity33ed6322007-05-02 16:54:03 +0300264static void reload_tss(void)
265{
266#ifndef CONFIG_X86_64
267
268 /*
269 * VT restores TR but not its size. Useless.
270 */
271 struct descriptor_table gdt;
272 struct segment_descriptor *descs;
273
274 get_gdt(&gdt);
275 descs = (void *)gdt.base;
276 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
277 load_TR_desc();
278#endif
279}
280
Eddie Dong2cc51562007-05-21 07:28:09 +0300281static void load_transition_efer(struct kvm_vcpu *vcpu)
282{
283 u64 trans_efer;
284 int efer_offset = vcpu->msr_offset_efer;
285
286 trans_efer = vcpu->host_msrs[efer_offset].data;
287 trans_efer &= ~EFER_SAVE_RESTORE_BITS;
288 trans_efer |= msr_efer_save_restore_bits(
289 vcpu->guest_msrs[efer_offset]);
290 wrmsrl(MSR_EFER, trans_efer);
291 vcpu->stat.efer_reload++;
292}
293
Avi Kivity33ed6322007-05-02 16:54:03 +0300294static void vmx_save_host_state(struct kvm_vcpu *vcpu)
295{
296 struct vmx_host_state *hs = &vcpu->vmx_host_state;
297
298 if (hs->loaded)
299 return;
300
301 hs->loaded = 1;
302 /*
303 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
304 * allow segment selectors with cpl > 0 or ti == 1.
305 */
306 hs->ldt_sel = read_ldt();
307 hs->fs_gs_ldt_reload_needed = hs->ldt_sel;
308 hs->fs_sel = read_fs();
309 if (!(hs->fs_sel & 7))
310 vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel);
311 else {
312 vmcs_write16(HOST_FS_SELECTOR, 0);
313 hs->fs_gs_ldt_reload_needed = 1;
314 }
315 hs->gs_sel = read_gs();
316 if (!(hs->gs_sel & 7))
317 vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel);
318 else {
319 vmcs_write16(HOST_GS_SELECTOR, 0);
320 hs->fs_gs_ldt_reload_needed = 1;
321 }
322
323#ifdef CONFIG_X86_64
324 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
325 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
326#else
327 vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel));
328 vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel));
329#endif
Avi Kivity707c0872007-05-02 17:33:43 +0300330
331#ifdef CONFIG_X86_64
332 if (is_long_mode(vcpu)) {
Eddie Donga75beee2007-05-17 18:55:15 +0300333 save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
Avi Kivity707c0872007-05-02 17:33:43 +0300334 }
335#endif
Eddie Donga75beee2007-05-17 18:55:15 +0300336 load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
Eddie Dong2cc51562007-05-21 07:28:09 +0300337 if (msr_efer_need_save_restore(vcpu))
338 load_transition_efer(vcpu);
Avi Kivity33ed6322007-05-02 16:54:03 +0300339}
340
341static void vmx_load_host_state(struct kvm_vcpu *vcpu)
342{
343 struct vmx_host_state *hs = &vcpu->vmx_host_state;
344
345 if (!hs->loaded)
346 return;
347
348 hs->loaded = 0;
349 if (hs->fs_gs_ldt_reload_needed) {
350 load_ldt(hs->ldt_sel);
351 load_fs(hs->fs_sel);
352 /*
353 * If we have to reload gs, we must take care to
354 * preserve our gs base.
355 */
356 local_irq_disable();
357 load_gs(hs->gs_sel);
358#ifdef CONFIG_X86_64
359 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
360#endif
361 local_irq_enable();
362
363 reload_tss();
364 }
Eddie Donga75beee2007-05-17 18:55:15 +0300365 save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
366 load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
Eddie Dong2cc51562007-05-21 07:28:09 +0300367 if (msr_efer_need_save_restore(vcpu))
368 load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
Avi Kivity33ed6322007-05-02 16:54:03 +0300369}
370
Avi Kivity6aa8b732006-12-10 02:21:36 -0800371/*
372 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
373 * vcpu mutex is already taken.
374 */
Avi Kivitybccf2152007-02-21 18:04:26 +0200375static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800376{
377 u64 phys_addr = __pa(vcpu->vmcs);
378 int cpu;
379
380 cpu = get_cpu();
381
Avi Kivity8d0be2b2007-02-12 00:54:46 -0800382 if (vcpu->cpu != cpu)
383 vcpu_clear(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800384
385 if (per_cpu(current_vmcs, cpu) != vcpu->vmcs) {
386 u8 error;
387
388 per_cpu(current_vmcs, cpu) = vcpu->vmcs;
389 asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
390 : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
391 : "cc");
392 if (error)
393 printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
394 vcpu->vmcs, phys_addr);
395 }
396
397 if (vcpu->cpu != cpu) {
398 struct descriptor_table dt;
399 unsigned long sysenter_esp;
400
401 vcpu->cpu = cpu;
402 /*
403 * Linux uses per-cpu TSS and GDT, so set these when switching
404 * processors.
405 */
406 vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
407 get_gdt(&dt);
408 vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */
409
410 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
411 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
412 }
Avi Kivity6aa8b732006-12-10 02:21:36 -0800413}
414
415static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
416{
Avi Kivity33ed6322007-05-02 16:54:03 +0300417 vmx_load_host_state(vcpu);
Avi Kivity7702fd12007-06-14 16:27:40 +0300418 kvm_put_guest_fpu(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800419 put_cpu();
420}
421
Avi Kivity5fd86fc2007-05-02 20:40:00 +0300422static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
423{
424 if (vcpu->fpu_active)
425 return;
426 vcpu->fpu_active = 1;
427 vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
428 if (vcpu->cr0 & CR0_TS_MASK)
429 vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
430 update_exception_bitmap(vcpu);
431}
432
433static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
434{
435 if (!vcpu->fpu_active)
436 return;
437 vcpu->fpu_active = 0;
438 vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
439 update_exception_bitmap(vcpu);
440}
441
Avi Kivity774c47f2007-02-12 00:54:47 -0800442static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
443{
444 vcpu_clear(vcpu);
445}
446
Avi Kivity6aa8b732006-12-10 02:21:36 -0800447static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
448{
449 return vmcs_readl(GUEST_RFLAGS);
450}
451
452static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
453{
454 vmcs_writel(GUEST_RFLAGS, rflags);
455}
456
457static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
458{
459 unsigned long rip;
460 u32 interruptibility;
461
462 rip = vmcs_readl(GUEST_RIP);
463 rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
464 vmcs_writel(GUEST_RIP, rip);
465
466 /*
467 * We emulated an instruction, so temporary interrupt blocking
468 * should be removed, if set.
469 */
470 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
471 if (interruptibility & 3)
472 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
473 interruptibility & ~3);
Dor Laorc1150d82007-01-05 16:36:24 -0800474 vcpu->interrupt_window_open = 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800475}
476
477static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
478{
479 printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
480 vmcs_readl(GUEST_RIP));
481 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
482 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
483 GP_VECTOR |
484 INTR_TYPE_EXCEPTION |
485 INTR_INFO_DELIEVER_CODE_MASK |
486 INTR_INFO_VALID_MASK);
487}
488
489/*
Eddie Donga75beee2007-05-17 18:55:15 +0300490 * Swap MSR entry in host/guest MSR entry array.
491 */
492void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
493{
494 struct vmx_msr_entry tmp;
495 tmp = vcpu->guest_msrs[to];
496 vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
497 vcpu->guest_msrs[from] = tmp;
498 tmp = vcpu->host_msrs[to];
499 vcpu->host_msrs[to] = vcpu->host_msrs[from];
500 vcpu->host_msrs[from] = tmp;
501}
502
503/*
Avi Kivitye38aea32007-04-19 13:22:48 +0300504 * Set up the vmcs to automatically save and restore system
505 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
506 * mode, as fiddling with msrs is very expensive.
507 */
508static void setup_msrs(struct kvm_vcpu *vcpu)
509{
Eddie Dong2cc51562007-05-21 07:28:09 +0300510 int save_nmsrs;
Avi Kivitye38aea32007-04-19 13:22:48 +0300511
Eddie Donga75beee2007-05-17 18:55:15 +0300512 save_nmsrs = 0;
Avi Kivity4d56c8a2007-04-19 14:28:44 +0300513#ifdef CONFIG_X86_64
Eddie Donga75beee2007-05-17 18:55:15 +0300514 if (is_long_mode(vcpu)) {
Eddie Dong2cc51562007-05-21 07:28:09 +0300515 int index;
516
Eddie Donga75beee2007-05-17 18:55:15 +0300517 index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
518 if (index >= 0)
519 move_msr_up(vcpu, index, save_nmsrs++);
520 index = __find_msr_index(vcpu, MSR_LSTAR);
521 if (index >= 0)
522 move_msr_up(vcpu, index, save_nmsrs++);
523 index = __find_msr_index(vcpu, MSR_CSTAR);
524 if (index >= 0)
525 move_msr_up(vcpu, index, save_nmsrs++);
526 index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
527 if (index >= 0)
528 move_msr_up(vcpu, index, save_nmsrs++);
529 /*
530 * MSR_K6_STAR is only needed on long mode guests, and only
531 * if efer.sce is enabled.
532 */
533 index = __find_msr_index(vcpu, MSR_K6_STAR);
534 if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
535 move_msr_up(vcpu, index, save_nmsrs++);
Avi Kivity4d56c8a2007-04-19 14:28:44 +0300536 }
Eddie Donga75beee2007-05-17 18:55:15 +0300537#endif
538 vcpu->save_nmsrs = save_nmsrs;
Avi Kivity4d56c8a2007-04-19 14:28:44 +0300539
Eddie Donga75beee2007-05-17 18:55:15 +0300540#ifdef CONFIG_X86_64
541 vcpu->msr_offset_kernel_gs_base =
542 __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
543#endif
Eddie Dong2cc51562007-05-21 07:28:09 +0300544 vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
Avi Kivitye38aea32007-04-19 13:22:48 +0300545}
546
547/*
Avi Kivity6aa8b732006-12-10 02:21:36 -0800548 * reads and returns guest's timestamp counter "register"
549 * guest_tsc = host_tsc + tsc_offset -- 21.3
550 */
551static u64 guest_read_tsc(void)
552{
553 u64 host_tsc, tsc_offset;
554
555 rdtscll(host_tsc);
556 tsc_offset = vmcs_read64(TSC_OFFSET);
557 return host_tsc + tsc_offset;
558}
559
560/*
561 * writes 'guest_tsc' into guest's timestamp counter "register"
562 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
563 */
564static void guest_write_tsc(u64 guest_tsc)
565{
566 u64 host_tsc;
567
568 rdtscll(host_tsc);
569 vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
570}
571
Avi Kivity6aa8b732006-12-10 02:21:36 -0800572/*
573 * Reads an msr value (of 'msr_index') into 'pdata'.
574 * Returns 0 on success, non-0 otherwise.
575 * Assumes vcpu_load() was already called.
576 */
577static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
578{
579 u64 data;
580 struct vmx_msr_entry *msr;
581
582 if (!pdata) {
583 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
584 return -EINVAL;
585 }
586
587 switch (msr_index) {
Avi Kivity05b3e0c2006-12-13 00:33:45 -0800588#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -0800589 case MSR_FS_BASE:
590 data = vmcs_readl(GUEST_FS_BASE);
591 break;
592 case MSR_GS_BASE:
593 data = vmcs_readl(GUEST_GS_BASE);
594 break;
595 case MSR_EFER:
Avi Kivity3bab1f52006-12-29 16:49:48 -0800596 return kvm_get_msr_common(vcpu, msr_index, pdata);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800597#endif
598 case MSR_IA32_TIME_STAMP_COUNTER:
599 data = guest_read_tsc();
600 break;
601 case MSR_IA32_SYSENTER_CS:
602 data = vmcs_read32(GUEST_SYSENTER_CS);
603 break;
604 case MSR_IA32_SYSENTER_EIP:
Avi Kivityf5b42c32007-03-06 12:05:53 +0200605 data = vmcs_readl(GUEST_SYSENTER_EIP);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800606 break;
607 case MSR_IA32_SYSENTER_ESP:
Avi Kivityf5b42c32007-03-06 12:05:53 +0200608 data = vmcs_readl(GUEST_SYSENTER_ESP);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800609 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800610 default:
611 msr = find_msr_entry(vcpu, msr_index);
Avi Kivity3bab1f52006-12-29 16:49:48 -0800612 if (msr) {
613 data = msr->data;
614 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800615 }
Avi Kivity3bab1f52006-12-29 16:49:48 -0800616 return kvm_get_msr_common(vcpu, msr_index, pdata);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800617 }
618
619 *pdata = data;
620 return 0;
621}
622
623/*
624 * Writes msr value into into the appropriate "register".
625 * Returns 0 on success, non-0 otherwise.
626 * Assumes vcpu_load() was already called.
627 */
628static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
629{
630 struct vmx_msr_entry *msr;
Eddie Dong2cc51562007-05-21 07:28:09 +0300631 int ret = 0;
632
Avi Kivity6aa8b732006-12-10 02:21:36 -0800633 switch (msr_index) {
Avi Kivity05b3e0c2006-12-13 00:33:45 -0800634#ifdef CONFIG_X86_64
Avi Kivity3bab1f52006-12-29 16:49:48 -0800635 case MSR_EFER:
Eddie Dong2cc51562007-05-21 07:28:09 +0300636 ret = kvm_set_msr_common(vcpu, msr_index, data);
637 if (vcpu->vmx_host_state.loaded)
638 load_transition_efer(vcpu);
639 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800640 case MSR_FS_BASE:
641 vmcs_writel(GUEST_FS_BASE, data);
642 break;
643 case MSR_GS_BASE:
644 vmcs_writel(GUEST_GS_BASE, data);
645 break;
646#endif
647 case MSR_IA32_SYSENTER_CS:
648 vmcs_write32(GUEST_SYSENTER_CS, data);
649 break;
650 case MSR_IA32_SYSENTER_EIP:
Avi Kivityf5b42c32007-03-06 12:05:53 +0200651 vmcs_writel(GUEST_SYSENTER_EIP, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800652 break;
653 case MSR_IA32_SYSENTER_ESP:
Avi Kivityf5b42c32007-03-06 12:05:53 +0200654 vmcs_writel(GUEST_SYSENTER_ESP, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800655 break;
Avi Kivityd27d4ac2007-02-19 14:37:46 +0200656 case MSR_IA32_TIME_STAMP_COUNTER:
Avi Kivity6aa8b732006-12-10 02:21:36 -0800657 guest_write_tsc(data);
658 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800659 default:
660 msr = find_msr_entry(vcpu, msr_index);
Avi Kivity3bab1f52006-12-29 16:49:48 -0800661 if (msr) {
662 msr->data = data;
Eddie Donga75beee2007-05-17 18:55:15 +0300663 if (vcpu->vmx_host_state.loaded)
Eddie Dong2cc51562007-05-21 07:28:09 +0300664 load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
Avi Kivity3bab1f52006-12-29 16:49:48 -0800665 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800666 }
Eddie Dong2cc51562007-05-21 07:28:09 +0300667 ret = kvm_set_msr_common(vcpu, msr_index, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800668 }
669
Eddie Dong2cc51562007-05-21 07:28:09 +0300670 return ret;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800671}
672
673/*
674 * Sync the rsp and rip registers into the vcpu structure. This allows
675 * registers to be accessed by indexing vcpu->regs.
676 */
677static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
678{
679 vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
680 vcpu->rip = vmcs_readl(GUEST_RIP);
681}
682
683/*
684 * Syncs rsp and rip back into the vmcs. Should be called after possible
685 * modification.
686 */
687static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
688{
689 vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
690 vmcs_writel(GUEST_RIP, vcpu->rip);
691}
692
693static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
694{
695 unsigned long dr7 = 0x400;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800696 int old_singlestep;
697
Avi Kivity6aa8b732006-12-10 02:21:36 -0800698 old_singlestep = vcpu->guest_debug.singlestep;
699
700 vcpu->guest_debug.enabled = dbg->enabled;
701 if (vcpu->guest_debug.enabled) {
702 int i;
703
704 dr7 |= 0x200; /* exact */
705 for (i = 0; i < 4; ++i) {
706 if (!dbg->breakpoints[i].enabled)
707 continue;
708 vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
709 dr7 |= 2 << (i*2); /* global enable */
710 dr7 |= 0 << (i*4+16); /* execution breakpoint */
711 }
712
Avi Kivity6aa8b732006-12-10 02:21:36 -0800713 vcpu->guest_debug.singlestep = dbg->singlestep;
Avi Kivityabd3f2d2007-05-02 17:57:40 +0300714 } else
Avi Kivity6aa8b732006-12-10 02:21:36 -0800715 vcpu->guest_debug.singlestep = 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800716
717 if (old_singlestep && !vcpu->guest_debug.singlestep) {
718 unsigned long flags;
719
720 flags = vmcs_readl(GUEST_RFLAGS);
721 flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
722 vmcs_writel(GUEST_RFLAGS, flags);
723 }
724
Avi Kivityabd3f2d2007-05-02 17:57:40 +0300725 update_exception_bitmap(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800726 vmcs_writel(GUEST_DR7, dr7);
727
728 return 0;
729}
730
731static __init int cpu_has_kvm_support(void)
732{
733 unsigned long ecx = cpuid_ecx(1);
734 return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
735}
736
737static __init int vmx_disabled_by_bios(void)
738{
739 u64 msr;
740
741 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
742 return (msr & 5) == 1; /* locked but not enabled */
743}
744
Avi Kivity774c47f2007-02-12 00:54:47 -0800745static void hardware_enable(void *garbage)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800746{
747 int cpu = raw_smp_processor_id();
748 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
749 u64 old;
750
751 rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
Avi Kivitybfdc0c22006-12-13 00:34:16 -0800752 if ((old & 5) != 5)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800753 /* enable and lock */
754 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5);
755 write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
756 asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
757 : "memory", "cc");
758}
759
760static void hardware_disable(void *garbage)
761{
762 asm volatile (ASM_VMX_VMXOFF : : : "cc");
763}
764
765static __init void setup_vmcs_descriptor(void)
766{
767 u32 vmx_msr_low, vmx_msr_high;
768
Nguyen Anh Quynhc68876f2006-12-29 16:49:54 -0800769 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800770 vmcs_descriptor.size = vmx_msr_high & 0x1fff;
771 vmcs_descriptor.order = get_order(vmcs_descriptor.size);
772 vmcs_descriptor.revision_id = vmx_msr_low;
Nguyen Anh Quynhc68876f2006-12-29 16:49:54 -0800773}
Avi Kivity6aa8b732006-12-10 02:21:36 -0800774
775static struct vmcs *alloc_vmcs_cpu(int cpu)
776{
777 int node = cpu_to_node(cpu);
778 struct page *pages;
779 struct vmcs *vmcs;
780
781 pages = alloc_pages_node(node, GFP_KERNEL, vmcs_descriptor.order);
782 if (!pages)
783 return NULL;
784 vmcs = page_address(pages);
785 memset(vmcs, 0, vmcs_descriptor.size);
786 vmcs->revision_id = vmcs_descriptor.revision_id; /* vmcs revision id */
787 return vmcs;
788}
789
790static struct vmcs *alloc_vmcs(void)
791{
Ingo Molnard3b2c332007-01-05 16:36:23 -0800792 return alloc_vmcs_cpu(raw_smp_processor_id());
Avi Kivity6aa8b732006-12-10 02:21:36 -0800793}
794
795static void free_vmcs(struct vmcs *vmcs)
796{
797 free_pages((unsigned long)vmcs, vmcs_descriptor.order);
798}
799
Sam Ravnborg39959582007-06-01 00:47:13 -0700800static void free_kvm_area(void)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800801{
802 int cpu;
803
804 for_each_online_cpu(cpu)
805 free_vmcs(per_cpu(vmxarea, cpu));
806}
807
808extern struct vmcs *alloc_vmcs_cpu(int cpu);
809
810static __init int alloc_kvm_area(void)
811{
812 int cpu;
813
814 for_each_online_cpu(cpu) {
815 struct vmcs *vmcs;
816
817 vmcs = alloc_vmcs_cpu(cpu);
818 if (!vmcs) {
819 free_kvm_area();
820 return -ENOMEM;
821 }
822
823 per_cpu(vmxarea, cpu) = vmcs;
824 }
825 return 0;
826}
827
828static __init int hardware_setup(void)
829{
830 setup_vmcs_descriptor();
831 return alloc_kvm_area();
832}
833
834static __exit void hardware_unsetup(void)
835{
836 free_kvm_area();
837}
838
Avi Kivity6aa8b732006-12-10 02:21:36 -0800839static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
840{
841 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
842
Avi Kivity6af11b92007-03-19 13:18:10 +0200843 if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
Avi Kivity6aa8b732006-12-10 02:21:36 -0800844 vmcs_write16(sf->selector, save->selector);
845 vmcs_writel(sf->base, save->base);
846 vmcs_write32(sf->limit, save->limit);
847 vmcs_write32(sf->ar_bytes, save->ar);
848 } else {
849 u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
850 << AR_DPL_SHIFT;
851 vmcs_write32(sf->ar_bytes, 0x93 | dpl);
852 }
853}
854
855static void enter_pmode(struct kvm_vcpu *vcpu)
856{
857 unsigned long flags;
858
859 vcpu->rmode.active = 0;
860
861 vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
862 vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
863 vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
864
865 flags = vmcs_readl(GUEST_RFLAGS);
866 flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
867 flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
868 vmcs_writel(GUEST_RFLAGS, flags);
869
870 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~CR4_VME_MASK) |
871 (vmcs_readl(CR4_READ_SHADOW) & CR4_VME_MASK));
872
873 update_exception_bitmap(vcpu);
874
875 fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
876 fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
877 fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
878 fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);
879
880 vmcs_write16(GUEST_SS_SELECTOR, 0);
881 vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
882
883 vmcs_write16(GUEST_CS_SELECTOR,
884 vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
885 vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
886}
887
888static int rmode_tss_base(struct kvm* kvm)
889{
890 gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
891 return base_gfn << PAGE_SHIFT;
892}
893
894static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
895{
896 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
897
898 save->selector = vmcs_read16(sf->selector);
899 save->base = vmcs_readl(sf->base);
900 save->limit = vmcs_read32(sf->limit);
901 save->ar = vmcs_read32(sf->ar_bytes);
902 vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
903 vmcs_write32(sf->limit, 0xffff);
904 vmcs_write32(sf->ar_bytes, 0xf3);
905}
906
907static void enter_rmode(struct kvm_vcpu *vcpu)
908{
909 unsigned long flags;
910
911 vcpu->rmode.active = 1;
912
913 vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
914 vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
915
916 vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
917 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
918
919 vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
920 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
921
922 flags = vmcs_readl(GUEST_RFLAGS);
923 vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
924
925 flags |= IOPL_MASK | X86_EFLAGS_VM;
926
927 vmcs_writel(GUEST_RFLAGS, flags);
928 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | CR4_VME_MASK);
929 update_exception_bitmap(vcpu);
930
931 vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
932 vmcs_write32(GUEST_SS_LIMIT, 0xffff);
933 vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
934
935 vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
Michael Riepeabacf8d2006-12-22 01:05:45 -0800936 vmcs_write32(GUEST_CS_LIMIT, 0xffff);
Avi Kivity8cb5b032007-03-20 18:40:40 +0200937 if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
938 vmcs_writel(GUEST_CS_BASE, 0xf0000);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800939 vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
940
941 fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
942 fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
943 fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
944 fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
945}
946
Avi Kivity05b3e0c2006-12-13 00:33:45 -0800947#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -0800948
949static void enter_lmode(struct kvm_vcpu *vcpu)
950{
951 u32 guest_tr_ar;
952
953 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
954 if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
955 printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
956 __FUNCTION__);
957 vmcs_write32(GUEST_TR_AR_BYTES,
958 (guest_tr_ar & ~AR_TYPE_MASK)
959 | AR_TYPE_BUSY_64_TSS);
960 }
961
962 vcpu->shadow_efer |= EFER_LMA;
963
964 find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
965 vmcs_write32(VM_ENTRY_CONTROLS,
966 vmcs_read32(VM_ENTRY_CONTROLS)
967 | VM_ENTRY_CONTROLS_IA32E_MASK);
968}
969
970static void exit_lmode(struct kvm_vcpu *vcpu)
971{
972 vcpu->shadow_efer &= ~EFER_LMA;
973
974 vmcs_write32(VM_ENTRY_CONTROLS,
975 vmcs_read32(VM_ENTRY_CONTROLS)
976 & ~VM_ENTRY_CONTROLS_IA32E_MASK);
977}
978
979#endif
980
Anthony Liguori25c4c272007-04-27 09:29:21 +0300981static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -0800982{
Avi Kivity399badf2007-01-05 16:36:38 -0800983 vcpu->cr4 &= KVM_GUEST_CR4_MASK;
984 vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
985}
986
Avi Kivity6aa8b732006-12-10 02:21:36 -0800987static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
988{
Avi Kivity5fd86fc2007-05-02 20:40:00 +0300989 vmx_fpu_deactivate(vcpu);
990
Avi Kivity6aa8b732006-12-10 02:21:36 -0800991 if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
992 enter_pmode(vcpu);
993
994 if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
995 enter_rmode(vcpu);
996
Avi Kivity05b3e0c2006-12-13 00:33:45 -0800997#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -0800998 if (vcpu->shadow_efer & EFER_LME) {
999 if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
1000 enter_lmode(vcpu);
1001 if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK))
1002 exit_lmode(vcpu);
1003 }
1004#endif
1005
1006 vmcs_writel(CR0_READ_SHADOW, cr0);
1007 vmcs_writel(GUEST_CR0,
1008 (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
1009 vcpu->cr0 = cr0;
Avi Kivity5fd86fc2007-05-02 20:40:00 +03001010
1011 if (!(cr0 & CR0_TS_MASK) || !(cr0 & CR0_PE_MASK))
1012 vmx_fpu_activate(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001013}
1014
Avi Kivity6aa8b732006-12-10 02:21:36 -08001015static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1016{
1017 vmcs_writel(GUEST_CR3, cr3);
Avi Kivity5fd86fc2007-05-02 20:40:00 +03001018 if (vcpu->cr0 & CR0_PE_MASK)
1019 vmx_fpu_deactivate(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001020}
1021
1022static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1023{
1024 vmcs_writel(CR4_READ_SHADOW, cr4);
1025 vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
1026 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
1027 vcpu->cr4 = cr4;
1028}
1029
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001030#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001031
1032static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1033{
1034 struct vmx_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
1035
1036 vcpu->shadow_efer = efer;
1037 if (efer & EFER_LMA) {
1038 vmcs_write32(VM_ENTRY_CONTROLS,
1039 vmcs_read32(VM_ENTRY_CONTROLS) |
1040 VM_ENTRY_CONTROLS_IA32E_MASK);
1041 msr->data = efer;
1042
1043 } else {
1044 vmcs_write32(VM_ENTRY_CONTROLS,
1045 vmcs_read32(VM_ENTRY_CONTROLS) &
1046 ~VM_ENTRY_CONTROLS_IA32E_MASK);
1047
1048 msr->data = efer & ~EFER_LME;
1049 }
Avi Kivitye38aea32007-04-19 13:22:48 +03001050 setup_msrs(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001051}
1052
1053#endif
1054
1055static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1056{
1057 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1058
1059 return vmcs_readl(sf->base);
1060}
1061
1062static void vmx_get_segment(struct kvm_vcpu *vcpu,
1063 struct kvm_segment *var, int seg)
1064{
1065 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1066 u32 ar;
1067
1068 var->base = vmcs_readl(sf->base);
1069 var->limit = vmcs_read32(sf->limit);
1070 var->selector = vmcs_read16(sf->selector);
1071 ar = vmcs_read32(sf->ar_bytes);
1072 if (ar & AR_UNUSABLE_MASK)
1073 ar = 0;
1074 var->type = ar & 15;
1075 var->s = (ar >> 4) & 1;
1076 var->dpl = (ar >> 5) & 3;
1077 var->present = (ar >> 7) & 1;
1078 var->avl = (ar >> 12) & 1;
1079 var->l = (ar >> 13) & 1;
1080 var->db = (ar >> 14) & 1;
1081 var->g = (ar >> 15) & 1;
1082 var->unusable = (ar >> 16) & 1;
1083}
1084
Avi Kivity653e3102007-05-07 10:55:37 +03001085static u32 vmx_segment_access_rights(struct kvm_segment *var)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001086{
Avi Kivity6aa8b732006-12-10 02:21:36 -08001087 u32 ar;
1088
Avi Kivity653e3102007-05-07 10:55:37 +03001089 if (var->unusable)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001090 ar = 1 << 16;
1091 else {
1092 ar = var->type & 15;
1093 ar |= (var->s & 1) << 4;
1094 ar |= (var->dpl & 3) << 5;
1095 ar |= (var->present & 1) << 7;
1096 ar |= (var->avl & 1) << 12;
1097 ar |= (var->l & 1) << 13;
1098 ar |= (var->db & 1) << 14;
1099 ar |= (var->g & 1) << 15;
1100 }
Uri Lublinf7fbf1f2006-12-13 00:34:00 -08001101 if (ar == 0) /* a 0 value means unusable */
1102 ar = AR_UNUSABLE_MASK;
Avi Kivity653e3102007-05-07 10:55:37 +03001103
1104 return ar;
1105}
1106
1107static void vmx_set_segment(struct kvm_vcpu *vcpu,
1108 struct kvm_segment *var, int seg)
1109{
1110 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1111 u32 ar;
1112
1113 if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
1114 vcpu->rmode.tr.selector = var->selector;
1115 vcpu->rmode.tr.base = var->base;
1116 vcpu->rmode.tr.limit = var->limit;
1117 vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
1118 return;
1119 }
1120 vmcs_writel(sf->base, var->base);
1121 vmcs_write32(sf->limit, var->limit);
1122 vmcs_write16(sf->selector, var->selector);
1123 if (vcpu->rmode.active && var->s) {
1124 /*
1125 * Hack real-mode segments into vm86 compatibility.
1126 */
1127 if (var->base == 0xffff0000 && var->selector == 0xf000)
1128 vmcs_writel(sf->base, 0xf0000);
1129 ar = 0xf3;
1130 } else
1131 ar = vmx_segment_access_rights(var);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001132 vmcs_write32(sf->ar_bytes, ar);
1133}
1134
Avi Kivity6aa8b732006-12-10 02:21:36 -08001135static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1136{
1137 u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
1138
1139 *db = (ar >> 14) & 1;
1140 *l = (ar >> 13) & 1;
1141}
1142
1143static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1144{
1145 dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
1146 dt->base = vmcs_readl(GUEST_IDTR_BASE);
1147}
1148
1149static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1150{
1151 vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
1152 vmcs_writel(GUEST_IDTR_BASE, dt->base);
1153}
1154
1155static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1156{
1157 dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
1158 dt->base = vmcs_readl(GUEST_GDTR_BASE);
1159}
1160
1161static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1162{
1163 vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
1164 vmcs_writel(GUEST_GDTR_BASE, dt->base);
1165}
1166
1167static int init_rmode_tss(struct kvm* kvm)
1168{
1169 struct page *p1, *p2, *p3;
1170 gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
1171 char *page;
1172
Avi Kivity954bbbc2007-03-30 14:02:32 +03001173 p1 = gfn_to_page(kvm, fn++);
1174 p2 = gfn_to_page(kvm, fn++);
1175 p3 = gfn_to_page(kvm, fn);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001176
1177 if (!p1 || !p2 || !p3) {
1178 kvm_printf(kvm,"%s: gfn_to_page failed\n", __FUNCTION__);
1179 return 0;
1180 }
1181
1182 page = kmap_atomic(p1, KM_USER0);
1183 memset(page, 0, PAGE_SIZE);
1184 *(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
1185 kunmap_atomic(page, KM_USER0);
1186
1187 page = kmap_atomic(p2, KM_USER0);
1188 memset(page, 0, PAGE_SIZE);
1189 kunmap_atomic(page, KM_USER0);
1190
1191 page = kmap_atomic(p3, KM_USER0);
1192 memset(page, 0, PAGE_SIZE);
1193 *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
1194 kunmap_atomic(page, KM_USER0);
1195
1196 return 1;
1197}
1198
1199static void vmcs_write32_fixedbits(u32 msr, u32 vmcs_field, u32 val)
1200{
1201 u32 msr_high, msr_low;
1202
1203 rdmsr(msr, msr_low, msr_high);
1204
1205 val &= msr_high;
1206 val |= msr_low;
1207 vmcs_write32(vmcs_field, val);
1208}
1209
1210static void seg_setup(int seg)
1211{
1212 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1213
1214 vmcs_write16(sf->selector, 0);
1215 vmcs_writel(sf->base, 0);
1216 vmcs_write32(sf->limit, 0xffff);
1217 vmcs_write32(sf->ar_bytes, 0x93);
1218}
1219
1220/*
1221 * Sets up the vmcs for emulated real mode.
1222 */
1223static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1224{
1225 u32 host_sysenter_cs;
1226 u32 junk;
1227 unsigned long a;
1228 struct descriptor_table dt;
1229 int i;
1230 int ret = 0;
Avi Kivitycd2276a2007-05-14 20:41:13 +03001231 unsigned long kvm_vmx_return;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001232
1233 if (!init_rmode_tss(vcpu->kvm)) {
1234 ret = -ENOMEM;
1235 goto out;
1236 }
1237
1238 memset(vcpu->regs, 0, sizeof(vcpu->regs));
1239 vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
1240 vcpu->cr8 = 0;
1241 vcpu->apic_base = 0xfee00000 |
1242 /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
1243 MSR_IA32_APICBASE_ENABLE;
1244
1245 fx_init(vcpu);
1246
1247 /*
1248 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
1249 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
1250 */
1251 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
1252 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
1253 vmcs_write32(GUEST_CS_LIMIT, 0xffff);
1254 vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
1255
1256 seg_setup(VCPU_SREG_DS);
1257 seg_setup(VCPU_SREG_ES);
1258 seg_setup(VCPU_SREG_FS);
1259 seg_setup(VCPU_SREG_GS);
1260 seg_setup(VCPU_SREG_SS);
1261
1262 vmcs_write16(GUEST_TR_SELECTOR, 0);
1263 vmcs_writel(GUEST_TR_BASE, 0);
1264 vmcs_write32(GUEST_TR_LIMIT, 0xffff);
1265 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1266
1267 vmcs_write16(GUEST_LDTR_SELECTOR, 0);
1268 vmcs_writel(GUEST_LDTR_BASE, 0);
1269 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
1270 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
1271
1272 vmcs_write32(GUEST_SYSENTER_CS, 0);
1273 vmcs_writel(GUEST_SYSENTER_ESP, 0);
1274 vmcs_writel(GUEST_SYSENTER_EIP, 0);
1275
1276 vmcs_writel(GUEST_RFLAGS, 0x02);
1277 vmcs_writel(GUEST_RIP, 0xfff0);
1278 vmcs_writel(GUEST_RSP, 0);
1279
Avi Kivity6aa8b732006-12-10 02:21:36 -08001280 //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
1281 vmcs_writel(GUEST_DR7, 0x400);
1282
1283 vmcs_writel(GUEST_GDTR_BASE, 0);
1284 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
1285
1286 vmcs_writel(GUEST_IDTR_BASE, 0);
1287 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
1288
1289 vmcs_write32(GUEST_ACTIVITY_STATE, 0);
1290 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
1291 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
1292
1293 /* I/O */
He, Qingfdef3ad2007-04-30 09:45:24 +03001294 vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
1295 vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001296
1297 guest_write_tsc(0);
1298
1299 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
1300
1301 /* Special registers */
1302 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
1303
1304 /* Control */
Nguyen Anh Quynhc68876f2006-12-29 16:49:54 -08001305 vmcs_write32_fixedbits(MSR_IA32_VMX_PINBASED_CTLS,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001306 PIN_BASED_VM_EXEC_CONTROL,
1307 PIN_BASED_EXT_INTR_MASK /* 20.6.1 */
1308 | PIN_BASED_NMI_EXITING /* 20.6.1 */
1309 );
Nguyen Anh Quynhc68876f2006-12-29 16:49:54 -08001310 vmcs_write32_fixedbits(MSR_IA32_VMX_PROCBASED_CTLS,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001311 CPU_BASED_VM_EXEC_CONTROL,
1312 CPU_BASED_HLT_EXITING /* 20.6.2 */
1313 | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */
1314 | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */
He, Qingfdef3ad2007-04-30 09:45:24 +03001315 | CPU_BASED_ACTIVATE_IO_BITMAP /* 20.6.2 */
Avi Kivity6aa8b732006-12-10 02:21:36 -08001316 | CPU_BASED_MOV_DR_EXITING
1317 | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */
1318 );
1319
Avi Kivity6aa8b732006-12-10 02:21:36 -08001320 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
1321 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
1322 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
1323
1324 vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
1325 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
1326 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
1327
1328 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
1329 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
1330 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
1331 vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */
1332 vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */
1333 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001334#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001335 rdmsrl(MSR_FS_BASE, a);
1336 vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
1337 rdmsrl(MSR_GS_BASE, a);
1338 vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
1339#else
1340 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
1341 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
1342#endif
1343
1344 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
1345
1346 get_idt(&dt);
1347 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
1348
Avi Kivitycd2276a2007-05-14 20:41:13 +03001349 asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
1350 vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
Eddie Dong2cc51562007-05-21 07:28:09 +03001351 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
1352 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
1353 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001354
1355 rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
1356 vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
1357 rdmsrl(MSR_IA32_SYSENTER_ESP, a);
1358 vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
1359 rdmsrl(MSR_IA32_SYSENTER_EIP, a);
1360 vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
1361
Avi Kivity6aa8b732006-12-10 02:21:36 -08001362 for (i = 0; i < NR_VMX_MSR; ++i) {
1363 u32 index = vmx_msr_index[i];
1364 u32 data_low, data_high;
1365 u64 data;
1366 int j = vcpu->nmsrs;
1367
1368 if (rdmsr_safe(index, &data_low, &data_high) < 0)
1369 continue;
Avi Kivity432bd6c2007-01-31 23:48:13 -08001370 if (wrmsr_safe(index, data_low, data_high) < 0)
1371 continue;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001372 data = data_low | ((u64)data_high << 32);
1373 vcpu->host_msrs[j].index = index;
1374 vcpu->host_msrs[j].reserved = 0;
1375 vcpu->host_msrs[j].data = data;
1376 vcpu->guest_msrs[j] = vcpu->host_msrs[j];
1377 ++vcpu->nmsrs;
1378 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001379
Avi Kivitye38aea32007-04-19 13:22:48 +03001380 setup_msrs(vcpu);
1381
Nguyen Anh Quynhc68876f2006-12-29 16:49:54 -08001382 vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_CONTROLS,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001383 (HOST_IS_64 << 9)); /* 22.2,1, 20.7.1 */
Avi Kivity6aa8b732006-12-10 02:21:36 -08001384
1385 /* 22.2.1, 20.8.1 */
Nguyen Anh Quynhc68876f2006-12-29 16:49:54 -08001386 vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001387 VM_ENTRY_CONTROLS, 0);
1388 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
1389
Michael Riepe3b99ab22006-12-13 00:34:15 -08001390#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001391 vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
1392 vmcs_writel(TPR_THRESHOLD, 0);
Michael Riepe3b99ab22006-12-13 00:34:15 -08001393#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -08001394
Anthony Liguori25c4c272007-04-27 09:29:21 +03001395 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001396 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
1397
1398 vcpu->cr0 = 0x60000010;
1399 vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
1400 vmx_set_cr4(vcpu, 0);
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001401#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001402 vmx_set_efer(vcpu, 0);
1403#endif
Avi Kivity5fd86fc2007-05-02 20:40:00 +03001404 vmx_fpu_activate(vcpu);
Avi Kivityabd3f2d2007-05-02 17:57:40 +03001405 update_exception_bitmap(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001406
1407 return 0;
1408
Avi Kivity6aa8b732006-12-10 02:21:36 -08001409out:
1410 return ret;
1411}
1412
1413static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
1414{
1415 u16 ent[2];
1416 u16 cs;
1417 u16 ip;
1418 unsigned long flags;
1419 unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
1420 u16 sp = vmcs_readl(GUEST_RSP);
1421 u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);
1422
Eric Sesterhenn / Snakebyte39649942007-04-09 16:15:05 +02001423 if (sp > ss_limit || sp < 6 ) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001424 vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
1425 __FUNCTION__,
1426 vmcs_readl(GUEST_RSP),
1427 vmcs_readl(GUEST_SS_BASE),
1428 vmcs_read32(GUEST_SS_LIMIT));
1429 return;
1430 }
1431
1432 if (kvm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) !=
1433 sizeof(ent)) {
1434 vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
1435 return;
1436 }
1437
1438 flags = vmcs_readl(GUEST_RFLAGS);
1439 cs = vmcs_readl(GUEST_CS_BASE) >> 4;
1440 ip = vmcs_readl(GUEST_RIP);
1441
1442
1443 if (kvm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 ||
1444 kvm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
1445 kvm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
1446 vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
1447 return;
1448 }
1449
1450 vmcs_writel(GUEST_RFLAGS, flags &
1451 ~( X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
1452 vmcs_write16(GUEST_CS_SELECTOR, ent[1]) ;
1453 vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
1454 vmcs_writel(GUEST_RIP, ent[0]);
1455 vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
1456}
1457
1458static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1459{
1460 int word_index = __ffs(vcpu->irq_summary);
1461 int bit_index = __ffs(vcpu->irq_pending[word_index]);
1462 int irq = word_index * BITS_PER_LONG + bit_index;
1463
1464 clear_bit(bit_index, &vcpu->irq_pending[word_index]);
1465 if (!vcpu->irq_pending[word_index])
1466 clear_bit(word_index, &vcpu->irq_summary);
1467
1468 if (vcpu->rmode.active) {
1469 inject_rmode_irq(vcpu, irq);
1470 return;
1471 }
1472 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1473 irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1474}
1475
Dor Laorc1150d82007-01-05 16:36:24 -08001476
1477static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1478 struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001479{
Dor Laorc1150d82007-01-05 16:36:24 -08001480 u32 cpu_based_vm_exec_control;
1481
1482 vcpu->interrupt_window_open =
1483 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
1484 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
1485
1486 if (vcpu->interrupt_window_open &&
1487 vcpu->irq_summary &&
1488 !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001489 /*
Dor Laorc1150d82007-01-05 16:36:24 -08001490 * If interrupts enabled, and not blocked by sti or mov ss. Good.
Avi Kivity6aa8b732006-12-10 02:21:36 -08001491 */
1492 kvm_do_inject_irq(vcpu);
Dor Laorc1150d82007-01-05 16:36:24 -08001493
1494 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
1495 if (!vcpu->interrupt_window_open &&
1496 (vcpu->irq_summary || kvm_run->request_interrupt_window))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001497 /*
1498 * Interrupts blocked. Wait for unblock.
1499 */
Dor Laorc1150d82007-01-05 16:36:24 -08001500 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
1501 else
1502 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
1503 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001504}
1505
1506static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
1507{
1508 struct kvm_guest_debug *dbg = &vcpu->guest_debug;
1509
1510 set_debugreg(dbg->bp[0], 0);
1511 set_debugreg(dbg->bp[1], 1);
1512 set_debugreg(dbg->bp[2], 2);
1513 set_debugreg(dbg->bp[3], 3);
1514
1515 if (dbg->singlestep) {
1516 unsigned long flags;
1517
1518 flags = vmcs_readl(GUEST_RFLAGS);
1519 flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
1520 vmcs_writel(GUEST_RFLAGS, flags);
1521 }
1522}
1523
1524static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1525 int vec, u32 err_code)
1526{
1527 if (!vcpu->rmode.active)
1528 return 0;
1529
Nitin A Kambleb3f37702007-05-17 15:50:34 +03001530 /*
1531 * Instruction with address size override prefix opcode 0x67
1532 * Cause the #SS fault with 0 error code in VM86 mode.
1533 */
1534 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001535 if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
1536 return 1;
1537 return 0;
1538}
1539
1540static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1541{
1542 u32 intr_info, error_code;
1543 unsigned long cr2, rip;
1544 u32 vect_info;
1545 enum emulation_result er;
Avi Kivitye2dec932007-01-05 16:36:54 -08001546 int r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001547
1548 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1549 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1550
1551 if ((vect_info & VECTORING_INFO_VALID_MASK) &&
1552 !is_page_fault(intr_info)) {
1553 printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
1554 "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
1555 }
1556
1557 if (is_external_interrupt(vect_info)) {
1558 int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
1559 set_bit(irq, vcpu->irq_pending);
1560 set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
1561 }
1562
1563 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
1564 asm ("int $2");
1565 return 1;
1566 }
Anthony Liguori2ab455c2007-04-27 09:29:49 +03001567
1568 if (is_no_device(intr_info)) {
Avi Kivity5fd86fc2007-05-02 20:40:00 +03001569 vmx_fpu_activate(vcpu);
Anthony Liguori2ab455c2007-04-27 09:29:49 +03001570 return 1;
1571 }
1572
Avi Kivity6aa8b732006-12-10 02:21:36 -08001573 error_code = 0;
1574 rip = vmcs_readl(GUEST_RIP);
1575 if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
1576 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
1577 if (is_page_fault(intr_info)) {
1578 cr2 = vmcs_readl(EXIT_QUALIFICATION);
1579
1580 spin_lock(&vcpu->kvm->lock);
Avi Kivitye2dec932007-01-05 16:36:54 -08001581 r = kvm_mmu_page_fault(vcpu, cr2, error_code);
1582 if (r < 0) {
1583 spin_unlock(&vcpu->kvm->lock);
1584 return r;
1585 }
1586 if (!r) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001587 spin_unlock(&vcpu->kvm->lock);
1588 return 1;
1589 }
1590
1591 er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
1592 spin_unlock(&vcpu->kvm->lock);
1593
1594 switch (er) {
1595 case EMULATE_DONE:
1596 return 1;
1597 case EMULATE_DO_MMIO:
Avi Kivity1165f5f2007-04-19 17:27:43 +03001598 ++vcpu->stat.mmio_exits;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001599 kvm_run->exit_reason = KVM_EXIT_MMIO;
1600 return 0;
1601 case EMULATE_FAIL:
1602 vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
1603 break;
1604 default:
1605 BUG();
1606 }
1607 }
1608
1609 if (vcpu->rmode.active &&
1610 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
1611 error_code))
1612 return 1;
1613
1614 if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
1615 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1616 return 0;
1617 }
1618 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
1619 kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
1620 kvm_run->ex.error_code = error_code;
1621 return 0;
1622}
1623
1624static int handle_external_interrupt(struct kvm_vcpu *vcpu,
1625 struct kvm_run *kvm_run)
1626{
Avi Kivity1165f5f2007-04-19 17:27:43 +03001627 ++vcpu->stat.irq_exits;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001628 return 1;
1629}
1630
Avi Kivity988ad742007-02-12 00:54:36 -08001631static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1632{
1633 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1634 return 0;
1635}
Avi Kivity6aa8b732006-12-10 02:21:36 -08001636
Avi Kivity039576c2007-03-20 12:46:50 +02001637static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001638{
1639 u64 inst;
1640 gva_t rip;
1641 int countr_size;
1642 int i, n;
1643
1644 if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
1645 countr_size = 2;
1646 } else {
1647 u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);
1648
1649 countr_size = (cs_ar & AR_L_MASK) ? 8:
1650 (cs_ar & AR_DB_MASK) ? 4: 2;
1651 }
1652
1653 rip = vmcs_readl(GUEST_RIP);
1654 if (countr_size != 8)
1655 rip += vmcs_readl(GUEST_CS_BASE);
1656
1657 n = kvm_read_guest(vcpu, rip, sizeof(inst), &inst);
1658
1659 for (i = 0; i < n; i++) {
1660 switch (((u8*)&inst)[i]) {
1661 case 0xf0:
1662 case 0xf2:
1663 case 0xf3:
1664 case 0x2e:
1665 case 0x36:
1666 case 0x3e:
1667 case 0x26:
1668 case 0x64:
1669 case 0x65:
1670 case 0x66:
1671 break;
1672 case 0x67:
1673 countr_size = (countr_size == 2) ? 4: (countr_size >> 1);
1674 default:
1675 goto done;
1676 }
1677 }
1678 return 0;
1679done:
1680 countr_size *= 8;
1681 *count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
Avi Kivity039576c2007-03-20 12:46:50 +02001682 //printk("cx: %lx\n", vcpu->regs[VCPU_REGS_RCX]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001683 return 1;
1684}
1685
1686static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1687{
1688 u64 exit_qualification;
Avi Kivity039576c2007-03-20 12:46:50 +02001689 int size, down, in, string, rep;
1690 unsigned port;
1691 unsigned long count;
1692 gva_t address;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001693
Avi Kivity1165f5f2007-04-19 17:27:43 +03001694 ++vcpu->stat.io_exits;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001695 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
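	/*
	 * Exit qualification for I/O: bits 2:0 = access size - 1,
	 * bit 3 = direction (1 = in), bit 4 = string instruction,
	 * bit 5 = REP prefix, bits 31:16 = port number.
	 */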
Avi Kivity039576c2007-03-20 12:46:50 +02001696 in = (exit_qualification & 8) != 0;
1697 size = (exit_qualification & 7) + 1;
1698 string = (exit_qualification & 16) != 0;
1699 down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
1700 count = 1;
1701 rep = (exit_qualification & 32) != 0;
1702 port = exit_qualification >> 16;
1703 address = 0;
1704 if (string) {
1705 if (rep && !get_io_count(vcpu, &count))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001706 return 1;
Avi Kivity039576c2007-03-20 12:46:50 +02001707 address = vmcs_readl(GUEST_LINEAR_ADDRESS);
1708 }
1709 return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
1710 address, rep, port);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001711}
1712
Ingo Molnar102d8322007-02-19 14:37:47 +02001713static void
1714vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1715{
1716 /*
1717 * Patch in the VMCALL instruction:
1718 */
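	/* 0f 01 c1 is the vmcall opcode; the trailing c3 is a ret. */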
1719 hypercall[0] = 0x0f;
1720 hypercall[1] = 0x01;
1721 hypercall[2] = 0xc1;
1722 hypercall[3] = 0xc3;
1723}
1724
Avi Kivity6aa8b732006-12-10 02:21:36 -08001725static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1726{
1727 u64 exit_qualification;
1728 int cr;
1729 int reg;
1730
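	/*
	 * Exit qualification for CR access: bits 3:0 = CR number,
	 * bits 11:8 = the GPR operand, bits 5:4 = access type
	 * (0 = mov to cr, 1 = mov from cr, 2 = clts, 3 = lmsw).
	 */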
1731 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1732 cr = exit_qualification & 15;
1733 reg = (exit_qualification >> 8) & 15;
1734 switch ((exit_qualification >> 4) & 3) {
1735 case 0: /* mov to cr */
1736 switch (cr) {
1737 case 0:
1738 vcpu_load_rsp_rip(vcpu);
1739 set_cr0(vcpu, vcpu->regs[reg]);
1740 skip_emulated_instruction(vcpu);
1741 return 1;
1742 case 3:
1743 vcpu_load_rsp_rip(vcpu);
1744 set_cr3(vcpu, vcpu->regs[reg]);
1745 skip_emulated_instruction(vcpu);
1746 return 1;
1747 case 4:
1748 vcpu_load_rsp_rip(vcpu);
1749 set_cr4(vcpu, vcpu->regs[reg]);
1750 skip_emulated_instruction(vcpu);
1751 return 1;
1752 case 8:
1753 vcpu_load_rsp_rip(vcpu);
1754 set_cr8(vcpu, vcpu->regs[reg]);
1755 skip_emulated_instruction(vcpu);
1756 return 1;
1757 };
1758 break;
Anthony Liguori25c4c272007-04-27 09:29:21 +03001759 case 2: /* clts */
1760 vcpu_load_rsp_rip(vcpu);
Avi Kivity5fd86fc2007-05-02 20:40:00 +03001761 vmx_fpu_deactivate(vcpu);
Anthony Liguori2ab455c2007-04-27 09:29:49 +03001762 vcpu->cr0 &= ~CR0_TS_MASK;
1763 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
Avi Kivity5fd86fc2007-05-02 20:40:00 +03001764 vmx_fpu_activate(vcpu);
Anthony Liguori25c4c272007-04-27 09:29:21 +03001765 skip_emulated_instruction(vcpu);
1766 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001767	case 1: /* mov from cr */
1768 switch (cr) {
1769 case 3:
1770 vcpu_load_rsp_rip(vcpu);
1771 vcpu->regs[reg] = vcpu->cr3;
1772 vcpu_put_rsp_rip(vcpu);
1773 skip_emulated_instruction(vcpu);
1774 return 1;
1775 case 8:
Avi Kivity6aa8b732006-12-10 02:21:36 -08001776 vcpu_load_rsp_rip(vcpu);
1777 vcpu->regs[reg] = vcpu->cr8;
1778 vcpu_put_rsp_rip(vcpu);
1779 skip_emulated_instruction(vcpu);
1780 return 1;
1781 }
1782 break;
1783 case 3: /* lmsw */
1784 lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
1785
1786 skip_emulated_instruction(vcpu);
1787 return 1;
1788 default:
1789 break;
1790 }
1791 kvm_run->exit_reason = 0;
1792 printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n",
1793 (int)(exit_qualification >> 4) & 3, cr);
1794 return 0;
1795}
1796
1797static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1798{
1799 u64 exit_qualification;
1800 unsigned long val;
1801 int dr, reg;
1802
1803 /*
1804 * FIXME: this code assumes the host is debugging the guest.
1805 * need to deal with guest debugging itself too.
1806 */
1807 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1808 dr = exit_qualification & 7;
1809 reg = (exit_qualification >> 8) & 15;
1810 vcpu_load_rsp_rip(vcpu);
1811 if (exit_qualification & 16) {
1812 /* mov from dr */
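		/*
		 * Guest debugging is not virtualized here (see FIXME above),
		 * so reads return the architectural reset values:
		 * dr6 = 0xffff0ff0, dr7 = 0x400, anything else 0.
		 */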
1813 switch (dr) {
1814 case 6:
1815 val = 0xffff0ff0;
1816 break;
1817 case 7:
1818 val = 0x400;
1819 break;
1820 default:
1821 val = 0;
1822 }
1823 vcpu->regs[reg] = val;
1824 } else {
1825 /* mov to dr */
1826 }
1827 vcpu_put_rsp_rip(vcpu);
1828 skip_emulated_instruction(vcpu);
1829 return 1;
1830}
1831
1832static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1833{
Avi Kivity06465c52007-02-28 20:46:53 +02001834 kvm_emulate_cpuid(vcpu);
1835 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001836}
1837
1838static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1839{
1840 u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1841 u64 data;
1842
1843 if (vmx_get_msr(vcpu, ecx, &data)) {
1844 vmx_inject_gp(vcpu, 0);
1845 return 1;
1846 }
1847
1848 /* FIXME: handling of bits 32:63 of rax, rdx */
1849 vcpu->regs[VCPU_REGS_RAX] = data & -1u;
1850 vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
1851 skip_emulated_instruction(vcpu);
1852 return 1;
1853}
1854
1855static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1856{
1857 u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1858 u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
1859 | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
1860
1861 if (vmx_set_msr(vcpu, ecx, data) != 0) {
1862 vmx_inject_gp(vcpu, 0);
1863 return 1;
1864 }
1865
1866 skip_emulated_instruction(vcpu);
1867 return 1;
1868}
1869
Dor Laorc1150d82007-01-05 16:36:24 -08001870static void post_kvm_run_save(struct kvm_vcpu *vcpu,
1871 struct kvm_run *kvm_run)
1872{
1873 kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
1874 kvm_run->cr8 = vcpu->cr8;
1875 kvm_run->apic_base = vcpu->apic_base;
1876 kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
1877 vcpu->irq_summary == 0);
1878}
1879
Avi Kivity6aa8b732006-12-10 02:21:36 -08001880static int handle_interrupt_window(struct kvm_vcpu *vcpu,
1881 struct kvm_run *kvm_run)
1882{
Dor Laorc1150d82007-01-05 16:36:24 -08001883 /*
 1884	 * If userspace is waiting to inject interrupts, exit as soon as
 1885	 * possible
1886 */
1887 if (kvm_run->request_interrupt_window &&
Dor Laor022a9302007-01-05 16:37:00 -08001888 !vcpu->irq_summary) {
Dor Laorc1150d82007-01-05 16:36:24 -08001889 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
Avi Kivity1165f5f2007-04-19 17:27:43 +03001890 ++vcpu->stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08001891 return 0;
1892 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001893 return 1;
1894}
1895
1896static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1897{
1898 skip_emulated_instruction(vcpu);
Avi Kivityd3bef152007-06-05 15:53:05 +03001899 return kvm_emulate_halt(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001900}
1901
Ingo Molnarc21415e2007-02-19 14:37:47 +02001902static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1903{
Dor Laor510043d2007-02-19 18:25:43 +02001904 skip_emulated_instruction(vcpu);
Avi Kivity270fd9b2007-02-19 14:37:47 +02001905 return kvm_hypercall(vcpu, kvm_run);
Ingo Molnarc21415e2007-02-19 14:37:47 +02001906}
1907
Avi Kivity6aa8b732006-12-10 02:21:36 -08001908/*
1909 * The exit handlers return 1 if the exit was handled fully and guest execution
1910 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
1911 * to be done to userspace and return 0.
1912 */
1913static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
1914 struct kvm_run *kvm_run) = {
1915 [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
1916 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
Avi Kivity988ad742007-02-12 00:54:36 -08001917 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001918 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001919 [EXIT_REASON_CR_ACCESS] = handle_cr,
1920 [EXIT_REASON_DR_ACCESS] = handle_dr,
1921 [EXIT_REASON_CPUID] = handle_cpuid,
1922 [EXIT_REASON_MSR_READ] = handle_rdmsr,
1923 [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
1924 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
1925 [EXIT_REASON_HLT] = handle_halt,
Ingo Molnarc21415e2007-02-19 14:37:47 +02001926 [EXIT_REASON_VMCALL] = handle_vmcall,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001927};
1928
1929static const int kvm_vmx_max_exit_handlers =
Robert P. J. Day50a34852007-06-03 13:35:29 -04001930 ARRAY_SIZE(kvm_vmx_exit_handlers);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001931
1932/*
1933 * The guest has exited. See if we can fix it or if we need userspace
1934 * assistance.
1935 */
1936static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1937{
1938 u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1939 u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
1940
 1941	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
 1942				exit_reason != EXIT_REASON_EXCEPTION_NMI)
1943 printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
1944 "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001945 if (exit_reason < kvm_vmx_max_exit_handlers
1946 && kvm_vmx_exit_handlers[exit_reason])
1947 return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
1948 else {
1949 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
1950 kvm_run->hw.hardware_exit_reason = exit_reason;
1951 }
1952 return 0;
1953}
1954
Dor Laorc1150d82007-01-05 16:36:24 -08001955/*
1956 * Check if userspace requested an interrupt window, and that the
1957 * interrupt window is open.
1958 *
1959 * No need to exit to userspace if we already have an interrupt queued.
1960 */
1961static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1962 struct kvm_run *kvm_run)
1963{
1964 return (!vcpu->irq_summary &&
1965 kvm_run->request_interrupt_window &&
1966 vcpu->interrupt_window_open &&
1967 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
1968}
1969
Avi Kivity6aa8b732006-12-10 02:21:36 -08001970static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1971{
1972 u8 fail;
Avi Kivitye2dec932007-01-05 16:36:54 -08001973 int r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001974
Avi Kivitye6adf282007-04-30 16:07:54 +03001975preempted:
Avi Kivitycccf7482007-01-22 20:40:39 -08001976 if (!vcpu->mmio_read_completed)
1977 do_interrupt_requests(vcpu, kvm_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001978
1979 if (vcpu->guest_debug.enabled)
1980 kvm_guest_debug_pre(vcpu);
1981
Avi Kivitye6adf282007-04-30 16:07:54 +03001982again:
Avi Kivity33ed6322007-05-02 16:54:03 +03001983 vmx_save_host_state(vcpu);
Avi Kivitye6adf282007-04-30 16:07:54 +03001984 kvm_load_guest_fpu(vcpu);
1985
Avi Kivity17c3ba92007-06-04 15:58:30 +03001986 r = kvm_mmu_reload(vcpu);
1987 if (unlikely(r))
1988 goto out;
1989
Avi Kivitye6adf282007-04-30 16:07:54 +03001990 /*
1991 * Loading guest fpu may have cleared host cr0.ts
1992 */
1993 vmcs_writel(HOST_CR0, read_cr0());
1994
Avi Kivity6aa8b732006-12-10 02:21:36 -08001995 asm (
1996 /* Store host registers */
1997 "pushf \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001998#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001999 "push %%rax; push %%rbx; push %%rdx;"
2000 "push %%rsi; push %%rdi; push %%rbp;"
2001 "push %%r8; push %%r9; push %%r10; push %%r11;"
2002 "push %%r12; push %%r13; push %%r14; push %%r15;"
2003 "push %%rcx \n\t"
2004 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
2005#else
2006 "pusha; push %%ecx \n\t"
2007 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
2008#endif
 2009		/* Check if vmlaunch or vmresume is needed */
2010 "cmp $0, %1 \n\t"
2011 /* Load guest registers. Don't clobber flags. */
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002012#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002013 "mov %c[cr2](%3), %%rax \n\t"
2014 "mov %%rax, %%cr2 \n\t"
2015 "mov %c[rax](%3), %%rax \n\t"
2016 "mov %c[rbx](%3), %%rbx \n\t"
2017 "mov %c[rdx](%3), %%rdx \n\t"
2018 "mov %c[rsi](%3), %%rsi \n\t"
2019 "mov %c[rdi](%3), %%rdi \n\t"
2020 "mov %c[rbp](%3), %%rbp \n\t"
2021 "mov %c[r8](%3), %%r8 \n\t"
2022 "mov %c[r9](%3), %%r9 \n\t"
2023 "mov %c[r10](%3), %%r10 \n\t"
2024 "mov %c[r11](%3), %%r11 \n\t"
2025 "mov %c[r12](%3), %%r12 \n\t"
2026 "mov %c[r13](%3), %%r13 \n\t"
2027 "mov %c[r14](%3), %%r14 \n\t"
2028 "mov %c[r15](%3), %%r15 \n\t"
2029 "mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
2030#else
2031 "mov %c[cr2](%3), %%eax \n\t"
2032 "mov %%eax, %%cr2 \n\t"
2033 "mov %c[rax](%3), %%eax \n\t"
2034 "mov %c[rbx](%3), %%ebx \n\t"
2035 "mov %c[rdx](%3), %%edx \n\t"
2036 "mov %c[rsi](%3), %%esi \n\t"
2037 "mov %c[rdi](%3), %%edi \n\t"
2038 "mov %c[rbp](%3), %%ebp \n\t"
2039 "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
2040#endif
2041 /* Enter guest mode */
Avi Kivitycd2276a2007-05-14 20:41:13 +03002042 "jne .Llaunched \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002043 ASM_VMX_VMLAUNCH "\n\t"
Avi Kivitycd2276a2007-05-14 20:41:13 +03002044 "jmp .Lkvm_vmx_return \n\t"
2045 ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
2046 ".Lkvm_vmx_return: "
Avi Kivity6aa8b732006-12-10 02:21:36 -08002047 /* Save guest registers, load host registers, keep flags */
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002048#ifdef CONFIG_X86_64
Ingo Molnar96958232007-02-12 00:54:33 -08002049 "xchg %3, (%%rsp) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002050 "mov %%rax, %c[rax](%3) \n\t"
2051 "mov %%rbx, %c[rbx](%3) \n\t"
Ingo Molnar96958232007-02-12 00:54:33 -08002052 "pushq (%%rsp); popq %c[rcx](%3) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002053 "mov %%rdx, %c[rdx](%3) \n\t"
2054 "mov %%rsi, %c[rsi](%3) \n\t"
2055 "mov %%rdi, %c[rdi](%3) \n\t"
2056 "mov %%rbp, %c[rbp](%3) \n\t"
2057 "mov %%r8, %c[r8](%3) \n\t"
2058 "mov %%r9, %c[r9](%3) \n\t"
2059 "mov %%r10, %c[r10](%3) \n\t"
2060 "mov %%r11, %c[r11](%3) \n\t"
2061 "mov %%r12, %c[r12](%3) \n\t"
2062 "mov %%r13, %c[r13](%3) \n\t"
2063 "mov %%r14, %c[r14](%3) \n\t"
2064 "mov %%r15, %c[r15](%3) \n\t"
2065 "mov %%cr2, %%rax \n\t"
2066 "mov %%rax, %c[cr2](%3) \n\t"
Ingo Molnar96958232007-02-12 00:54:33 -08002067 "mov (%%rsp), %3 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002068
2069 "pop %%rcx; pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
2070 "pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
2071 "pop %%rbp; pop %%rdi; pop %%rsi;"
2072 "pop %%rdx; pop %%rbx; pop %%rax \n\t"
2073#else
Ingo Molnar96958232007-02-12 00:54:33 -08002074 "xchg %3, (%%esp) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002075 "mov %%eax, %c[rax](%3) \n\t"
2076 "mov %%ebx, %c[rbx](%3) \n\t"
Ingo Molnar96958232007-02-12 00:54:33 -08002077 "pushl (%%esp); popl %c[rcx](%3) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002078 "mov %%edx, %c[rdx](%3) \n\t"
2079 "mov %%esi, %c[rsi](%3) \n\t"
2080 "mov %%edi, %c[rdi](%3) \n\t"
2081 "mov %%ebp, %c[rbp](%3) \n\t"
2082 "mov %%cr2, %%eax \n\t"
2083 "mov %%eax, %c[cr2](%3) \n\t"
Ingo Molnar96958232007-02-12 00:54:33 -08002084 "mov (%%esp), %3 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002085
2086 "pop %%ecx; popa \n\t"
2087#endif
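		/* setbe: CF or ZF set means vmlaunch/vmresume failed */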
2088 "setbe %0 \n\t"
2089 "popf \n\t"
Herbert Xue0015482007-01-23 14:10:00 +11002090 : "=q" (fail)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002091 : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
2092 "c"(vcpu),
2093 [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
2094 [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
2095 [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
2096 [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
2097 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
2098 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
2099 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002100#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002101 [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
2102 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
2103 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
2104 [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
2105 [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
2106 [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
2107 [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
2108 [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
2109#endif
2110 [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
2111 : "cc", "memory" );
2112
Avi Kivity1165f5f2007-04-19 17:27:43 +03002113 ++vcpu->stat.exits;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002114
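	/*
	 * Bits 0 and 1 of the interruptibility field are "blocking by STI"
	 * and "blocking by mov ss"; interrupts can only be injected when
	 * both are clear.
	 */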
Dor Laorc1150d82007-01-05 16:36:24 -08002115 vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002116
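	/*
	 * The VM exit reloaded ds/es from the VMCS host-state area;
	 * restore the flat __USER_DS selector before running more C code.
	 */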
Avi Kivity6aa8b732006-12-10 02:21:36 -08002117 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
Avi Kivity6aa8b732006-12-10 02:21:36 -08002118
Avi Kivity05e0c8c2007-04-30 16:15:58 +03002119 if (unlikely(fail)) {
Avi Kivity8eb7d332007-03-04 14:17:08 +02002120 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2121 kvm_run->fail_entry.hardware_entry_failure_reason
2122 = vmcs_read32(VM_INSTRUCTION_ERROR);
Avi Kivitye2dec932007-01-05 16:36:54 -08002123 r = 0;
Avi Kivity05e0c8c2007-04-30 16:15:58 +03002124 goto out;
2125 }
2126 /*
2127 * Profile KVM exit RIPs:
2128 */
2129 if (unlikely(prof_on == KVM_PROFILING))
2130 profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
Jeremy Fitzhardinge464d1a72007-02-13 13:26:20 +01002131
Avi Kivity05e0c8c2007-04-30 16:15:58 +03002132 vcpu->launched = 1;
2133 r = kvm_handle_exit(kvm_run, vcpu);
2134 if (r > 0) {
 2135		/* Give scheduler a chance to reschedule. */
2136 if (signal_pending(current)) {
2137 r = -EINTR;
2138 kvm_run->exit_reason = KVM_EXIT_INTR;
2139 ++vcpu->stat.signal_exits;
2140 goto out;
2141 }
Dor Laorc1150d82007-01-05 16:36:24 -08002142
Avi Kivity05e0c8c2007-04-30 16:15:58 +03002143 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2144 r = -EINTR;
2145 kvm_run->exit_reason = KVM_EXIT_INTR;
2146 ++vcpu->stat.request_irq_exits;
2147 goto out;
2148 }
2149 if (!need_resched()) {
2150 ++vcpu->stat.light_exits;
2151 goto again;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002152 }
2153 }
Dor Laorc1150d82007-01-05 16:36:24 -08002154
Avi Kivitye6adf282007-04-30 16:07:54 +03002155out:
Avi Kivitye6adf282007-04-30 16:07:54 +03002156 if (r > 0) {
2157 kvm_resched(vcpu);
2158 goto preempted;
2159 }
2160
Dor Laorc1150d82007-01-05 16:36:24 -08002161 post_kvm_run_save(vcpu, kvm_run);
Avi Kivitye2dec932007-01-05 16:36:54 -08002162 return r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002163}
2164
2165static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
2166{
2167 vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
2168}
2169
2170static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
2171 unsigned long addr,
2172 u32 err_code)
2173{
2174 u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
2175
Avi Kivity1165f5f2007-04-19 17:27:43 +03002176 ++vcpu->stat.pf_guest;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002177
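	/*
	 * If this fault was raised while a page fault was already being
	 * delivered (per the IDT-vectoring info), escalate to a double
	 * fault instead of injecting a second page fault.
	 */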
2178 if (is_page_fault(vect_info)) {
2179 printk(KERN_DEBUG "inject_page_fault: "
2180 "double fault 0x%lx @ 0x%lx\n",
2181 addr, vmcs_readl(GUEST_RIP));
2182 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
2183 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2184 DF_VECTOR |
2185 INTR_TYPE_EXCEPTION |
2186 INTR_INFO_DELIEVER_CODE_MASK |
2187 INTR_INFO_VALID_MASK);
2188 return;
2189 }
2190 vcpu->cr2 = addr;
2191 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
2192 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2193 PF_VECTOR |
2194 INTR_TYPE_EXCEPTION |
2195 INTR_INFO_DELIEVER_CODE_MASK |
2196 INTR_INFO_VALID_MASK);
2197
2198}
2199
2200static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
2201{
2202 if (vcpu->vmcs) {
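		/*
		 * VMCLEAR the VMCS on any cpu that may still have it
		 * loaded, so no cached copy remains once it is freed.
		 */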
2203 on_each_cpu(__vcpu_clear, vcpu, 0, 1);
2204 free_vmcs(vcpu->vmcs);
2205 vcpu->vmcs = NULL;
2206 }
2207}
2208
2209static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
2210{
2211 vmx_free_vmcs(vcpu);
2212}
2213
2214static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
2215{
2216 struct vmcs *vmcs;
2217
Ingo Molnar965b58a2007-01-05 16:36:23 -08002218 vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2219 if (!vcpu->guest_msrs)
2220 return -ENOMEM;
2221
2222 vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2223 if (!vcpu->host_msrs)
2224 goto out_free_guest_msrs;
2225
Avi Kivity6aa8b732006-12-10 02:21:36 -08002226 vmcs = alloc_vmcs();
2227 if (!vmcs)
Ingo Molnar965b58a2007-01-05 16:36:23 -08002228 goto out_free_msrs;
2229
Avi Kivity6aa8b732006-12-10 02:21:36 -08002230 vmcs_clear(vmcs);
2231 vcpu->vmcs = vmcs;
2232 vcpu->launched = 0;
Ingo Molnar965b58a2007-01-05 16:36:23 -08002233
Avi Kivity6aa8b732006-12-10 02:21:36 -08002234 return 0;
Ingo Molnar965b58a2007-01-05 16:36:23 -08002235
2236out_free_msrs:
2237 kfree(vcpu->host_msrs);
2238 vcpu->host_msrs = NULL;
2239
2240out_free_guest_msrs:
2241 kfree(vcpu->guest_msrs);
2242 vcpu->guest_msrs = NULL;
2243
2244 return -ENOMEM;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002245}
2246
2247static struct kvm_arch_ops vmx_arch_ops = {
2248 .cpu_has_kvm_support = cpu_has_kvm_support,
2249 .disabled_by_bios = vmx_disabled_by_bios,
2250 .hardware_setup = hardware_setup,
2251 .hardware_unsetup = hardware_unsetup,
2252 .hardware_enable = hardware_enable,
2253 .hardware_disable = hardware_disable,
2254
2255 .vcpu_create = vmx_create_vcpu,
2256 .vcpu_free = vmx_free_vcpu,
2257
2258 .vcpu_load = vmx_vcpu_load,
2259 .vcpu_put = vmx_vcpu_put,
Avi Kivity774c47f2007-02-12 00:54:47 -08002260 .vcpu_decache = vmx_vcpu_decache,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002261
2262 .set_guest_debug = set_guest_debug,
2263 .get_msr = vmx_get_msr,
2264 .set_msr = vmx_set_msr,
2265 .get_segment_base = vmx_get_segment_base,
2266 .get_segment = vmx_get_segment,
2267 .set_segment = vmx_set_segment,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002268 .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
Anthony Liguori25c4c272007-04-27 09:29:21 +03002269 .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002270 .set_cr0 = vmx_set_cr0,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002271 .set_cr3 = vmx_set_cr3,
2272 .set_cr4 = vmx_set_cr4,
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002273#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002274 .set_efer = vmx_set_efer,
2275#endif
2276 .get_idt = vmx_get_idt,
2277 .set_idt = vmx_set_idt,
2278 .get_gdt = vmx_get_gdt,
2279 .set_gdt = vmx_set_gdt,
2280 .cache_regs = vcpu_load_rsp_rip,
2281 .decache_regs = vcpu_put_rsp_rip,
2282 .get_rflags = vmx_get_rflags,
2283 .set_rflags = vmx_set_rflags,
2284
2285 .tlb_flush = vmx_flush_tlb,
2286 .inject_page_fault = vmx_inject_page_fault,
2287
2288 .inject_gp = vmx_inject_gp,
2289
2290 .run = vmx_vcpu_run,
2291 .skip_emulated_instruction = skip_emulated_instruction,
2292 .vcpu_setup = vmx_vcpu_setup,
Ingo Molnar102d8322007-02-19 14:37:47 +02002293 .patch_hypercall = vmx_patch_hypercall,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002294};
2295
2296static int __init vmx_init(void)
2297{
He, Qingfdef3ad2007-04-30 09:45:24 +03002298 void *iova;
2299 int r;
2300
2301 vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2302 if (!vmx_io_bitmap_a)
2303 return -ENOMEM;
2304
2305 vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2306 if (!vmx_io_bitmap_b) {
2307 r = -ENOMEM;
2308 goto out;
2309 }
2310
2311 /*
2312 * Allow direct access to the PC debug port (it is often used for I/O
2313 * delays, but the vmexits simply slow things down).
2314 */
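	/*
	 * A set bit in the I/O bitmap forces a vmexit for that port, so
	 * start with all ports trapped and then clear the bit for 0x80.
	 */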
2315 iova = kmap(vmx_io_bitmap_a);
2316 memset(iova, 0xff, PAGE_SIZE);
2317 clear_bit(0x80, iova);
Avi Kivitycd0536d2007-05-08 11:34:07 +03002318 kunmap(vmx_io_bitmap_a);
He, Qingfdef3ad2007-04-30 09:45:24 +03002319
2320 iova = kmap(vmx_io_bitmap_b);
2321 memset(iova, 0xff, PAGE_SIZE);
Avi Kivitycd0536d2007-05-08 11:34:07 +03002322 kunmap(vmx_io_bitmap_b);
He, Qingfdef3ad2007-04-30 09:45:24 +03002323
2324 r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
2325 if (r)
2326 goto out1;
2327
2328 return 0;
2329
2330out1:
2331 __free_page(vmx_io_bitmap_b);
2332out:
2333 __free_page(vmx_io_bitmap_a);
2334 return r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002335}
2336
2337static void __exit vmx_exit(void)
2338{
He, Qingfdef3ad2007-04-30 09:45:24 +03002339 __free_page(vmx_io_bitmap_b);
2340 __free_page(vmx_io_bitmap_a);
2341
Avi Kivity6aa8b732006-12-10 02:21:36 -08002342 kvm_exit_arch();
2343}
2344
2345module_init(vmx_init)
2346module_exit(vmx_exit)