Carsten Otte043405e2007-10-10 17:16:19 +02001/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * derived from drivers/kvm/kvm_main.c
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
Ben-Ami Yassour4d5c5d02008-07-28 19:26:26 +03007 * Copyright (C) 2008 Qumranet, Inc.
8 * Copyright IBM Corporation, 2008
Carsten Otte043405e2007-10-10 17:16:19 +02009 *
10 * Authors:
11 * Avi Kivity <avi@qumranet.com>
12 * Yaniv Kamay <yaniv@qumranet.com>
Ben-Ami Yassour4d5c5d02008-07-28 19:26:26 +030013 * Amit Shah <amit.shah@qumranet.com>
14 * Ben-Ami Yassour <benami@il.ibm.com>
Carsten Otte043405e2007-10-10 17:16:19 +020015 *
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
18 *
19 */
20
Avi Kivityedf88412007-12-16 11:02:48 +020021#include <linux/kvm_host.h>
Carsten Otte313a3dc2007-10-11 19:16:52 +020022#include "irq.h"
Zhang Xiantao1d737c82007-12-14 09:35:10 +080023#include "mmu.h"
Sheng Yang78376992008-01-28 05:10:22 +080024#include "i8254.h"
Izik Eidus37817f22008-03-24 23:14:53 +020025#include "tss.h"
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -030026#include "kvm_cache_regs.h"
Avi Kivity26eef702008-07-03 14:59:22 +030027#include "x86.h"
Carsten Otte313a3dc2007-10-11 19:16:52 +020028
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -020029#include <linux/clocksource.h>
Ben-Ami Yassour4d5c5d02008-07-28 19:26:26 +030030#include <linux/interrupt.h>
Carsten Otte313a3dc2007-10-11 19:16:52 +020031#include <linux/kvm.h>
32#include <linux/fs.h>
33#include <linux/vmalloc.h>
Carsten Otte5fb76f92007-10-29 16:08:51 +010034#include <linux/module.h>
Zhang Xiantao0de10342007-11-20 16:25:04 +080035#include <linux/mman.h>
Marcelo Tosatti2bacc552007-12-12 10:46:12 -050036#include <linux/highmem.h>
Joerg Roedel19de40a2008-12-03 14:43:34 +010037#include <linux/iommu.h>
Ben-Ami Yassour62c476c2008-09-14 03:48:28 +030038#include <linux/intel-iommu.h>
Gerd Hoffmannc8076602009-02-04 17:52:04 +010039#include <linux/cpufreq.h>
Avi Kivity18863bd2009-09-07 11:12:18 +030040#include <linux/user-return-notifier.h>
Marcelo Tosattia983fb22009-12-23 14:35:23 -020041#include <linux/srcu.h>
Avi Kivityaec51dc2009-07-01 16:01:02 +030042#include <trace/events/kvm.h>
43#undef TRACE_INCLUDE_FILE
Marcelo Tosatti229456f2009-06-17 09:22:14 -030044#define CREATE_TRACE_POINTS
45#include "trace.h"
Carsten Otte043405e2007-10-10 17:16:19 +020046
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +020047#include <asm/debugreg.h>
Carsten Otte043405e2007-10-10 17:16:19 +020048#include <asm/uaccess.h>
Zhang Xiantaod825ed02007-11-14 20:08:51 +080049#include <asm/msr.h>
Avi Kivitya5f61302008-02-20 17:57:21 +020050#include <asm/desc.h>
Sheng Yang0bed3b52008-10-09 16:01:54 +080051#include <asm/mtrr.h>
Huang Ying890ca9a2009-05-11 16:48:15 +080052#include <asm/mce.h>
Carsten Otte043405e2007-10-10 17:16:19 +020053
Carsten Otte313a3dc2007-10-11 19:16:52 +020054#define MAX_IO_MSRS 256
Carsten Ottea03490e2007-10-29 16:09:35 +010055#define CR0_RESERVED_BITS \
56 (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
57 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
58 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
59#define CR4_RESERVED_BITS \
60 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
61 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
62 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
63 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
64
65#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
Huang Ying890ca9a2009-05-11 16:48:15 +080066
67#define KVM_MAX_MCE_BANKS 32
68#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
69
Joerg Roedel50a37eb2008-01-31 14:57:38 +010070/* EFER defaults:
 71 * - enable syscall by default because it is emulated by KVM
 72 * - enable LME and LMA by default on 64-bit KVM
73 */
74#ifdef CONFIG_X86_64
75static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
76#else
77static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
78#endif
Carsten Otte313a3dc2007-10-11 19:16:52 +020079
Avi Kivityba1389b2007-11-18 16:24:12 +020080#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
81#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
Hollis Blanchard417bc302007-10-31 17:24:23 -050082
Gleb Natapovcb142eb2009-08-09 15:17:40 +030083static void update_cr8_intercept(struct kvm_vcpu *vcpu);
Avi Kivity674eea02008-02-11 18:37:23 +020084static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
85 struct kvm_cpuid_entry2 __user *entries);
86
Zhang Xiantao97896d02007-11-14 20:09:30 +080087struct kvm_x86_ops *kvm_x86_ops;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -030088EXPORT_SYMBOL_GPL(kvm_x86_ops);
Zhang Xiantao97896d02007-11-14 20:09:30 +080089
Andre Przywaraed85c062009-06-25 12:36:49 +020090int ignore_msrs = 0;
91module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
92
Avi Kivity18863bd2009-09-07 11:12:18 +030093#define KVM_NR_SHARED_MSRS 16
94
95struct kvm_shared_msrs_global {
96 int nr;
Sheng Yang2bf78fa2009-12-18 16:48:44 +080097 u32 msrs[KVM_NR_SHARED_MSRS];
Avi Kivity18863bd2009-09-07 11:12:18 +030098};
99
100struct kvm_shared_msrs {
101 struct user_return_notifier urn;
102 bool registered;
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800103 struct kvm_shared_msr_values {
104 u64 host;
105 u64 curr;
106 } values[KVM_NR_SHARED_MSRS];
Avi Kivity18863bd2009-09-07 11:12:18 +0300107};
108
109static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
110static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
111
Hollis Blanchard417bc302007-10-31 17:24:23 -0500112struct kvm_stats_debugfs_item debugfs_entries[] = {
Avi Kivityba1389b2007-11-18 16:24:12 +0200113 { "pf_fixed", VCPU_STAT(pf_fixed) },
114 { "pf_guest", VCPU_STAT(pf_guest) },
115 { "tlb_flush", VCPU_STAT(tlb_flush) },
116 { "invlpg", VCPU_STAT(invlpg) },
117 { "exits", VCPU_STAT(exits) },
118 { "io_exits", VCPU_STAT(io_exits) },
119 { "mmio_exits", VCPU_STAT(mmio_exits) },
120 { "signal_exits", VCPU_STAT(signal_exits) },
121 { "irq_window", VCPU_STAT(irq_window_exits) },
Sheng Yangf08864b2008-05-15 18:23:25 +0800122 { "nmi_window", VCPU_STAT(nmi_window_exits) },
Avi Kivityba1389b2007-11-18 16:24:12 +0200123 { "halt_exits", VCPU_STAT(halt_exits) },
124 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
Amit Shahf11c3a82008-02-21 01:00:30 +0530125 { "hypercalls", VCPU_STAT(hypercalls) },
Avi Kivityba1389b2007-11-18 16:24:12 +0200126 { "request_irq", VCPU_STAT(request_irq_exits) },
127 { "irq_exits", VCPU_STAT(irq_exits) },
128 { "host_state_reload", VCPU_STAT(host_state_reload) },
129 { "efer_reload", VCPU_STAT(efer_reload) },
130 { "fpu_reload", VCPU_STAT(fpu_reload) },
131 { "insn_emulation", VCPU_STAT(insn_emulation) },
132 { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
Avi Kivityfa89a812008-09-01 15:57:51 +0300133 { "irq_injections", VCPU_STAT(irq_injections) },
Jan Kiszkac4abb7c2008-09-26 09:30:55 +0200134 { "nmi_injections", VCPU_STAT(nmi_injections) },
Avi Kivity4cee5762007-11-18 16:37:07 +0200135 { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
136 { "mmu_pte_write", VM_STAT(mmu_pte_write) },
137 { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
138 { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
139 { "mmu_flooded", VM_STAT(mmu_flooded) },
140 { "mmu_recycled", VM_STAT(mmu_recycled) },
Avi Kivitydfc5aa02007-12-18 19:47:18 +0200141 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
Marcelo Tosatti4731d4c2008-09-23 13:18:39 -0300142 { "mmu_unsync", VM_STAT(mmu_unsync) },
Avi Kivity0f74a242007-11-20 23:01:14 +0200143 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
Marcelo Tosatti05da4552008-02-23 11:44:30 -0300144 { "largepages", VM_STAT(lpages) },
Hollis Blanchard417bc302007-10-31 17:24:23 -0500145 { NULL }
146};
147
Avi Kivity18863bd2009-09-07 11:12:18 +0300148static void kvm_on_user_return(struct user_return_notifier *urn)
149{
150 unsigned slot;
Avi Kivity18863bd2009-09-07 11:12:18 +0300151 struct kvm_shared_msrs *locals
152 = container_of(urn, struct kvm_shared_msrs, urn);
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800153 struct kvm_shared_msr_values *values;
Avi Kivity18863bd2009-09-07 11:12:18 +0300154
155 for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800156 values = &locals->values[slot];
157 if (values->host != values->curr) {
158 wrmsrl(shared_msrs_global.msrs[slot], values->host);
159 values->curr = values->host;
Avi Kivity18863bd2009-09-07 11:12:18 +0300160 }
161 }
162 locals->registered = false;
163 user_return_notifier_unregister(urn);
164}
165
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800166static void shared_msr_update(unsigned slot, u32 msr)
Avi Kivity18863bd2009-09-07 11:12:18 +0300167{
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800168 struct kvm_shared_msrs *smsr;
Avi Kivity18863bd2009-09-07 11:12:18 +0300169 u64 value;
170
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800171 smsr = &__get_cpu_var(shared_msrs);
172 /* only read, and nobody should modify it at this time,
 173 * so no lock is needed */
174 if (slot >= shared_msrs_global.nr) {
175 printk(KERN_ERR "kvm: invalid MSR slot!");
176 return;
177 }
178 rdmsrl_safe(msr, &value);
179 smsr->values[slot].host = value;
180 smsr->values[slot].curr = value;
181}
182
183void kvm_define_shared_msr(unsigned slot, u32 msr)
184{
Avi Kivity18863bd2009-09-07 11:12:18 +0300185 if (slot >= shared_msrs_global.nr)
186 shared_msrs_global.nr = slot + 1;
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800187 shared_msrs_global.msrs[slot] = msr;
 188 /* make sure the shared_msrs_global updates above are visible before use */
189 smp_wmb();
Avi Kivity18863bd2009-09-07 11:12:18 +0300190}
191EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
192
193static void kvm_shared_msr_cpu_online(void)
194{
195 unsigned i;
Avi Kivity18863bd2009-09-07 11:12:18 +0300196
197 for (i = 0; i < shared_msrs_global.nr; ++i)
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800198 shared_msr_update(i, shared_msrs_global.msrs[i]);
Avi Kivity18863bd2009-09-07 11:12:18 +0300199}
200
Avi Kivityd5696722009-12-02 12:28:47 +0200201void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
Avi Kivity18863bd2009-09-07 11:12:18 +0300202{
203 struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
204
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800205 if (((value ^ smsr->values[slot].curr) & mask) == 0)
Avi Kivity18863bd2009-09-07 11:12:18 +0300206 return;
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800207 smsr->values[slot].curr = value;
208 wrmsrl(shared_msrs_global.msrs[slot], value);
Avi Kivity18863bd2009-09-07 11:12:18 +0300209 if (!smsr->registered) {
210 smsr->urn.on_user_return = kvm_on_user_return;
211 user_return_notifier_register(&smsr->urn);
212 smsr->registered = true;
213 }
214}
215EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
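/*
 * Illustrative usage sketch (an editorial addition, not part of the original
 * file): a vendor module would typically register the MSRs it shares with
 * the guest once at hardware-setup time and then switch in the guest value
 * lazily before entry; the host value is restored by kvm_on_user_return()
 * only when the CPU actually returns to userspace.  guest_star_value below
 * is a placeholder for whatever the guest last wrote:
 *
 *	kvm_define_shared_msr(0, MSR_K6_STAR);
 *	...
 *	kvm_set_shared_msr(0, guest_star_value, ~0ull);
 */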
216
Avi Kivity3548bab2009-11-28 14:18:47 +0200217static void drop_user_return_notifiers(void *ignore)
218{
219 struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
220
221 if (smsr->registered)
222 kvm_on_user_return(&smsr->urn);
223}
224
Carsten Otte5fb76f92007-10-29 16:08:51 +0100225unsigned long segment_base(u16 selector)
226{
227 struct descriptor_table gdt;
Avi Kivitya5f61302008-02-20 17:57:21 +0200228 struct desc_struct *d;
Carsten Otte5fb76f92007-10-29 16:08:51 +0100229 unsigned long table_base;
230 unsigned long v;
231
232 if (selector == 0)
233 return 0;
234
Akinobu Mitab792c342009-07-19 00:00:01 +0900235 kvm_get_gdt(&gdt);
Carsten Otte5fb76f92007-10-29 16:08:51 +0100236 table_base = gdt.base;
237
238 if (selector & 4) { /* from ldt */
Akinobu Mitab792c342009-07-19 00:00:01 +0900239 u16 ldt_selector = kvm_read_ldt();
Carsten Otte5fb76f92007-10-29 16:08:51 +0100240
Carsten Otte5fb76f92007-10-29 16:08:51 +0100241 table_base = segment_base(ldt_selector);
242 }
Avi Kivitya5f61302008-02-20 17:57:21 +0200243 d = (struct desc_struct *)(table_base + (selector & ~7));
Akinobu Mita46a359e2009-07-18 23:58:32 +0900244 v = get_desc_base(d);
Carsten Otte5fb76f92007-10-29 16:08:51 +0100245#ifdef CONFIG_X86_64
Avi Kivitya5f61302008-02-20 17:57:21 +0200246 if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
247 v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
Carsten Otte5fb76f92007-10-29 16:08:51 +0100248#endif
249 return v;
250}
251EXPORT_SYMBOL_GPL(segment_base);
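/*
 * Editorial example (not in the original source): for selector 0x2b,
 * bit 2 (0x4) is set, so the descriptor lives in the LDT, and
 * (selector & ~7) == 0x28 is the byte offset of its 8-byte entry inside
 * that table; segment_base() therefore reads the descriptor at
 * ldt_base + 0x28 and returns the base assembled by get_desc_base()
 * (plus base3 for 64-bit system descriptors).
 */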
252
Carsten Otte6866b832007-10-29 16:09:10 +0100253u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
254{
255 if (irqchip_in_kernel(vcpu->kvm))
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800256 return vcpu->arch.apic_base;
Carsten Otte6866b832007-10-29 16:09:10 +0100257 else
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800258 return vcpu->arch.apic_base;
Carsten Otte6866b832007-10-29 16:09:10 +0100259}
260EXPORT_SYMBOL_GPL(kvm_get_apic_base);
261
262void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
263{
264 /* TODO: reserve bits check */
265 if (irqchip_in_kernel(vcpu->kvm))
266 kvm_lapic_set_base(vcpu, data);
267 else
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800268 vcpu->arch.apic_base = data;
Carsten Otte6866b832007-10-29 16:09:10 +0100269}
270EXPORT_SYMBOL_GPL(kvm_set_apic_base);
271
Eddie Dong3fd28fc2009-11-19 17:54:07 +0200272#define EXCPT_BENIGN 0
273#define EXCPT_CONTRIBUTORY 1
274#define EXCPT_PF 2
275
276static int exception_class(int vector)
277{
278 switch (vector) {
279 case PF_VECTOR:
280 return EXCPT_PF;
281 case DE_VECTOR:
282 case TS_VECTOR:
283 case NP_VECTOR:
284 case SS_VECTOR:
285 case GP_VECTOR:
286 return EXCPT_CONTRIBUTORY;
287 default:
288 break;
289 }
290 return EXCPT_BENIGN;
291}
292
293static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
294 unsigned nr, bool has_error, u32 error_code)
295{
296 u32 prev_nr;
297 int class1, class2;
298
299 if (!vcpu->arch.exception.pending) {
300 queue:
301 vcpu->arch.exception.pending = true;
302 vcpu->arch.exception.has_error_code = has_error;
303 vcpu->arch.exception.nr = nr;
304 vcpu->arch.exception.error_code = error_code;
305 return;
306 }
307
 308 /* an exception is already pending; decide how the two combine */
309 prev_nr = vcpu->arch.exception.nr;
310 if (prev_nr == DF_VECTOR) {
311 /* triple fault -> shutdown */
312 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
313 return;
314 }
315 class1 = exception_class(prev_nr);
316 class2 = exception_class(nr);
317 if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
318 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
319 /* generate double fault per SDM Table 5-5 */
320 vcpu->arch.exception.pending = true;
321 vcpu->arch.exception.has_error_code = true;
322 vcpu->arch.exception.nr = DF_VECTOR;
323 vcpu->arch.exception.error_code = 0;
324 } else
 325 /* replace previous exception with a new one in the hope
 326 that instruction re-execution will regenerate the lost
 327 exception */
328 goto queue;
329}
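/*
 * Worked example (editorial note, not from the original source): with a
 * #PF already pending, a subsequent kvm_queue_exception_e(vcpu, GP_VECTOR, 0)
 * takes the (class1 == EXCPT_PF && class2 != EXCPT_BENIGN) branch above and
 * is merged into a #DF with error code 0; if the pending exception is
 * already #DF, any further exception escalates to a triple-fault shutdown
 * request instead.
 */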
330
Avi Kivity298101d2007-11-25 13:41:11 +0200331void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
332{
Eddie Dong3fd28fc2009-11-19 17:54:07 +0200333 kvm_multiple_exception(vcpu, nr, false, 0);
Avi Kivity298101d2007-11-25 13:41:11 +0200334}
335EXPORT_SYMBOL_GPL(kvm_queue_exception);
336
Avi Kivityc3c91fe2007-11-25 14:04:58 +0200337void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
338 u32 error_code)
339{
340 ++vcpu->stat.pf_guest;
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800341 vcpu->arch.cr2 = addr;
Avi Kivityc3c91fe2007-11-25 14:04:58 +0200342 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
343}
344
Sheng Yang3419ffc2008-05-15 09:52:48 +0800345void kvm_inject_nmi(struct kvm_vcpu *vcpu)
346{
347 vcpu->arch.nmi_pending = 1;
348}
349EXPORT_SYMBOL_GPL(kvm_inject_nmi);
350
Avi Kivity298101d2007-11-25 13:41:11 +0200351void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
352{
Eddie Dong3fd28fc2009-11-19 17:54:07 +0200353 kvm_multiple_exception(vcpu, nr, true, error_code);
Avi Kivity298101d2007-11-25 13:41:11 +0200354}
355EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
356
Carsten Ottea03490e2007-10-29 16:09:35 +0100357/*
Avi Kivity0a79b002009-09-01 12:03:25 +0300358 * Check whether cpl <= required_cpl; if so, return true. Otherwise queue
359 * a #GP and return false.
360 */
361bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
Carsten Otte043405e2007-10-10 17:16:19 +0200362{
Avi Kivity0a79b002009-09-01 12:03:25 +0300363 if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
364 return true;
365 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
366 return false;
Carsten Ottea03490e2007-10-29 16:09:35 +0100367}
Avi Kivity0a79b002009-09-01 12:03:25 +0300368EXPORT_SYMBOL_GPL(kvm_require_cpl);
Carsten Ottea03490e2007-10-29 16:09:35 +0100369
370/*
 371 * Load the pae pdptrs. Return true if they are all valid.
372 */
373int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
374{
375 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
376 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
377 int i;
378 int ret;
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800379 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
Carsten Ottea03490e2007-10-29 16:09:35 +0100380
Carsten Ottea03490e2007-10-29 16:09:35 +0100381 ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
382 offset * sizeof(u64), sizeof(pdpte));
383 if (ret < 0) {
384 ret = 0;
385 goto out;
386 }
387 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
Avi Kivity43a37952009-06-10 14:12:05 +0300388 if (is_present_gpte(pdpte[i]) &&
Dong, Eddie20c466b2009-03-31 23:03:45 +0800389 (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
Carsten Ottea03490e2007-10-29 16:09:35 +0100390 ret = 0;
391 goto out;
392 }
393 }
394 ret = 1;
395
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800396 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
Avi Kivity6de4f3a2009-05-31 22:58:47 +0300397 __set_bit(VCPU_EXREG_PDPTR,
398 (unsigned long *)&vcpu->arch.regs_avail);
399 __set_bit(VCPU_EXREG_PDPTR,
400 (unsigned long *)&vcpu->arch.regs_dirty);
Carsten Ottea03490e2007-10-29 16:09:35 +0100401out:
Carsten Ottea03490e2007-10-29 16:09:35 +0100402
403 return ret;
404}
Joerg Roedelcc4b6872008-02-07 13:47:43 +0100405EXPORT_SYMBOL_GPL(load_pdptrs);
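/*
 * Editorial example (not part of the original file): in PAE mode CR3 is
 * only 32-byte aligned, and the offset computation above simply recovers
 * that sub-page alignment; e.g. for CR3 low bits 0x0a0 the offset is
 * ((0x0a0 >> 5) << 2) == 20 u64 slots == byte 0xa0, from which the four
 * consecutive PDPTEs are read.
 */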
Carsten Ottea03490e2007-10-29 16:09:35 +0100406
Avi Kivityd835dfe2007-11-21 02:57:59 +0200407static bool pdptrs_changed(struct kvm_vcpu *vcpu)
408{
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800409 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
Avi Kivityd835dfe2007-11-21 02:57:59 +0200410 bool changed = true;
411 int r;
412
413 if (is_long_mode(vcpu) || !is_pae(vcpu))
414 return false;
415
Avi Kivity6de4f3a2009-05-31 22:58:47 +0300416 if (!test_bit(VCPU_EXREG_PDPTR,
417 (unsigned long *)&vcpu->arch.regs_avail))
418 return true;
419
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800420 r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
Avi Kivityd835dfe2007-11-21 02:57:59 +0200421 if (r < 0)
422 goto out;
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800423 changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
Avi Kivityd835dfe2007-11-21 02:57:59 +0200424out:
Avi Kivityd835dfe2007-11-21 02:57:59 +0200425
426 return changed;
427}
428
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200429void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
Carsten Ottea03490e2007-10-29 16:09:35 +0100430{
Avi Kivityf9a48e62010-01-06 19:10:22 +0200431 cr0 |= X86_CR0_ET;
432
Gleb Natapovab344822010-01-21 15:28:46 +0200433#ifdef CONFIG_X86_64
434 if (cr0 & 0xffffffff00000000UL) {
Carsten Ottea03490e2007-10-29 16:09:35 +0100435 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
Avi Kivity4d4ec082009-12-29 18:07:30 +0200436 cr0, kvm_read_cr0(vcpu));
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200437 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100438 return;
439 }
Gleb Natapovab344822010-01-21 15:28:46 +0200440#endif
441
442 cr0 &= ~CR0_RESERVED_BITS;
Carsten Ottea03490e2007-10-29 16:09:35 +0100443
444 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
445 printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200446 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100447 return;
448 }
449
450 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
451 printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
452 "and a clear PE flag\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200453 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100454 return;
455 }
456
457 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
458#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +0200459 if ((vcpu->arch.efer & EFER_LME)) {
Carsten Ottea03490e2007-10-29 16:09:35 +0100460 int cs_db, cs_l;
461
462 if (!is_pae(vcpu)) {
463 printk(KERN_DEBUG "set_cr0: #GP, start paging "
464 "in long mode while PAE is disabled\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200465 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100466 return;
467 }
468 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
469 if (cs_l) {
470 printk(KERN_DEBUG "set_cr0: #GP, start paging "
471 "in long mode while CS.L == 1\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200472 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100473 return;
474
475 }
476 } else
477#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800478 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
Carsten Ottea03490e2007-10-29 16:09:35 +0100479 printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
480 "reserved bits\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200481 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100482 return;
483 }
484
485 }
486
487 kvm_x86_ops->set_cr0(vcpu, cr0);
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800488 vcpu->arch.cr0 = cr0;
Carsten Ottea03490e2007-10-29 16:09:35 +0100489
Carsten Ottea03490e2007-10-29 16:09:35 +0100490 kvm_mmu_reset_context(vcpu);
Carsten Ottea03490e2007-10-29 16:09:35 +0100491 return;
492}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200493EXPORT_SYMBOL_GPL(kvm_set_cr0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100494
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200495void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
Carsten Ottea03490e2007-10-29 16:09:35 +0100496{
Avi Kivity4d4ec082009-12-29 18:07:30 +0200497 kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
Carsten Ottea03490e2007-10-29 16:09:35 +0100498}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200499EXPORT_SYMBOL_GPL(kvm_lmsw);
Carsten Ottea03490e2007-10-29 16:09:35 +0100500
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200501void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Carsten Ottea03490e2007-10-29 16:09:35 +0100502{
Avi Kivityfc78f512009-12-07 12:16:48 +0200503 unsigned long old_cr4 = kvm_read_cr4(vcpu);
Avi Kivitya2edf572009-05-24 22:19:00 +0300504 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
505
Carsten Ottea03490e2007-10-29 16:09:35 +0100506 if (cr4 & CR4_RESERVED_BITS) {
507 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200508 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100509 return;
510 }
511
512 if (is_long_mode(vcpu)) {
513 if (!(cr4 & X86_CR4_PAE)) {
514 printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
515 "in long mode\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200516 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100517 return;
518 }
Avi Kivitya2edf572009-05-24 22:19:00 +0300519 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
520 && ((cr4 ^ old_cr4) & pdptr_bits)
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800521 && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
Carsten Ottea03490e2007-10-29 16:09:35 +0100522 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200523 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100524 return;
525 }
526
527 if (cr4 & X86_CR4_VMXE) {
528 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200529 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100530 return;
531 }
532 kvm_x86_ops->set_cr4(vcpu, cr4);
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800533 vcpu->arch.cr4 = cr4;
Avi Kivity5a41acc2009-01-11 17:19:35 +0200534 vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
Carsten Ottea03490e2007-10-29 16:09:35 +0100535 kvm_mmu_reset_context(vcpu);
Carsten Ottea03490e2007-10-29 16:09:35 +0100536}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200537EXPORT_SYMBOL_GPL(kvm_set_cr4);
Carsten Ottea03490e2007-10-29 16:09:35 +0100538
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200539void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
Carsten Ottea03490e2007-10-29 16:09:35 +0100540{
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800541 if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
Marcelo Tosatti0ba73cd2008-09-23 13:18:34 -0300542 kvm_mmu_sync_roots(vcpu);
Avi Kivityd835dfe2007-11-21 02:57:59 +0200543 kvm_mmu_flush_tlb(vcpu);
544 return;
545 }
546
Carsten Ottea03490e2007-10-29 16:09:35 +0100547 if (is_long_mode(vcpu)) {
548 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
549 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200550 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100551 return;
552 }
553 } else {
554 if (is_pae(vcpu)) {
555 if (cr3 & CR3_PAE_RESERVED_BITS) {
556 printk(KERN_DEBUG
557 "set_cr3: #GP, reserved bits\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200558 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100559 return;
560 }
561 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
562 printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
563 "reserved bits\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200564 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100565 return;
566 }
567 }
568 /*
569 * We don't check reserved bits in nonpae mode, because
570 * this isn't enforced, and VMware depends on this.
571 */
572 }
573
Carsten Ottea03490e2007-10-29 16:09:35 +0100574 /*
575 * Does the new cr3 value map to physical memory? (Note, we
576 * catch an invalid cr3 even in real-mode, because it would
577 * cause trouble later on when we turn on paging anyway.)
578 *
579 * A real CPU would silently accept an invalid cr3 and would
580 * attempt to use it - with largely undefined (and often hard
581 * to debug) behavior on the guest side.
582 */
583 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200584 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100585 else {
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800586 vcpu->arch.cr3 = cr3;
587 vcpu->arch.mmu.new_cr3(vcpu);
Carsten Ottea03490e2007-10-29 16:09:35 +0100588 }
Carsten Ottea03490e2007-10-29 16:09:35 +0100589}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200590EXPORT_SYMBOL_GPL(kvm_set_cr3);
Carsten Ottea03490e2007-10-29 16:09:35 +0100591
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200592void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
Carsten Ottea03490e2007-10-29 16:09:35 +0100593{
594 if (cr8 & CR8_RESERVED_BITS) {
595 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200596 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100597 return;
598 }
599 if (irqchip_in_kernel(vcpu->kvm))
600 kvm_lapic_set_tpr(vcpu, cr8);
601 else
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800602 vcpu->arch.cr8 = cr8;
Carsten Ottea03490e2007-10-29 16:09:35 +0100603}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200604EXPORT_SYMBOL_GPL(kvm_set_cr8);
Carsten Ottea03490e2007-10-29 16:09:35 +0100605
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200606unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
Carsten Ottea03490e2007-10-29 16:09:35 +0100607{
608 if (irqchip_in_kernel(vcpu->kvm))
609 return kvm_lapic_get_cr8(vcpu);
610 else
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800611 return vcpu->arch.cr8;
Carsten Ottea03490e2007-10-29 16:09:35 +0100612}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200613EXPORT_SYMBOL_GPL(kvm_get_cr8);
Carsten Ottea03490e2007-10-29 16:09:35 +0100614
Alexander Grafd8017472008-11-25 20:17:11 +0100615static inline u32 bit(int bitno)
616{
617 return 1 << (bitno & 31);
618}
619
Carsten Otte043405e2007-10-10 17:16:19 +0200620/*
621 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
622 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
623 *
624 * This list is modified at module load time to reflect the
Glauber Costae3267cb2009-10-06 13:24:50 -0400625 * capabilities of the host cpu. This capability test skips MSRs that are
 626 * kvm-specific. Those are put at the beginning of the list.
Carsten Otte043405e2007-10-10 17:16:19 +0200627 */
Glauber Costae3267cb2009-10-06 13:24:50 -0400628
Gleb Natapov10388a02010-01-17 15:51:23 +0200629#define KVM_SAVE_MSRS_BEGIN 5
Carsten Otte043405e2007-10-10 17:16:19 +0200630static u32 msrs_to_save[] = {
Glauber Costae3267cb2009-10-06 13:24:50 -0400631 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
Gleb Natapov55cd8e52010-01-17 15:51:22 +0200632 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
Gleb Natapov10388a02010-01-17 15:51:23 +0200633 HV_X64_MSR_APIC_ASSIST_PAGE,
Carsten Otte043405e2007-10-10 17:16:19 +0200634 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
635 MSR_K6_STAR,
636#ifdef CONFIG_X86_64
637 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
638#endif
Glauber Costae3267cb2009-10-06 13:24:50 -0400639 MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
Carsten Otte043405e2007-10-10 17:16:19 +0200640};
641
642static unsigned num_msrs_to_save;
643
644static u32 emulated_msrs[] = {
645 MSR_IA32_MISC_ENABLE,
646};
647
Carsten Otte15c4a642007-10-30 18:44:17 +0100648static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
649{
Joerg Roedelf2b4b7d2008-01-31 14:57:37 +0100650 if (efer & efer_reserved_bits) {
Carsten Otte15c4a642007-10-30 18:44:17 +0100651 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
652 efer);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200653 kvm_inject_gp(vcpu, 0);
Carsten Otte15c4a642007-10-30 18:44:17 +0100654 return;
655 }
656
657 if (is_paging(vcpu)
Avi Kivityf6801df2010-01-21 15:31:50 +0200658 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
Carsten Otte15c4a642007-10-30 18:44:17 +0100659 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200660 kvm_inject_gp(vcpu, 0);
Carsten Otte15c4a642007-10-30 18:44:17 +0100661 return;
662 }
663
Alexander Graf1b2fd702009-02-02 16:23:51 +0100664 if (efer & EFER_FFXSR) {
665 struct kvm_cpuid_entry2 *feat;
666
667 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
668 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
669 printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
670 kvm_inject_gp(vcpu, 0);
671 return;
672 }
673 }
674
Alexander Grafd8017472008-11-25 20:17:11 +0100675 if (efer & EFER_SVME) {
676 struct kvm_cpuid_entry2 *feat;
677
678 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
679 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
680 printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
681 kvm_inject_gp(vcpu, 0);
682 return;
683 }
684 }
685
Carsten Otte15c4a642007-10-30 18:44:17 +0100686 kvm_x86_ops->set_efer(vcpu, efer);
687
688 efer &= ~EFER_LMA;
Avi Kivityf6801df2010-01-21 15:31:50 +0200689 efer |= vcpu->arch.efer & EFER_LMA;
Carsten Otte15c4a642007-10-30 18:44:17 +0100690
Avi Kivityf6801df2010-01-21 15:31:50 +0200691 vcpu->arch.efer = efer;
Avi Kivity9645bb562009-03-31 11:31:54 +0300692
693 vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
694 kvm_mmu_reset_context(vcpu);
Carsten Otte15c4a642007-10-30 18:44:17 +0100695}
696
Joerg Roedelf2b4b7d2008-01-31 14:57:37 +0100697void kvm_enable_efer_bits(u64 mask)
698{
699 efer_reserved_bits &= ~mask;
700}
701EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
702
703
Carsten Otte15c4a642007-10-30 18:44:17 +0100704/*
 705 * Writes msr value into the appropriate "register".
706 * Returns 0 on success, non-0 otherwise.
707 * Assumes vcpu_load() was already called.
708 */
709int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
710{
711 return kvm_x86_ops->set_msr(vcpu, msr_index, data);
712}
713
Carsten Otte313a3dc2007-10-11 19:16:52 +0200714/*
715 * Adapt set_msr() to msr_io()'s calling convention
716 */
717static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
718{
719 return kvm_set_msr(vcpu, index, *data);
720}
721
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200722static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
723{
724 static int version;
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200725 struct pvclock_wall_clock wc;
Jason Wang923de3c2010-01-27 19:13:49 +0800726 struct timespec boot;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200727
728 if (!wall_clock)
729 return;
730
731 version++;
732
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200733 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
734
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200735 /*
736 * The guest calculates current wall clock time by adding
737 * system time (updated by kvm_write_guest_time below) to the
 738 * wall clock specified here. Guest system time equals host
739 * system time for us, thus we must fill in host boot time here.
740 */
Jason Wang923de3c2010-01-27 19:13:49 +0800741 getboottime(&boot);
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200742
743 wc.sec = boot.tv_sec;
744 wc.nsec = boot.tv_nsec;
745 wc.version = version;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200746
747 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
748
749 version++;
750 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200751}
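/*
 * Editorial sketch (not in the original source) of the guest-side sum the
 * comment above refers to:
 *
 *	current wall time = wc.sec/wc.nsec (host boot time, written here)
 *	                  + system_time    (ns since boot, filled in by
 *	                                    kvm_write_guest_time())
 *
 * The version field is left odd while the structure is being rewritten and
 * even once the update is complete, so the guest can detect a torn read.
 */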
752
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200753static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
754{
755 uint32_t quotient, remainder;
756
 757 /* Don't try to replace this with do_div(); this one calculates
758 * "(dividend << 32) / divisor" */
759 __asm__ ( "divl %4"
760 : "=a" (quotient), "=d" (remainder)
761 : "0" (0), "1" (dividend), "r" (divisor) );
762 return quotient;
763}
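/*
 * Editorial note (not part of the original file): assuming the result fits
 * in 32 bits, the asm above is equivalent to
 *
 *	quotient = (uint32_t)(((uint64_t)dividend << 32) / divisor);
 *
 * i.e. a 32.32 fixed-point ratio; kvm_set_time_scale() below stores it as
 * the pvclock tsc_to_system_mul multiplier.
 */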
764
765static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
766{
767 uint64_t nsecs = 1000000000LL;
768 int32_t shift = 0;
769 uint64_t tps64;
770 uint32_t tps32;
771
772 tps64 = tsc_khz * 1000LL;
773 while (tps64 > nsecs*2) {
774 tps64 >>= 1;
775 shift--;
776 }
777
778 tps32 = (uint32_t)tps64;
779 while (tps32 <= (uint32_t)nsecs) {
780 tps32 <<= 1;
781 shift++;
782 }
783
784 hv_clock->tsc_shift = shift;
785 hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
786
787 pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
Harvey Harrison80a914d2008-10-15 22:01:25 -0700788 __func__, tsc_khz, hv_clock->tsc_shift,
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200789 hv_clock->tsc_to_system_mul);
790}
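/*
 * Worked example (editorial, not from the original source): for tsc_khz ==
 * 2000000 (a 2 GHz TSC), tps64 == 2e9 so neither loop runs, leaving
 * tsc_shift == 0 and tsc_to_system_mul == div_frac(1e9, 2e9) == 0x80000000;
 * the guest thus multiplies elapsed TSC ticks by 0.5 in 32.32 fixed point
 * to convert them to nanoseconds.
 */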
791
Gerd Hoffmannc8076602009-02-04 17:52:04 +0100792static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
793
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200794static void kvm_write_guest_time(struct kvm_vcpu *v)
795{
796 struct timespec ts;
797 unsigned long flags;
798 struct kvm_vcpu_arch *vcpu = &v->arch;
799 void *shared_kaddr;
Avi Kivity463656c2009-04-12 15:49:07 +0300800 unsigned long this_tsc_khz;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200801
802 if ((!vcpu->time_page))
803 return;
804
Avi Kivity463656c2009-04-12 15:49:07 +0300805 this_tsc_khz = get_cpu_var(cpu_tsc_khz);
806 if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
807 kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
808 vcpu->hv_clock_tsc_khz = this_tsc_khz;
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200809 }
Avi Kivity463656c2009-04-12 15:49:07 +0300810 put_cpu_var(cpu_tsc_khz);
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200811
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200812 /* Keep irq disabled to prevent changes to the clock */
813 local_irq_save(flags);
Jaswinder Singh Rajputaf24a4e2009-05-15 18:42:05 +0530814 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200815 ktime_get_ts(&ts);
Jason Wang923de3c2010-01-27 19:13:49 +0800816 monotonic_to_bootbased(&ts);
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200817 local_irq_restore(flags);
818
819 /* With all the info we got, fill in the values */
820
821 vcpu->hv_clock.system_time = ts.tv_nsec +
Glauber Costaafbcf7a2009-10-16 15:28:36 -0400822 (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
823
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200824 /*
825 * The interface expects us to write an even number signaling that the
826 * update is finished. Since the guest won't see the intermediate
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200827 * state, we just increase by 2 at the end.
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200828 */
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200829 vcpu->hv_clock.version += 2;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200830
831 shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
832
833 memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200834 sizeof(vcpu->hv_clock));
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200835
836 kunmap_atomic(shared_kaddr, KM_USER0);
837
838 mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
839}
840
Gerd Hoffmannc8076602009-02-04 17:52:04 +0100841static int kvm_request_guest_time_update(struct kvm_vcpu *v)
842{
843 struct kvm_vcpu_arch *vcpu = &v->arch;
844
845 if (!vcpu->time_page)
846 return 0;
847 set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
848 return 1;
849}
850
Avi Kivity9ba075a2008-05-26 20:06:35 +0300851static bool msr_mtrr_valid(unsigned msr)
852{
853 switch (msr) {
854 case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
855 case MSR_MTRRfix64K_00000:
856 case MSR_MTRRfix16K_80000:
857 case MSR_MTRRfix16K_A0000:
858 case MSR_MTRRfix4K_C0000:
859 case MSR_MTRRfix4K_C8000:
860 case MSR_MTRRfix4K_D0000:
861 case MSR_MTRRfix4K_D8000:
862 case MSR_MTRRfix4K_E0000:
863 case MSR_MTRRfix4K_E8000:
864 case MSR_MTRRfix4K_F0000:
865 case MSR_MTRRfix4K_F8000:
866 case MSR_MTRRdefType:
867 case MSR_IA32_CR_PAT:
868 return true;
869 case 0x2f8:
870 return true;
871 }
872 return false;
873}
874
Marcelo Tosattid6289b92009-06-22 15:27:56 -0300875static bool valid_pat_type(unsigned t)
876{
877 return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
878}
879
880static bool valid_mtrr_type(unsigned t)
881{
882 return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
883}
884
885static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
886{
887 int i;
888
889 if (!msr_mtrr_valid(msr))
890 return false;
891
892 if (msr == MSR_IA32_CR_PAT) {
893 for (i = 0; i < 8; i++)
894 if (!valid_pat_type((data >> (i * 8)) & 0xff))
895 return false;
896 return true;
897 } else if (msr == MSR_MTRRdefType) {
898 if (data & ~0xcff)
899 return false;
900 return valid_mtrr_type(data & 0xff);
901 } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
902 for (i = 0; i < 8 ; i++)
903 if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
904 return false;
905 return true;
906 }
907
908 /* variable MTRRs */
909 return valid_mtrr_type(data & 0xff);
910}
911
Avi Kivity9ba075a2008-05-26 20:06:35 +0300912static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
913{
Sheng Yang0bed3b52008-10-09 16:01:54 +0800914 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
915
Marcelo Tosattid6289b92009-06-22 15:27:56 -0300916 if (!mtrr_valid(vcpu, msr, data))
Avi Kivity9ba075a2008-05-26 20:06:35 +0300917 return 1;
918
Sheng Yang0bed3b52008-10-09 16:01:54 +0800919 if (msr == MSR_MTRRdefType) {
920 vcpu->arch.mtrr_state.def_type = data;
921 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
922 } else if (msr == MSR_MTRRfix64K_00000)
923 p[0] = data;
924 else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
925 p[1 + msr - MSR_MTRRfix16K_80000] = data;
926 else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
927 p[3 + msr - MSR_MTRRfix4K_C0000] = data;
928 else if (msr == MSR_IA32_CR_PAT)
929 vcpu->arch.pat = data;
930 else { /* Variable MTRRs */
931 int idx, is_mtrr_mask;
932 u64 *pt;
933
934 idx = (msr - 0x200) / 2;
935 is_mtrr_mask = msr - 0x200 - 2 * idx;
936 if (!is_mtrr_mask)
937 pt =
938 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
939 else
940 pt =
941 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
942 *pt = data;
943 }
944
945 kvm_mmu_reset_context(vcpu);
Avi Kivity9ba075a2008-05-26 20:06:35 +0300946 return 0;
947}
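/*
 * Editorial example (not in the original source): for the variable-range
 * MTRRs, MSR 0x202 (MTRRphysBase1) decodes to idx == 1, is_mtrr_mask == 0
 * and MSR 0x203 (MTRRphysMask1) to idx == 1, is_mtrr_mask == 1, so the
 * value is stored through the u64 pointer at var_ranges[1].base_lo or
 * var_ranges[1].mask_lo respectively.
 */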
Carsten Otte15c4a642007-10-30 18:44:17 +0100948
Huang Ying890ca9a2009-05-11 16:48:15 +0800949static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
950{
951 u64 mcg_cap = vcpu->arch.mcg_cap;
952 unsigned bank_num = mcg_cap & 0xff;
953
954 switch (msr) {
955 case MSR_IA32_MCG_STATUS:
956 vcpu->arch.mcg_status = data;
957 break;
958 case MSR_IA32_MCG_CTL:
959 if (!(mcg_cap & MCG_CTL_P))
960 return 1;
961 if (data != 0 && data != ~(u64)0)
962 return -1;
963 vcpu->arch.mcg_ctl = data;
964 break;
965 default:
966 if (msr >= MSR_IA32_MC0_CTL &&
967 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
968 u32 offset = msr - MSR_IA32_MC0_CTL;
969 /* only 0 or all 1s can be written to IA32_MCi_CTL */
970 if ((offset & 0x3) == 0 &&
971 data != 0 && data != ~(u64)0)
972 return -1;
973 vcpu->arch.mce_banks[offset] = data;
974 break;
975 }
976 return 1;
977 }
978 return 0;
979}
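/*
 * Editorial example (not part of the original file): with MSR_IA32_MC0_CTL
 * == 0x400, a write to MSR 0x405 (MC1_STATUS) reaches the default branch
 * with offset == 5 and lands in mce_banks[5]; the "0 or all 1s" restriction
 * above only applies when (offset & 0x3) == 0, i.e. to the per-bank CTL
 * registers.
 */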
980
Ed Swierkffde22a2009-10-15 15:21:43 -0700981static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
982{
983 struct kvm *kvm = vcpu->kvm;
984 int lm = is_long_mode(vcpu);
985 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
986 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
987 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
988 : kvm->arch.xen_hvm_config.blob_size_32;
989 u32 page_num = data & ~PAGE_MASK;
990 u64 page_addr = data & PAGE_MASK;
991 u8 *page;
992 int r;
993
994 r = -E2BIG;
995 if (page_num >= blob_size)
996 goto out;
997 r = -ENOMEM;
998 page = kzalloc(PAGE_SIZE, GFP_KERNEL);
999 if (!page)
1000 goto out;
1001 r = -EFAULT;
1002 if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
1003 goto out_free;
1004 if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
1005 goto out_free;
1006 r = 0;
1007out_free:
1008 kfree(page);
1009out:
1010 return r;
1011}
1012
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001013static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1014{
1015 return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
1016}
1017
1018static bool kvm_hv_msr_partition_wide(u32 msr)
1019{
1020 bool r = false;
1021 switch (msr) {
1022 case HV_X64_MSR_GUEST_OS_ID:
1023 case HV_X64_MSR_HYPERCALL:
1024 r = true;
1025 break;
1026 }
1027
1028 return r;
1029}
1030
1031static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1032{
1033 struct kvm *kvm = vcpu->kvm;
1034
1035 switch (msr) {
1036 case HV_X64_MSR_GUEST_OS_ID:
1037 kvm->arch.hv_guest_os_id = data;
1038 /* setting guest os id to zero disables hypercall page */
1039 if (!kvm->arch.hv_guest_os_id)
1040 kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1041 break;
1042 case HV_X64_MSR_HYPERCALL: {
1043 u64 gfn;
1044 unsigned long addr;
1045 u8 instructions[4];
1046
1047 /* if guest os id is not set hypercall should remain disabled */
1048 if (!kvm->arch.hv_guest_os_id)
1049 break;
1050 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1051 kvm->arch.hv_hypercall = data;
1052 break;
1053 }
1054 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1055 addr = gfn_to_hva(kvm, gfn);
1056 if (kvm_is_error_hva(addr))
1057 return 1;
1058 kvm_x86_ops->patch_hypercall(vcpu, instructions);
1059 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
1060 if (copy_to_user((void __user *)addr, instructions, 4))
1061 return 1;
1062 kvm->arch.hv_hypercall = data;
1063 break;
1064 }
1065 default:
1066 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1067 "data 0x%llx\n", msr, data);
1068 return 1;
1069 }
1070 return 0;
1071}
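/*
 * Editorial note (not from the original source): after patching, the
 * hypercall page starts with the 3-byte vendor hypercall instruction
 * emitted by kvm_x86_ops->patch_hypercall() (vmcall on VMX, vmmcall on
 * SVM) followed by the 0xc3 ret written above, so the Hyper-V guest can
 * simply call into the page.
 */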
1072
1073static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1074{
Gleb Natapov10388a02010-01-17 15:51:23 +02001075 switch (msr) {
1076 case HV_X64_MSR_APIC_ASSIST_PAGE: {
1077 unsigned long addr;
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001078
Gleb Natapov10388a02010-01-17 15:51:23 +02001079 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
1080 vcpu->arch.hv_vapic = data;
1081 break;
1082 }
1083 addr = gfn_to_hva(vcpu->kvm, data >>
1084 HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
1085 if (kvm_is_error_hva(addr))
1086 return 1;
1087 if (clear_user((void __user *)addr, PAGE_SIZE))
1088 return 1;
1089 vcpu->arch.hv_vapic = data;
1090 break;
1091 }
1092 case HV_X64_MSR_EOI:
1093 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1094 case HV_X64_MSR_ICR:
1095 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1096 case HV_X64_MSR_TPR:
1097 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1098 default:
1099 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1100 "data 0x%llx\n", msr, data);
1101 return 1;
1102 }
1103
1104 return 0;
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001105}
1106
Carsten Otte15c4a642007-10-30 18:44:17 +01001107int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1108{
1109 switch (msr) {
Carsten Otte15c4a642007-10-30 18:44:17 +01001110 case MSR_EFER:
1111 set_efer(vcpu, data);
1112 break;
Andre Przywara8f1589d2009-06-24 12:44:33 +02001113 case MSR_K7_HWCR:
1114 data &= ~(u64)0x40; /* ignore flush filter disable */
1115 if (data != 0) {
1116 pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
1117 data);
1118 return 1;
1119 }
Carsten Otte15c4a642007-10-30 18:44:17 +01001120 break;
Andre Przywaraf7c6d142009-07-02 15:04:14 +02001121 case MSR_FAM10H_MMIO_CONF_BASE:
1122 if (data != 0) {
1123 pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
1124 "0x%llx\n", data);
1125 return 1;
1126 }
Carsten Otte15c4a642007-10-30 18:44:17 +01001127 break;
Andre Przywarac323c0e2009-06-24 15:37:05 +02001128 case MSR_AMD64_NB_CFG:
Joerg Roedelc7ac6792008-02-11 20:28:27 +01001129 break;
Alexander Grafb5e2fec2008-07-22 08:00:45 +02001130 case MSR_IA32_DEBUGCTLMSR:
1131 if (!data) {
1132 /* We support the non-activated case already */
1133 break;
1134 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
1135 /* Values other than LBR and BTF are vendor-specific,
1136 thus reserved and should throw a #GP */
1137 return 1;
1138 }
1139 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1140 __func__, data);
1141 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001142 case MSR_IA32_UCODE_REV:
1143 case MSR_IA32_UCODE_WRITE:
Avi Kivity61a6bd62008-12-29 17:32:28 +02001144 case MSR_VM_HSAVE_PA:
Andre Przywara6098ca92009-07-03 16:00:14 +02001145 case MSR_AMD64_PATCH_LOADER:
Carsten Otte15c4a642007-10-30 18:44:17 +01001146 break;
Avi Kivity9ba075a2008-05-26 20:06:35 +03001147 case 0x200 ... 0x2ff:
1148 return set_msr_mtrr(vcpu, msr, data);
Carsten Otte15c4a642007-10-30 18:44:17 +01001149 case MSR_IA32_APICBASE:
1150 kvm_set_apic_base(vcpu, data);
1151 break;
Gleb Natapov0105d1a2009-07-05 17:39:36 +03001152 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1153 return kvm_x2apic_msr_write(vcpu, msr, data);
Carsten Otte15c4a642007-10-30 18:44:17 +01001154 case MSR_IA32_MISC_ENABLE:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001155 vcpu->arch.ia32_misc_enable_msr = data;
Carsten Otte15c4a642007-10-30 18:44:17 +01001156 break;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001157 case MSR_KVM_WALL_CLOCK:
1158 vcpu->kvm->arch.wall_clock = data;
1159 kvm_write_wall_clock(vcpu->kvm, data);
1160 break;
1161 case MSR_KVM_SYSTEM_TIME: {
1162 if (vcpu->arch.time_page) {
1163 kvm_release_page_dirty(vcpu->arch.time_page);
1164 vcpu->arch.time_page = NULL;
1165 }
1166
1167 vcpu->arch.time = data;
1168
1169 /* we verify if the enable bit is set... */
1170 if (!(data & 1))
1171 break;
1172
1173 /* ...but clean it before doing the actual write */
1174 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
1175
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001176 vcpu->arch.time_page =
1177 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001178
1179 if (is_error_page(vcpu->arch.time_page)) {
1180 kvm_release_page_clean(vcpu->arch.time_page);
1181 vcpu->arch.time_page = NULL;
1182 }
1183
Gerd Hoffmannc8076602009-02-04 17:52:04 +01001184 kvm_request_guest_time_update(vcpu);
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001185 break;
1186 }
Huang Ying890ca9a2009-05-11 16:48:15 +08001187 case MSR_IA32_MCG_CTL:
1188 case MSR_IA32_MCG_STATUS:
1189 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1190 return set_msr_mce(vcpu, msr, data);
Andre Przywara71db6022009-06-12 22:01:29 +02001191
1192 /* Performance counters are not protected by a CPUID bit,
1193 * so we should check all of them in the generic path for the sake of
1194 * cross vendor migration.
1195 * Writing a zero into the event select MSRs disables them,
1196 * which we perfectly emulate ;-). Any other value should be at least
 1197 * reported; some guests depend on them.
1198 */
1199 case MSR_P6_EVNTSEL0:
1200 case MSR_P6_EVNTSEL1:
1201 case MSR_K7_EVNTSEL0:
1202 case MSR_K7_EVNTSEL1:
1203 case MSR_K7_EVNTSEL2:
1204 case MSR_K7_EVNTSEL3:
1205 if (data != 0)
1206 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1207 "0x%x data 0x%llx\n", msr, data);
1208 break;
1209 /* at least RHEL 4 unconditionally writes to the perfctr registers,
1210 * so we ignore writes to make it happy.
1211 */
1212 case MSR_P6_PERFCTR0:
1213 case MSR_P6_PERFCTR1:
1214 case MSR_K7_PERFCTR0:
1215 case MSR_K7_PERFCTR1:
1216 case MSR_K7_PERFCTR2:
1217 case MSR_K7_PERFCTR3:
1218 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1219 "0x%x data 0x%llx\n", msr, data);
1220 break;
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001221 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1222 if (kvm_hv_msr_partition_wide(msr)) {
1223 int r;
1224 mutex_lock(&vcpu->kvm->lock);
1225 r = set_msr_hyperv_pw(vcpu, msr, data);
1226 mutex_unlock(&vcpu->kvm->lock);
1227 return r;
1228 } else
1229 return set_msr_hyperv(vcpu, msr, data);
1230 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001231 default:
Ed Swierkffde22a2009-10-15 15:21:43 -07001232 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
1233 return xen_hvm_config(vcpu, data);
Andre Przywaraed85c062009-06-25 12:36:49 +02001234 if (!ignore_msrs) {
1235 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
1236 msr, data);
1237 return 1;
1238 } else {
1239 pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
1240 msr, data);
1241 break;
1242 }
Carsten Otte15c4a642007-10-30 18:44:17 +01001243 }
1244 return 0;
1245}
1246EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1247
1248
1249/*
1250 * Reads an msr value (of 'msr_index') into 'pdata'.
1251 * Returns 0 on success, non-0 otherwise.
1252 * Assumes vcpu_load() was already called.
1253 */
1254int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1255{
1256 return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
1257}
1258
Avi Kivity9ba075a2008-05-26 20:06:35 +03001259static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1260{
Sheng Yang0bed3b52008-10-09 16:01:54 +08001261 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1262
Avi Kivity9ba075a2008-05-26 20:06:35 +03001263 if (!msr_mtrr_valid(msr))
1264 return 1;
1265
Sheng Yang0bed3b52008-10-09 16:01:54 +08001266 if (msr == MSR_MTRRdefType)
1267 *pdata = vcpu->arch.mtrr_state.def_type +
1268 (vcpu->arch.mtrr_state.enabled << 10);
1269 else if (msr == MSR_MTRRfix64K_00000)
1270 *pdata = p[0];
1271 else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1272 *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
1273 else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1274 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
1275 else if (msr == MSR_IA32_CR_PAT)
1276 *pdata = vcpu->arch.pat;
1277 else { /* Variable MTRRs */
1278 int idx, is_mtrr_mask;
1279 u64 *pt;
1280
1281 idx = (msr - 0x200) / 2;
1282 is_mtrr_mask = msr - 0x200 - 2 * idx;
1283 if (!is_mtrr_mask)
1284 pt =
1285 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1286 else
1287 pt =
1288 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1289 *pdata = *pt;
1290 }
1291
Avi Kivity9ba075a2008-05-26 20:06:35 +03001292 return 0;
1293}
1294
Huang Ying890ca9a2009-05-11 16:48:15 +08001295static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1296{
1297 u64 data;
1298 u64 mcg_cap = vcpu->arch.mcg_cap;
1299 unsigned bank_num = mcg_cap & 0xff;
1300
1301 switch (msr) {
1302 case MSR_IA32_P5_MC_ADDR:
1303 case MSR_IA32_P5_MC_TYPE:
1304 data = 0;
1305 break;
1306 case MSR_IA32_MCG_CAP:
1307 data = vcpu->arch.mcg_cap;
1308 break;
1309 case MSR_IA32_MCG_CTL:
1310 if (!(mcg_cap & MCG_CTL_P))
1311 return 1;
1312 data = vcpu->arch.mcg_ctl;
1313 break;
1314 case MSR_IA32_MCG_STATUS:
1315 data = vcpu->arch.mcg_status;
1316 break;
1317 default:
1318 if (msr >= MSR_IA32_MC0_CTL &&
1319 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1320 u32 offset = msr - MSR_IA32_MC0_CTL;
1321 data = vcpu->arch.mce_banks[offset];
1322 break;
1323 }
1324 return 1;
1325 }
1326 *pdata = data;
1327 return 0;
1328}
1329
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001330static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1331{
1332 u64 data = 0;
1333 struct kvm *kvm = vcpu->kvm;
1334
1335 switch (msr) {
1336 case HV_X64_MSR_GUEST_OS_ID:
1337 data = kvm->arch.hv_guest_os_id;
1338 break;
1339 case HV_X64_MSR_HYPERCALL:
1340 data = kvm->arch.hv_hypercall;
1341 break;
1342 default:
1343 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1344 return 1;
1345 }
1346
1347 *pdata = data;
1348 return 0;
1349}
1350
1351static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1352{
1353 u64 data = 0;
1354
1355 switch (msr) {
1356 case HV_X64_MSR_VP_INDEX: {
1357 int r;
1358 struct kvm_vcpu *v;
1359 kvm_for_each_vcpu(r, v, vcpu->kvm)
1360 if (v == vcpu)
1361 data = r;
1362 break;
1363 }
Gleb Natapov10388a02010-01-17 15:51:23 +02001364 case HV_X64_MSR_EOI:
1365 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1366 case HV_X64_MSR_ICR:
1367 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1368 case HV_X64_MSR_TPR:
1369 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001370 default:
1371 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1372 return 1;
1373 }
1374 *pdata = data;
1375 return 0;
1376}
1377
Carsten Otte15c4a642007-10-30 18:44:17 +01001378int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1379{
1380 u64 data;
1381
1382 switch (msr) {
Carsten Otte15c4a642007-10-30 18:44:17 +01001383 case MSR_IA32_PLATFORM_ID:
Carsten Otte15c4a642007-10-30 18:44:17 +01001384 case MSR_IA32_UCODE_REV:
Carsten Otte15c4a642007-10-30 18:44:17 +01001385 case MSR_IA32_EBL_CR_POWERON:
Alexander Grafb5e2fec2008-07-22 08:00:45 +02001386 case MSR_IA32_DEBUGCTLMSR:
1387 case MSR_IA32_LASTBRANCHFROMIP:
1388 case MSR_IA32_LASTBRANCHTOIP:
1389 case MSR_IA32_LASTINTFROMIP:
1390 case MSR_IA32_LASTINTTOIP:
Jaswinder Singh Rajput60af2ec2009-05-14 11:00:10 +05301391 case MSR_K8_SYSCFG:
1392 case MSR_K7_HWCR:
Avi Kivity61a6bd62008-12-29 17:32:28 +02001393 case MSR_VM_HSAVE_PA:
Amit Shah1f3ee612009-06-30 16:24:28 +05301394 case MSR_P6_PERFCTR0:
1395 case MSR_P6_PERFCTR1:
Amit Shah7fe29e02009-03-20 12:39:00 +05301396 case MSR_P6_EVNTSEL0:
1397 case MSR_P6_EVNTSEL1:
Amit Shah9e699622009-06-15 13:25:34 +05301398 case MSR_K7_EVNTSEL0:
Amit Shah1f3ee612009-06-30 16:24:28 +05301399 case MSR_K7_PERFCTR0:
Andre Przywara1fdbd482009-06-24 12:44:34 +02001400 case MSR_K8_INT_PENDING_MSG:
Andre Przywarac323c0e2009-06-24 15:37:05 +02001401 case MSR_AMD64_NB_CFG:
Andre Przywaraf7c6d142009-07-02 15:04:14 +02001402 case MSR_FAM10H_MMIO_CONF_BASE:
Carsten Otte15c4a642007-10-30 18:44:17 +01001403 data = 0;
1404 break;
Avi Kivity9ba075a2008-05-26 20:06:35 +03001405 case MSR_MTRRcap:
1406 data = 0x500 | KVM_NR_VAR_MTRR;
1407 break;
1408 case 0x200 ... 0x2ff:
1409 return get_msr_mtrr(vcpu, msr, pdata);
Carsten Otte15c4a642007-10-30 18:44:17 +01001410 case 0xcd: /* fsb frequency */
1411 data = 3;
1412 break;
1413 case MSR_IA32_APICBASE:
1414 data = kvm_get_apic_base(vcpu);
1415 break;
Gleb Natapov0105d1a2009-07-05 17:39:36 +03001416 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1417 return kvm_x2apic_msr_read(vcpu, msr, pdata);
1418 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001419 case MSR_IA32_MISC_ENABLE:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001420 data = vcpu->arch.ia32_misc_enable_msr;
Carsten Otte15c4a642007-10-30 18:44:17 +01001421 break;
Alexander Graf847f0ad2008-02-21 12:11:01 +01001422 case MSR_IA32_PERF_STATUS:
1423 /* TSC increment by tick */
1424 data = 1000ULL;
1425 /* CPU multiplier */
1426 data |= (((uint64_t)4ULL) << 40);
1427 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001428 case MSR_EFER:
Avi Kivityf6801df2010-01-21 15:31:50 +02001429 data = vcpu->arch.efer;
Carsten Otte15c4a642007-10-30 18:44:17 +01001430 break;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001431 case MSR_KVM_WALL_CLOCK:
1432 data = vcpu->kvm->arch.wall_clock;
1433 break;
1434 case MSR_KVM_SYSTEM_TIME:
1435 data = vcpu->arch.time;
1436 break;
Huang Ying890ca9a2009-05-11 16:48:15 +08001437 case MSR_IA32_P5_MC_ADDR:
1438 case MSR_IA32_P5_MC_TYPE:
1439 case MSR_IA32_MCG_CAP:
1440 case MSR_IA32_MCG_CTL:
1441 case MSR_IA32_MCG_STATUS:
1442 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1443 return get_msr_mce(vcpu, msr, pdata);
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001444 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1445 if (kvm_hv_msr_partition_wide(msr)) {
1446 int r;
1447 mutex_lock(&vcpu->kvm->lock);
1448 r = get_msr_hyperv_pw(vcpu, msr, pdata);
1449 mutex_unlock(&vcpu->kvm->lock);
1450 return r;
1451 } else
1452 return get_msr_hyperv(vcpu, msr, pdata);
1453 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001454 default:
Andre Przywaraed85c062009-06-25 12:36:49 +02001455 if (!ignore_msrs) {
1456 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
1457 return 1;
1458 } else {
1459 pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
1460 data = 0;
1461 }
1462 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001463 }
1464 *pdata = data;
1465 return 0;
1466}
1467EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1468
Carsten Otte313a3dc2007-10-11 19:16:52 +02001469/*
1470 * Read or write a bunch of msrs. All parameters are kernel addresses.
1471 *
1472 * @return number of msrs successfully read or written.
1473 */
1474static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
1475 struct kvm_msr_entry *entries,
1476 int (*do_msr)(struct kvm_vcpu *vcpu,
1477 unsigned index, u64 *data))
1478{
Marcelo Tosattif656ce02009-12-23 14:35:25 -02001479 int i, idx;
Carsten Otte313a3dc2007-10-11 19:16:52 +02001480
1481 vcpu_load(vcpu);
1482
Marcelo Tosattif656ce02009-12-23 14:35:25 -02001483 idx = srcu_read_lock(&vcpu->kvm->srcu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001484 for (i = 0; i < msrs->nmsrs; ++i)
1485 if (do_msr(vcpu, entries[i].index, &entries[i].data))
1486 break;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02001487 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001488
1489 vcpu_put(vcpu);
1490
1491 return i;
1492}
1493
1494/*
1495 * Read or write a bunch of msrs. Parameters are user addresses.
1496 *
1497 * @return number of msrs successfully read or written.
1498 */
1499static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1500 int (*do_msr)(struct kvm_vcpu *vcpu,
1501 unsigned index, u64 *data),
1502 int writeback)
1503{
1504 struct kvm_msrs msrs;
1505 struct kvm_msr_entry *entries;
1506 int r, n;
1507 unsigned size;
1508
1509 r = -EFAULT;
1510 if (copy_from_user(&msrs, user_msrs, sizeof msrs))
1511 goto out;
1512
1513 r = -E2BIG;
1514 if (msrs.nmsrs >= MAX_IO_MSRS)
1515 goto out;
1516
1517 r = -ENOMEM;
1518 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
1519 entries = vmalloc(size);
1520 if (!entries)
1521 goto out;
1522
1523 r = -EFAULT;
1524 if (copy_from_user(entries, user_msrs->entries, size))
1525 goto out_free;
1526
1527 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
1528 if (r < 0)
1529 goto out_free;
1530
1531 r = -EFAULT;
1532 if (writeback && copy_to_user(user_msrs->entries, entries, size))
1533 goto out_free;
1534
1535 r = n;
1536
1537out_free:
1538 vfree(entries);
1539out:
1540 return r;
1541}
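/*
 * Illustrative userspace sketch of the KVM_GET_MSRS path served by
 * msr_io() above (not part of this file; fd setup and error handling
 * omitted, variable names are assumptions):
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry e[1];
 *	} buf = {
 *		.hdr.nmsrs = 1,
 *		.e[0].index = MSR_IA32_APICBASE,
 *	};
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &buf);
 *	// n == 1 on success; buf.e[0].data then holds the MSR value
 */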
1542
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001543int kvm_dev_ioctl_check_extension(long ext)
1544{
1545 int r;
1546
1547 switch (ext) {
1548 case KVM_CAP_IRQCHIP:
1549 case KVM_CAP_HLT:
1550 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001551 case KVM_CAP_SET_TSS_ADDR:
Dan Kenigsberg07716712007-11-21 17:10:04 +02001552 case KVM_CAP_EXT_CPUID:
Gerd Hoffmannc8076602009-02-04 17:52:04 +01001553 case KVM_CAP_CLOCKSOURCE:
Sheng Yang78376992008-01-28 05:10:22 +08001554 case KVM_CAP_PIT:
Marcelo Tosattia28e4f52008-02-22 12:21:36 -05001555 case KVM_CAP_NOP_IO_DELAY:
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001556 case KVM_CAP_MP_STATE:
Avi Kivityed848622008-07-29 11:30:57 +03001557 case KVM_CAP_SYNC_MMU:
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02001558 case KVM_CAP_REINJECT_CONTROL:
Gleb Natapov49256632009-02-04 17:28:14 +02001559 case KVM_CAP_IRQ_INJECT_STATUS:
Sheng Yange56d5322009-03-12 21:45:39 +08001560 case KVM_CAP_ASSIGN_DEV_IRQ:
Gregory Haskins721eecbf2009-05-20 10:30:49 -04001561 case KVM_CAP_IRQFD:
Gregory Haskinsd34e6b12009-07-07 17:08:49 -04001562 case KVM_CAP_IOEVENTFD:
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02001563 case KVM_CAP_PIT2:
Beth Kone9f42752009-07-07 11:50:38 -04001564 case KVM_CAP_PIT_STATE2:
Sheng Yangb927a3c2009-07-21 10:42:48 +08001565 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
Ed Swierkffde22a2009-10-15 15:21:43 -07001566 case KVM_CAP_XEN_HVM:
Glauber Costaafbcf7a2009-10-16 15:28:36 -04001567 case KVM_CAP_ADJUST_CLOCK:
Jan Kiszka3cfc3092009-11-12 01:04:25 +01001568 case KVM_CAP_VCPU_EVENTS:
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001569 case KVM_CAP_HYPERV:
Gleb Natapov10388a02010-01-17 15:51:23 +02001570 case KVM_CAP_HYPERV_VAPIC:
Gleb Natapovc25bc162010-01-17 15:51:24 +02001571 case KVM_CAP_HYPERV_SPIN:
Zhai, Edwinab9f4ec2010-01-29 14:38:44 +08001572 case KVM_CAP_PCI_SEGMENT:
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001573 r = 1;
1574 break;
Laurent Vivier542472b2008-05-30 16:05:55 +02001575 case KVM_CAP_COALESCED_MMIO:
1576 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1577 break;
Avi Kivity774ead32007-12-26 13:57:04 +02001578 case KVM_CAP_VAPIC:
1579 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
1580 break;
Avi Kivityf7252302008-02-20 11:53:16 +02001581 case KVM_CAP_NR_VCPUS:
1582 r = KVM_MAX_VCPUS;
1583 break;
Avi Kivitya988b912008-02-20 11:59:20 +02001584 case KVM_CAP_NR_MEMSLOTS:
1585 r = KVM_MEMORY_SLOTS;
1586 break;
Marcelo Tosattia68a6a72009-10-01 19:28:39 -03001587 case KVM_CAP_PV_MMU: /* obsolete */
1588 r = 0;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05001589 break;
Ben-Ami Yassour62c476c2008-09-14 03:48:28 +03001590 case KVM_CAP_IOMMU:
Joerg Roedel19de40a2008-12-03 14:43:34 +01001591 r = iommu_found();
Ben-Ami Yassour62c476c2008-09-14 03:48:28 +03001592 break;
Huang Ying890ca9a2009-05-11 16:48:15 +08001593 case KVM_CAP_MCE:
1594 r = KVM_MAX_MCE_BANKS;
1595 break;
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001596 default:
1597 r = 0;
1598 break;
1599 }
1600 return r;
1601
1602}
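/*
 * Illustrative userspace sketch (variable names are assumptions, error
 * handling omitted): the capabilities reported above are probed with
 * KVM_CHECK_EXTENSION on the /dev/kvm fd, e.g.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PIT2) > 0)
 *		;	// KVM_CREATE_PIT2 is available
 */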
1603
Carsten Otte043405e2007-10-10 17:16:19 +02001604long kvm_arch_dev_ioctl(struct file *filp,
1605 unsigned int ioctl, unsigned long arg)
1606{
1607 void __user *argp = (void __user *)arg;
1608 long r;
1609
1610 switch (ioctl) {
1611 case KVM_GET_MSR_INDEX_LIST: {
1612 struct kvm_msr_list __user *user_msr_list = argp;
1613 struct kvm_msr_list msr_list;
1614 unsigned n;
1615
1616 r = -EFAULT;
1617 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
1618 goto out;
1619 n = msr_list.nmsrs;
1620 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
1621 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
1622 goto out;
1623 r = -E2BIG;
Jan Kiszkae125e7b2009-07-02 21:45:47 +02001624 if (n < msr_list.nmsrs)
Carsten Otte043405e2007-10-10 17:16:19 +02001625 goto out;
1626 r = -EFAULT;
1627 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
1628 num_msrs_to_save * sizeof(u32)))
1629 goto out;
Jan Kiszkae125e7b2009-07-02 21:45:47 +02001630 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
Carsten Otte043405e2007-10-10 17:16:19 +02001631 &emulated_msrs,
1632 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
1633 goto out;
1634 r = 0;
1635 break;
1636 }
Avi Kivity674eea02008-02-11 18:37:23 +02001637 case KVM_GET_SUPPORTED_CPUID: {
1638 struct kvm_cpuid2 __user *cpuid_arg = argp;
1639 struct kvm_cpuid2 cpuid;
1640
1641 r = -EFAULT;
1642 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1643 goto out;
1644 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
Amit Shah19355472009-01-14 16:56:00 +00001645 cpuid_arg->entries);
Avi Kivity674eea02008-02-11 18:37:23 +02001646 if (r)
1647 goto out;
1648
1649 r = -EFAULT;
1650 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1651 goto out;
1652 r = 0;
1653 break;
1654 }
Huang Ying890ca9a2009-05-11 16:48:15 +08001655 case KVM_X86_GET_MCE_CAP_SUPPORTED: {
1656 u64 mce_cap;
1657
1658 mce_cap = KVM_MCE_CAP_SUPPORTED;
1659 r = -EFAULT;
1660 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
1661 goto out;
1662 r = 0;
1663 break;
1664 }
Carsten Otte043405e2007-10-10 17:16:19 +02001665 default:
1666 r = -EINVAL;
1667 }
1668out:
1669 return r;
1670}
1671
Carsten Otte313a3dc2007-10-11 19:16:52 +02001672void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1673{
1674 kvm_x86_ops->vcpu_load(vcpu, cpu);
Zachary Amsden6b7d7e72009-10-09 16:26:08 -10001675 if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
1676 unsigned long khz = cpufreq_quick_get(cpu);
1677 if (!khz)
1678 khz = tsc_khz;
1679 per_cpu(cpu_tsc_khz, cpu) = khz;
1680 }
Gerd Hoffmannc8076602009-02-04 17:52:04 +01001681 kvm_request_guest_time_update(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001682}
1683
1684void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1685{
Amit Shah9327fd12007-11-15 18:38:46 +02001686 kvm_put_guest_fpu(vcpu);
Avi Kivity02daab22009-12-30 12:40:26 +02001687 kvm_x86_ops->vcpu_put(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001688}
1689
Dan Kenigsberg07716712007-11-21 17:10:04 +02001690static int is_efer_nx(void)
Carsten Otte313a3dc2007-10-11 19:16:52 +02001691{
Avi Kivitye286e862009-05-03 18:50:55 +03001692 unsigned long long efer = 0;
Carsten Otte313a3dc2007-10-11 19:16:52 +02001693
Avi Kivitye286e862009-05-03 18:50:55 +03001694 rdmsrl_safe(MSR_EFER, &efer);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001695 return efer & EFER_NX;
1696}
1697
1698static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
1699{
1700 int i;
1701 struct kvm_cpuid_entry2 *e, *entry;
1702
Carsten Otte313a3dc2007-10-11 19:16:52 +02001703 entry = NULL;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001704 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
1705 e = &vcpu->arch.cpuid_entries[i];
Carsten Otte313a3dc2007-10-11 19:16:52 +02001706 if (e->function == 0x80000001) {
1707 entry = e;
1708 break;
1709 }
1710 }
Dan Kenigsberg07716712007-11-21 17:10:04 +02001711 if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
Carsten Otte313a3dc2007-10-11 19:16:52 +02001712 entry->edx &= ~(1 << 20);
1713 printk(KERN_INFO "kvm: guest NX capability removed\n");
1714 }
1715}
1716
Dan Kenigsberg07716712007-11-21 17:10:04 +02001717/* when an old userspace process (legacy KVM_SET_CPUID format) fills cpuid entries into a newer kernel module */
Carsten Otte313a3dc2007-10-11 19:16:52 +02001718static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
1719 struct kvm_cpuid *cpuid,
1720 struct kvm_cpuid_entry __user *entries)
1721{
Dan Kenigsberg07716712007-11-21 17:10:04 +02001722 int r, i;
1723 struct kvm_cpuid_entry *cpuid_entries;
1724
1725 r = -E2BIG;
1726 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1727 goto out;
1728 r = -ENOMEM;
1729 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
1730 if (!cpuid_entries)
1731 goto out;
1732 r = -EFAULT;
1733 if (copy_from_user(cpuid_entries, entries,
1734 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
1735 goto out_free;
1736 for (i = 0; i < cpuid->nent; i++) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001737 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
1738 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
1739 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
1740 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
1741 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
1742 vcpu->arch.cpuid_entries[i].index = 0;
1743 vcpu->arch.cpuid_entries[i].flags = 0;
1744 vcpu->arch.cpuid_entries[i].padding[0] = 0;
1745 vcpu->arch.cpuid_entries[i].padding[1] = 0;
1746 vcpu->arch.cpuid_entries[i].padding[2] = 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001747 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001748 vcpu->arch.cpuid_nent = cpuid->nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001749 cpuid_fix_nx_cap(vcpu);
1750 r = 0;
Gleb Natapovfc61b802009-07-05 17:39:35 +03001751 kvm_apic_set_version(vcpu);
Sheng Yang0e851882009-12-18 16:48:46 +08001752 kvm_x86_ops->cpuid_update(vcpu);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001753
1754out_free:
1755 vfree(cpuid_entries);
1756out:
1757 return r;
1758}
1759
1760static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
Amit Shah19355472009-01-14 16:56:00 +00001761 struct kvm_cpuid2 *cpuid,
1762 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001763{
Carsten Otte313a3dc2007-10-11 19:16:52 +02001764 int r;
1765
1766 r = -E2BIG;
1767 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1768 goto out;
1769 r = -EFAULT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001770 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
Dan Kenigsberg07716712007-11-21 17:10:04 +02001771 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
Carsten Otte313a3dc2007-10-11 19:16:52 +02001772 goto out;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001773 vcpu->arch.cpuid_nent = cpuid->nent;
Gleb Natapovfc61b802009-07-05 17:39:35 +03001774 kvm_apic_set_version(vcpu);
Sheng Yang0e851882009-12-18 16:48:46 +08001775 kvm_x86_ops->cpuid_update(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001776 return 0;
1777
1778out:
1779 return r;
1780}
1781
Dan Kenigsberg07716712007-11-21 17:10:04 +02001782static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
Amit Shah19355472009-01-14 16:56:00 +00001783 struct kvm_cpuid2 *cpuid,
1784 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001785{
1786 int r;
1787
1788 r = -E2BIG;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001789 if (cpuid->nent < vcpu->arch.cpuid_nent)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001790 goto out;
1791 r = -EFAULT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001792 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
Amit Shah19355472009-01-14 16:56:00 +00001793 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
Dan Kenigsberg07716712007-11-21 17:10:04 +02001794 goto out;
1795 return 0;
1796
1797out:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001798 cpuid->nent = vcpu->arch.cpuid_nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001799 return r;
1800}
1801
Dan Kenigsberg07716712007-11-21 17:10:04 +02001802static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
Amit Shah19355472009-01-14 16:56:00 +00001803 u32 index)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001804{
1805 entry->function = function;
1806 entry->index = index;
1807 cpuid_count(entry->function, entry->index,
Amit Shah19355472009-01-14 16:56:00 +00001808 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001809 entry->flags = 0;
1810}
1811
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001812#define F(x) bit(X86_FEATURE_##x)
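/* F(X) is the mask for feature bit X86_FEATURE_X within its cpuid word */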
1813
Dan Kenigsberg07716712007-11-21 17:10:04 +02001814static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1815 u32 index, int *nent, int maxnent)
1816{
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001817 unsigned f_nx = is_efer_nx() ? F(NX) : 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001818#ifdef CONFIG_X86_64
Sheng Yang17cc3932010-01-05 19:02:27 +08001819 unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
1820 ? F(GBPAGES) : 0;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001821 unsigned f_lm = F(LM);
1822#else
Sheng Yang17cc3932010-01-05 19:02:27 +08001823 unsigned f_gbpages = 0;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001824 unsigned f_lm = 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001825#endif
Sheng Yang4e47c7a2009-12-18 16:48:47 +08001826 unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001827
1828 /* cpuid 1.edx */
1829 const u32 kvm_supported_word0_x86_features =
1830 F(FPU) | F(VME) | F(DE) | F(PSE) |
1831 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1832 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
1833 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1834 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
1835 0 /* Reserved, DS, ACPI */ | F(MMX) |
1836 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
1837 0 /* HTT, TM, Reserved, PBE */;
1838 /* cpuid 0x80000001.edx */
1839 const u32 kvm_supported_word1_x86_features =
1840 F(FPU) | F(VME) | F(DE) | F(PSE) |
1841 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1842 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
1843 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1844 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1845 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
Sheng Yang4e47c7a2009-12-18 16:48:47 +08001846 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001847 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
1848 /* cpuid 1.ecx */
1849 const u32 kvm_supported_word4_x86_features =
Avi Kivityd149c732009-05-10 14:41:56 +03001850 F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
1851 0 /* DS-CPL, VMX, SMX, EST */ |
1852 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
1853 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
1854 0 /* Reserved, DCA */ | F(XMM4_1) |
Gleb Natapov0105d1a2009-07-05 17:39:36 +03001855 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
Avi Kivityd149c732009-05-10 14:41:56 +03001856 0 /* Reserved, XSAVE, OSXSAVE */;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001857 /* cpuid 0x80000001.ecx */
Dan Kenigsberg07716712007-11-21 17:10:04 +02001858 const u32 kvm_supported_word6_x86_features =
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001859 F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
1860 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
1861 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
1862 0 /* SKINIT */ | 0 /* WDT */;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001863
Amit Shah19355472009-01-14 16:56:00 +00001864 /* all calls to cpuid_count() should be made on the same cpu */
Dan Kenigsberg07716712007-11-21 17:10:04 +02001865 get_cpu();
1866 do_cpuid_1_ent(entry, function, index);
1867 ++*nent;
1868
1869 switch (function) {
1870 case 0:
1871 entry->eax = min(entry->eax, (u32)0xb);
1872 break;
1873 case 1:
1874 entry->edx &= kvm_supported_word0_x86_features;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001875 entry->ecx &= kvm_supported_word4_x86_features;
Gleb Natapov0d1de2d92009-07-12 16:10:55 +03001876 /* we support x2apic emulation even if host does not support
1877 * it since we emulate x2apic in software */
1878 entry->ecx |= F(X2APIC);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001879 break;
1880 /* function 2 entries are STATEFUL. That is, repeated cpuid commands
1881 * may return different values. This forces us to get_cpu() before
1882 * issuing the first command, and also to emulate this annoying behavior
1883 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
1884 case 2: {
1885 int t, times = entry->eax & 0xff;
1886
1887 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
Nitin A Kamble0fdf8e52008-11-05 15:56:21 -08001888 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001889 for (t = 1; t < times && *nent < maxnent; ++t) {
1890 do_cpuid_1_ent(&entry[t], function, 0);
1891 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
1892 ++*nent;
1893 }
1894 break;
1895 }
1896 /* function 4 and 0xb have additional index. */
1897 case 4: {
Harvey Harrison14af3f32008-02-19 10:25:50 -08001898 int i, cache_type;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001899
1900 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1901 /* read more entries until cache_type is zero */
Harvey Harrison14af3f32008-02-19 10:25:50 -08001902 for (i = 1; *nent < maxnent; ++i) {
1903 cache_type = entry[i - 1].eax & 0x1f;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001904 if (!cache_type)
1905 break;
Harvey Harrison14af3f32008-02-19 10:25:50 -08001906 do_cpuid_1_ent(&entry[i], function, i);
1907 entry[i].flags |=
Dan Kenigsberg07716712007-11-21 17:10:04 +02001908 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1909 ++*nent;
1910 }
1911 break;
1912 }
1913 case 0xb: {
Harvey Harrison14af3f32008-02-19 10:25:50 -08001914 int i, level_type;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001915
1916 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1917 /* read more entries until level_type is zero */
Harvey Harrison14af3f32008-02-19 10:25:50 -08001918 for (i = 1; *nent < maxnent; ++i) {
Nitin A Kamble0853d2c2008-11-05 15:37:36 -08001919 level_type = entry[i - 1].ecx & 0xff00;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001920 if (!level_type)
1921 break;
Harvey Harrison14af3f32008-02-19 10:25:50 -08001922 do_cpuid_1_ent(&entry[i], function, i);
1923 entry[i].flags |=
Dan Kenigsberg07716712007-11-21 17:10:04 +02001924 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1925 ++*nent;
1926 }
1927 break;
1928 }
1929 case 0x80000000:
1930 entry->eax = min(entry->eax, 0x8000001a);
1931 break;
1932 case 0x80000001:
1933 entry->edx &= kvm_supported_word1_x86_features;
1934 entry->ecx &= kvm_supported_word6_x86_features;
1935 break;
1936 }
1937 put_cpu();
1938}
1939
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001940#undef F
1941
Avi Kivity674eea02008-02-11 18:37:23 +02001942static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
Amit Shah19355472009-01-14 16:56:00 +00001943 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001944{
1945 struct kvm_cpuid_entry2 *cpuid_entries;
1946 int limit, nent = 0, r = -E2BIG;
1947 u32 func;
1948
1949 if (cpuid->nent < 1)
1950 goto out;
Avi Kivity6a544352009-10-04 16:45:13 +02001951 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1952 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001953 r = -ENOMEM;
1954 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
1955 if (!cpuid_entries)
1956 goto out;
1957
1958 do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
1959 limit = cpuid_entries[0].eax;
1960 for (func = 1; func <= limit && nent < cpuid->nent; ++func)
1961 do_cpuid_ent(&cpuid_entries[nent], func, 0,
Amit Shah19355472009-01-14 16:56:00 +00001962 &nent, cpuid->nent);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001963 r = -E2BIG;
1964 if (nent >= cpuid->nent)
1965 goto out_free;
1966
1967 do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
1968 limit = cpuid_entries[nent - 1].eax;
1969 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
1970 do_cpuid_ent(&cpuid_entries[nent], func, 0,
Amit Shah19355472009-01-14 16:56:00 +00001971 &nent, cpuid->nent);
Mark McLoughlincb007642009-05-12 12:36:44 +01001972 r = -E2BIG;
1973 if (nent >= cpuid->nent)
1974 goto out_free;
1975
Dan Kenigsberg07716712007-11-21 17:10:04 +02001976 r = -EFAULT;
1977 if (copy_to_user(entries, cpuid_entries,
Amit Shah19355472009-01-14 16:56:00 +00001978 nent * sizeof(struct kvm_cpuid_entry2)))
Dan Kenigsberg07716712007-11-21 17:10:04 +02001979 goto out_free;
1980 cpuid->nent = nent;
1981 r = 0;
1982
1983out_free:
1984 vfree(cpuid_entries);
1985out:
1986 return r;
1987}
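/*
 * Illustrative userspace sketch (names are assumptions, error handling
 * omitted): callers of KVM_GET_SUPPORTED_CPUID pick a buffer size and
 * retry with a bigger one if the kernel reports E2BIG, as signalled above:
 *
 *	struct {
 *		struct kvm_cpuid2 hdr;
 *		struct kvm_cpuid_entry2 ent[64];
 *	} c = { .hdr.nent = 64 };
 *	if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, &c) == 0)
 *		;	// c.hdr.nent now holds the number of valid entries
 */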
1988
Carsten Otte313a3dc2007-10-11 19:16:52 +02001989static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
1990 struct kvm_lapic_state *s)
1991{
1992 vcpu_load(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001993 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001994 vcpu_put(vcpu);
1995
1996 return 0;
1997}
1998
1999static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2000 struct kvm_lapic_state *s)
2001{
2002 vcpu_load(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002003 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002004 kvm_apic_post_state_restore(vcpu);
Gleb Natapovcb142eb2009-08-09 15:17:40 +03002005 update_cr8_intercept(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002006 vcpu_put(vcpu);
2007
2008 return 0;
2009}
2010
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08002011static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2012 struct kvm_interrupt *irq)
2013{
2014 if (irq->irq < 0 || irq->irq >= 256)
2015 return -EINVAL;
2016 if (irqchip_in_kernel(vcpu->kvm))
2017 return -ENXIO;
2018 vcpu_load(vcpu);
2019
Gleb Natapov66fd3f72009-05-11 13:35:50 +03002020 kvm_queue_interrupt(vcpu, irq->irq, false);
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08002021
2022 vcpu_put(vcpu);
2023
2024 return 0;
2025}
2026
Jan Kiszkac4abb7c2008-09-26 09:30:55 +02002027static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2028{
2029 vcpu_load(vcpu);
2030 kvm_inject_nmi(vcpu);
2031 vcpu_put(vcpu);
2032
2033 return 0;
2034}
2035
Avi Kivityb209749f2007-10-22 16:50:39 +02002036static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2037 struct kvm_tpr_access_ctl *tac)
2038{
2039 if (tac->flags)
2040 return -EINVAL;
2041 vcpu->arch.tpr_access_reporting = !!tac->enabled;
2042 return 0;
2043}
2044
Huang Ying890ca9a2009-05-11 16:48:15 +08002045static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2046 u64 mcg_cap)
2047{
2048 int r;
2049 unsigned bank_num = mcg_cap & 0xff, bank;
2050
2051 r = -EINVAL;
Jan Kiszkaa9e38c3e2009-10-23 09:37:00 +02002052 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
Huang Ying890ca9a2009-05-11 16:48:15 +08002053 goto out;
2054 if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2055 goto out;
2056 r = 0;
2057 vcpu->arch.mcg_cap = mcg_cap;
2058 /* Init IA32_MCG_CTL to all 1s */
2059 if (mcg_cap & MCG_CTL_P)
2060 vcpu->arch.mcg_ctl = ~(u64)0;
2061 /* Init IA32_MCi_CTL to all 1s */
2062 for (bank = 0; bank < bank_num; bank++)
2063 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2064out:
2065 return r;
2066}
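/*
 * Note: the low byte of mcg_cap is the requested number of MCE banks;
 * KVM_X86_GET_MCE_CAP_SUPPORTED (see kvm_arch_dev_ioctl() above) tells
 * userspace which of the remaining capability bits may be set here.
 */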
2067
2068static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2069 struct kvm_x86_mce *mce)
2070{
2071 u64 mcg_cap = vcpu->arch.mcg_cap;
2072 unsigned bank_num = mcg_cap & 0xff;
2073 u64 *banks = vcpu->arch.mce_banks;
2074
2075 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2076 return -EINVAL;
2077 /*
2078 * if IA32_MCG_CTL is not all 1s, the uncorrected error
2079 * reporting is disabled
2080 */
2081 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2082 vcpu->arch.mcg_ctl != ~(u64)0)
2083 return 0;
2084 banks += 4 * mce->bank;
2085 /*
2086 * if IA32_MCi_CTL is not all 1s, the uncorrected error
2087 * reporting is disabled for the bank
2088 */
2089 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2090 return 0;
2091 if (mce->status & MCI_STATUS_UC) {
2092 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
Avi Kivityfc78f512009-12-07 12:16:48 +02002093 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
Huang Ying890ca9a2009-05-11 16:48:15 +08002094 printk(KERN_DEBUG "kvm: set_mce: "
2095 "injects mce exception while "
2096 "previous one is in progress!\n");
2097 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
2098 return 0;
2099 }
2100 if (banks[1] & MCI_STATUS_VAL)
2101 mce->status |= MCI_STATUS_OVER;
2102 banks[2] = mce->addr;
2103 banks[3] = mce->misc;
2104 vcpu->arch.mcg_status = mce->mcg_status;
2105 banks[1] = mce->status;
2106 kvm_queue_exception(vcpu, MC_VECTOR);
2107 } else if (!(banks[1] & MCI_STATUS_VAL)
2108 || !(banks[1] & MCI_STATUS_UC)) {
2109 if (banks[1] & MCI_STATUS_VAL)
2110 mce->status |= MCI_STATUS_OVER;
2111 banks[2] = mce->addr;
2112 banks[3] = mce->misc;
2113 banks[1] = mce->status;
2114 } else
2115 banks[1] |= MCI_STATUS_OVER;
2116 return 0;
2117}
2118
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002119static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2120 struct kvm_vcpu_events *events)
2121{
2122 vcpu_load(vcpu);
2123
2124 events->exception.injected = vcpu->arch.exception.pending;
2125 events->exception.nr = vcpu->arch.exception.nr;
2126 events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2127 events->exception.error_code = vcpu->arch.exception.error_code;
2128
2129 events->interrupt.injected = vcpu->arch.interrupt.pending;
2130 events->interrupt.nr = vcpu->arch.interrupt.nr;
2131 events->interrupt.soft = vcpu->arch.interrupt.soft;
2132
2133 events->nmi.injected = vcpu->arch.nmi_injected;
2134 events->nmi.pending = vcpu->arch.nmi_pending;
2135 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2136
2137 events->sipi_vector = vcpu->arch.sipi_vector;
2138
Jan Kiszkadab4b912009-12-06 18:24:15 +01002139 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2140 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002141
2142 vcpu_put(vcpu);
2143}
2144
2145static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2146 struct kvm_vcpu_events *events)
2147{
Jan Kiszkadab4b912009-12-06 18:24:15 +01002148 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2149 | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002150 return -EINVAL;
2151
2152 vcpu_load(vcpu);
2153
2154 vcpu->arch.exception.pending = events->exception.injected;
2155 vcpu->arch.exception.nr = events->exception.nr;
2156 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2157 vcpu->arch.exception.error_code = events->exception.error_code;
2158
2159 vcpu->arch.interrupt.pending = events->interrupt.injected;
2160 vcpu->arch.interrupt.nr = events->interrupt.nr;
2161 vcpu->arch.interrupt.soft = events->interrupt.soft;
2162 if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
2163 kvm_pic_clear_isr_ack(vcpu->kvm);
2164
2165 vcpu->arch.nmi_injected = events->nmi.injected;
Jan Kiszkadab4b912009-12-06 18:24:15 +01002166 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2167 vcpu->arch.nmi_pending = events->nmi.pending;
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002168 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2169
Jan Kiszkadab4b912009-12-06 18:24:15 +01002170 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2171 vcpu->arch.sipi_vector = events->sipi_vector;
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002172
2173 vcpu_put(vcpu);
2174
2175 return 0;
2176}
2177
Carsten Otte313a3dc2007-10-11 19:16:52 +02002178long kvm_arch_vcpu_ioctl(struct file *filp,
2179 unsigned int ioctl, unsigned long arg)
2180{
2181 struct kvm_vcpu *vcpu = filp->private_data;
2182 void __user *argp = (void __user *)arg;
2183 int r;
Dave Hansenb772ff32008-08-11 10:01:47 -07002184 struct kvm_lapic_state *lapic = NULL;
Carsten Otte313a3dc2007-10-11 19:16:52 +02002185
2186 switch (ioctl) {
2187 case KVM_GET_LAPIC: {
Marcelo Tosatti2204ae32009-10-29 13:44:16 -02002188 r = -EINVAL;
2189 if (!vcpu->arch.apic)
2190 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002191 lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002192
Dave Hansenb772ff32008-08-11 10:01:47 -07002193 r = -ENOMEM;
2194 if (!lapic)
2195 goto out;
2196 r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002197 if (r)
2198 goto out;
2199 r = -EFAULT;
Dave Hansenb772ff32008-08-11 10:01:47 -07002200 if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
Carsten Otte313a3dc2007-10-11 19:16:52 +02002201 goto out;
2202 r = 0;
2203 break;
2204 }
2205 case KVM_SET_LAPIC: {
Marcelo Tosatti2204ae32009-10-29 13:44:16 -02002206 r = -EINVAL;
2207 if (!vcpu->arch.apic)
2208 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002209 lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2210 r = -ENOMEM;
2211 if (!lapic)
Carsten Otte313a3dc2007-10-11 19:16:52 +02002212 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002213 r = -EFAULT;
2214 if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
2215 goto out;
2216 r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002217 if (r)
2218 goto out;
2219 r = 0;
2220 break;
2221 }
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08002222 case KVM_INTERRUPT: {
2223 struct kvm_interrupt irq;
2224
2225 r = -EFAULT;
2226 if (copy_from_user(&irq, argp, sizeof irq))
2227 goto out;
2228 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2229 if (r)
2230 goto out;
2231 r = 0;
2232 break;
2233 }
Jan Kiszkac4abb7c2008-09-26 09:30:55 +02002234 case KVM_NMI: {
2235 r = kvm_vcpu_ioctl_nmi(vcpu);
2236 if (r)
2237 goto out;
2238 r = 0;
2239 break;
2240 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002241 case KVM_SET_CPUID: {
2242 struct kvm_cpuid __user *cpuid_arg = argp;
2243 struct kvm_cpuid cpuid;
2244
2245 r = -EFAULT;
2246 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2247 goto out;
2248 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2249 if (r)
2250 goto out;
2251 break;
2252 }
Dan Kenigsberg07716712007-11-21 17:10:04 +02002253 case KVM_SET_CPUID2: {
2254 struct kvm_cpuid2 __user *cpuid_arg = argp;
2255 struct kvm_cpuid2 cpuid;
2256
2257 r = -EFAULT;
2258 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2259 goto out;
2260 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
Amit Shah19355472009-01-14 16:56:00 +00002261 cpuid_arg->entries);
Dan Kenigsberg07716712007-11-21 17:10:04 +02002262 if (r)
2263 goto out;
2264 break;
2265 }
2266 case KVM_GET_CPUID2: {
2267 struct kvm_cpuid2 __user *cpuid_arg = argp;
2268 struct kvm_cpuid2 cpuid;
2269
2270 r = -EFAULT;
2271 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2272 goto out;
2273 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
Amit Shah19355472009-01-14 16:56:00 +00002274 cpuid_arg->entries);
Dan Kenigsberg07716712007-11-21 17:10:04 +02002275 if (r)
2276 goto out;
2277 r = -EFAULT;
2278 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2279 goto out;
2280 r = 0;
2281 break;
2282 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002283 case KVM_GET_MSRS:
2284 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2285 break;
2286 case KVM_SET_MSRS:
2287 r = msr_io(vcpu, argp, do_set_msr, 0);
2288 break;
Avi Kivityb209749f2007-10-22 16:50:39 +02002289 case KVM_TPR_ACCESS_REPORTING: {
2290 struct kvm_tpr_access_ctl tac;
2291
2292 r = -EFAULT;
2293 if (copy_from_user(&tac, argp, sizeof tac))
2294 goto out;
2295 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2296 if (r)
2297 goto out;
2298 r = -EFAULT;
2299 if (copy_to_user(argp, &tac, sizeof tac))
2300 goto out;
2301 r = 0;
2302 break;
2303 };
Avi Kivityb93463a2007-10-25 16:52:32 +02002304 case KVM_SET_VAPIC_ADDR: {
2305 struct kvm_vapic_addr va;
2306
2307 r = -EINVAL;
2308 if (!irqchip_in_kernel(vcpu->kvm))
2309 goto out;
2310 r = -EFAULT;
2311 if (copy_from_user(&va, argp, sizeof va))
2312 goto out;
2313 r = 0;
2314 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2315 break;
2316 }
Huang Ying890ca9a2009-05-11 16:48:15 +08002317 case KVM_X86_SETUP_MCE: {
2318 u64 mcg_cap;
2319
2320 r = -EFAULT;
2321 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2322 goto out;
2323 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2324 break;
2325 }
2326 case KVM_X86_SET_MCE: {
2327 struct kvm_x86_mce mce;
2328
2329 r = -EFAULT;
2330 if (copy_from_user(&mce, argp, sizeof mce))
2331 goto out;
2332 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2333 break;
2334 }
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002335 case KVM_GET_VCPU_EVENTS: {
2336 struct kvm_vcpu_events events;
2337
2338 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2339
2340 r = -EFAULT;
2341 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2342 break;
2343 r = 0;
2344 break;
2345 }
2346 case KVM_SET_VCPU_EVENTS: {
2347 struct kvm_vcpu_events events;
2348
2349 r = -EFAULT;
2350 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2351 break;
2352
2353 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2354 break;
2355 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002356 default:
2357 r = -EINVAL;
2358 }
2359out:
Wei Yongjun7a6ce842009-03-31 16:47:44 +08002360 kfree(lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002361 return r;
2362}
2363
Carsten Otte1fe779f2007-10-29 16:08:35 +01002364static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2365{
2366 int ret;
2367
2368 if (addr > (unsigned int)(-3 * PAGE_SIZE))
2369 return -1;
2370 ret = kvm_x86_ops->set_tss_addr(kvm, addr);
2371 return ret;
2372}
2373
Sheng Yangb927a3c2009-07-21 10:42:48 +08002374static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2375 u64 ident_addr)
2376{
2377 kvm->arch.ept_identity_map_addr = ident_addr;
2378 return 0;
2379}
2380
Carsten Otte1fe779f2007-10-29 16:08:35 +01002381static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2382 u32 kvm_nr_mmu_pages)
2383{
2384 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2385 return -EINVAL;
2386
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002387 mutex_lock(&kvm->slots_lock);
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002388 spin_lock(&kvm->mmu_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002389
2390 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002391 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002392
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002393 spin_unlock(&kvm->mmu_lock);
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002394 mutex_unlock(&kvm->slots_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002395 return 0;
2396}
2397
2398static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2399{
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002400 return kvm->arch.n_alloc_mmu_pages;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002401}
2402
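/*
 * Translate a gfn through the alias table, skipping aliases marked
 * KVM_ALIAS_INVALID (set while an alias is being deleted or shrunk in
 * kvm_vm_ioctl_set_memory_alias() below), so that no new translations
 * are instantiated through a dying alias. unalias_gfn() translates
 * through all aliases regardless.
 */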
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002403gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
2404{
2405 int i;
2406 struct kvm_mem_alias *alias;
2407 struct kvm_mem_aliases *aliases;
2408
2409 aliases = rcu_dereference(kvm->arch.aliases);
2410
2411 for (i = 0; i < aliases->naliases; ++i) {
2412 alias = &aliases->aliases[i];
2413 if (alias->flags & KVM_ALIAS_INVALID)
2414 continue;
2415 if (gfn >= alias->base_gfn
2416 && gfn < alias->base_gfn + alias->npages)
2417 return alias->target_gfn + gfn - alias->base_gfn;
2418 }
2419 return gfn;
2420}
2421
Zhang Xiantaoe9f85cd2007-11-22 11:20:33 +08002422gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
2423{
2424 int i;
2425 struct kvm_mem_alias *alias;
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002426 struct kvm_mem_aliases *aliases;
2427
2428 aliases = rcu_dereference(kvm->arch.aliases);
Zhang Xiantaoe9f85cd2007-11-22 11:20:33 +08002429
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002430 for (i = 0; i < aliases->naliases; ++i) {
2431 alias = &aliases->aliases[i];
Zhang Xiantaoe9f85cd2007-11-22 11:20:33 +08002432 if (gfn >= alias->base_gfn
2433 && gfn < alias->base_gfn + alias->npages)
2434 return alias->target_gfn + gfn - alias->base_gfn;
2435 }
2436 return gfn;
2437}
2438
Carsten Otte1fe779f2007-10-29 16:08:35 +01002439/*
2440 * Set a new alias region. Aliases map a portion of physical memory into
2441 * another portion. This is useful for memory windows, for example the PC
2442 * VGA region.
2443 */
2444static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
2445 struct kvm_memory_alias *alias)
2446{
2447 int r, n;
2448 struct kvm_mem_alias *p;
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002449 struct kvm_mem_aliases *aliases, *old_aliases;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002450
2451 r = -EINVAL;
2452 /* General sanity checks */
2453 if (alias->memory_size & (PAGE_SIZE - 1))
2454 goto out;
2455 if (alias->guest_phys_addr & (PAGE_SIZE - 1))
2456 goto out;
2457 if (alias->slot >= KVM_ALIAS_SLOTS)
2458 goto out;
2459 if (alias->guest_phys_addr + alias->memory_size
2460 < alias->guest_phys_addr)
2461 goto out;
2462 if (alias->target_phys_addr + alias->memory_size
2463 < alias->target_phys_addr)
2464 goto out;
2465
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002466 r = -ENOMEM;
2467 aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
2468 if (!aliases)
2469 goto out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002470
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002471 mutex_lock(&kvm->slots_lock);
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002472
2473 /* invalidate any gfn reference in case of deletion/shrinking */
2474 memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
2475 aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
2476 old_aliases = kvm->arch.aliases;
2477 rcu_assign_pointer(kvm->arch.aliases, aliases);
2478 synchronize_srcu_expedited(&kvm->srcu);
2479 kvm_mmu_zap_all(kvm);
2480 kfree(old_aliases);
2481
2482 r = -ENOMEM;
2483 aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
2484 if (!aliases)
2485 goto out_unlock;
2486
2487 memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002488
2489 p = &aliases->aliases[alias->slot];
Carsten Otte1fe779f2007-10-29 16:08:35 +01002490 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
2491 p->npages = alias->memory_size >> PAGE_SHIFT;
2492 p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002493 p->flags &= ~(KVM_ALIAS_INVALID);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002494
2495 for (n = KVM_ALIAS_SLOTS; n > 0; --n)
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002496 if (aliases->aliases[n - 1].npages)
Carsten Otte1fe779f2007-10-29 16:08:35 +01002497 break;
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002498 aliases->naliases = n;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002499
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002500 old_aliases = kvm->arch.aliases;
2501 rcu_assign_pointer(kvm->arch.aliases, aliases);
2502 synchronize_srcu_expedited(&kvm->srcu);
2503 kfree(old_aliases);
2504 r = 0;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002505
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002506out_unlock:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002507 mutex_unlock(&kvm->slots_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002508out:
2509 return r;
2510}
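/*
 * Illustrative userspace sketch of the VGA window case mentioned above
 * (addresses and variable names are assumptions, error handling omitted):
 *
 *	struct kvm_memory_alias va = {
 *		.slot = 0,
 *		.guest_phys_addr = 0xa0000,		// legacy VGA window
 *		.memory_size = 0x20000,
 *		.target_phys_addr = framebuffer_gpa,	// assumed variable
 *	};
 *	ioctl(vm_fd, KVM_SET_MEMORY_ALIAS, &va);
 */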
2511
2512static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2513{
2514 int r;
2515
2516 r = 0;
2517 switch (chip->chip_id) {
2518 case KVM_IRQCHIP_PIC_MASTER:
2519 memcpy(&chip->chip.pic,
2520 &pic_irqchip(kvm)->pics[0],
2521 sizeof(struct kvm_pic_state));
2522 break;
2523 case KVM_IRQCHIP_PIC_SLAVE:
2524 memcpy(&chip->chip.pic,
2525 &pic_irqchip(kvm)->pics[1],
2526 sizeof(struct kvm_pic_state));
2527 break;
2528 case KVM_IRQCHIP_IOAPIC:
Gleb Natapoveba02262009-08-24 11:54:25 +03002529 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002530 break;
2531 default:
2532 r = -EINVAL;
2533 break;
2534 }
2535 return r;
2536}
2537
2538static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2539{
2540 int r;
2541
2542 r = 0;
2543 switch (chip->chip_id) {
2544 case KVM_IRQCHIP_PIC_MASTER:
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002545 raw_spin_lock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002546 memcpy(&pic_irqchip(kvm)->pics[0],
2547 &chip->chip.pic,
2548 sizeof(struct kvm_pic_state));
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002549 raw_spin_unlock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002550 break;
2551 case KVM_IRQCHIP_PIC_SLAVE:
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002552 raw_spin_lock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002553 memcpy(&pic_irqchip(kvm)->pics[1],
2554 &chip->chip.pic,
2555 sizeof(struct kvm_pic_state));
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002556 raw_spin_unlock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002557 break;
2558 case KVM_IRQCHIP_IOAPIC:
Gleb Natapoveba02262009-08-24 11:54:25 +03002559 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002560 break;
2561 default:
2562 r = -EINVAL;
2563 break;
2564 }
2565 kvm_pic_update_irq(pic_irqchip(kvm));
2566 return r;
2567}
2568
Sheng Yange0f63cb2008-03-04 00:50:59 +08002569static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2570{
2571 int r = 0;
2572
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002573 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002574 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002575 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002576 return r;
2577}
2578
2579static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2580{
2581 int r = 0;
2582
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002583 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002584 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
Beth Kone9f42752009-07-07 11:50:38 -04002585 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2586 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2587 return r;
2588}
2589
2590static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2591{
2592 int r = 0;
2593
2594 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2595 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
2596 sizeof(ps->channels));
2597 ps->flags = kvm->arch.vpit->pit_state.flags;
2598 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2599 return r;
2600}
2601
2602static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2603{
2604 int r = 0, start = 0;
2605 u32 prev_legacy, cur_legacy;
2606 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2607 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
2608 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
2609 if (!prev_legacy && cur_legacy)
2610 start = 1;
2611 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
2612 sizeof(kvm->arch.vpit->pit_state.channels));
2613 kvm->arch.vpit->pit_state.flags = ps->flags;
2614 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002615 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002616 return r;
2617}
2618
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002619static int kvm_vm_ioctl_reinject(struct kvm *kvm,
2620 struct kvm_reinject_control *control)
2621{
2622 if (!kvm->arch.vpit)
2623 return -ENXIO;
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002624 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002625 kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002626 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002627 return 0;
2628}
2629
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002630/*
2631 * Get (and clear) the dirty memory log for a memory slot.
2632 */
2633int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2634 struct kvm_dirty_log *log)
2635{
Marcelo Tosattib050b012009-12-23 14:35:22 -02002636 int r, n, i;
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002637 struct kvm_memory_slot *memslot;
Marcelo Tosattib050b012009-12-23 14:35:22 -02002638 unsigned long is_dirty = 0;
2639 unsigned long *dirty_bitmap = NULL;
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002640
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002641 mutex_lock(&kvm->slots_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002642
Marcelo Tosattib050b012009-12-23 14:35:22 -02002643 r = -EINVAL;
2644 if (log->slot >= KVM_MEMORY_SLOTS)
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002645 goto out;
2646
Marcelo Tosattib050b012009-12-23 14:35:22 -02002647 memslot = &kvm->memslots->memslots[log->slot];
2648 r = -ENOENT;
2649 if (!memslot->dirty_bitmap)
2650 goto out;
2651
2652 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
2653
2654 r = -ENOMEM;
2655 dirty_bitmap = vmalloc(n);
2656 if (!dirty_bitmap)
2657 goto out;
2658 memset(dirty_bitmap, 0, n);
2659
2660 for (i = 0; !is_dirty && i < n/sizeof(long); i++)
2661 is_dirty = memslot->dirty_bitmap[i];
2662
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002663 /* If nothing is dirty, don't bother messing with page tables. */
2664 if (is_dirty) {
Marcelo Tosattib050b012009-12-23 14:35:22 -02002665 struct kvm_memslots *slots, *old_slots;
2666
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002667 spin_lock(&kvm->mmu_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002668 kvm_mmu_slot_remove_write_access(kvm, log->slot);
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002669 spin_unlock(&kvm->mmu_lock);
Marcelo Tosattib050b012009-12-23 14:35:22 -02002670
2671 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
2672 if (!slots)
2673 goto out_free;
2674
2675 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
2676 slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
2677
2678 old_slots = kvm->memslots;
2679 rcu_assign_pointer(kvm->memslots, slots);
2680 synchronize_srcu_expedited(&kvm->srcu);
2681 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
2682 kfree(old_slots);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002683 }
Marcelo Tosattib050b012009-12-23 14:35:22 -02002684
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002685 r = 0;
Marcelo Tosattib050b012009-12-23 14:35:22 -02002686 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
2687 r = -EFAULT;
2688out_free:
2689 vfree(dirty_bitmap);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002690out:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002691 mutex_unlock(&kvm->slots_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002692 return r;
2693}
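/*
 * Illustrative userspace sketch (names are assumptions, error handling
 * omitted): the per-slot dirty bitmap is fetched, and implicitly cleared,
 * with KVM_GET_DIRTY_LOG; the buffer holds one bit per page, rounded up
 * to a whole number of longs as computed above:
 *
 *	unsigned long bitmap[(npages + 8 * sizeof(long) - 1) /
 *			     (8 * sizeof(long))];	// npages is assumed
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,			// assumed variable
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */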
2694
Carsten Otte1fe779f2007-10-29 16:08:35 +01002695long kvm_arch_vm_ioctl(struct file *filp,
2696 unsigned int ioctl, unsigned long arg)
2697{
2698 struct kvm *kvm = filp->private_data;
2699 void __user *argp = (void __user *)arg;
Avi Kivity367e1312009-08-26 14:57:07 +03002700 int r = -ENOTTY;
Dave Hansenf0d66272008-08-11 10:01:45 -07002701 /*
2702 * This union makes it completely explicit to gcc-3.x
2703 * that these variables' stack usage should be
2704 * combined, not added together.
2705 */
2706 union {
2707 struct kvm_pit_state ps;
Beth Kone9f42752009-07-07 11:50:38 -04002708 struct kvm_pit_state2 ps2;
Dave Hansenf0d66272008-08-11 10:01:45 -07002709 struct kvm_memory_alias alias;
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02002710 struct kvm_pit_config pit_config;
Dave Hansenf0d66272008-08-11 10:01:45 -07002711 } u;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002712
2713 switch (ioctl) {
2714 case KVM_SET_TSS_ADDR:
2715 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
2716 if (r < 0)
2717 goto out;
2718 break;
Sheng Yangb927a3c2009-07-21 10:42:48 +08002719 case KVM_SET_IDENTITY_MAP_ADDR: {
2720 u64 ident_addr;
2721
2722 r = -EFAULT;
2723 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
2724 goto out;
2725 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
2726 if (r < 0)
2727 goto out;
2728 break;
2729 }
Carsten Otte1fe779f2007-10-29 16:08:35 +01002730 case KVM_SET_MEMORY_REGION: {
2731 struct kvm_memory_region kvm_mem;
2732 struct kvm_userspace_memory_region kvm_userspace_mem;
2733
2734 r = -EFAULT;
2735 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
2736 goto out;
2737 kvm_userspace_mem.slot = kvm_mem.slot;
2738 kvm_userspace_mem.flags = kvm_mem.flags;
2739 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
2740 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
2741 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
2742 if (r)
2743 goto out;
2744 break;
2745 }
2746 case KVM_SET_NR_MMU_PAGES:
2747 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
2748 if (r)
2749 goto out;
2750 break;
2751 case KVM_GET_NR_MMU_PAGES:
2752 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
2753 break;
Dave Hansenf0d66272008-08-11 10:01:45 -07002754 case KVM_SET_MEMORY_ALIAS:
Carsten Otte1fe779f2007-10-29 16:08:35 +01002755 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002756 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
Carsten Otte1fe779f2007-10-29 16:08:35 +01002757 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002758 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002759 if (r)
2760 goto out;
2761 break;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002762 case KVM_CREATE_IRQCHIP: {
2763 struct kvm_pic *vpic;
2764
2765 mutex_lock(&kvm->lock);
2766 r = -EEXIST;
2767 if (kvm->arch.vpic)
2768 goto create_irqchip_unlock;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002769 r = -ENOMEM;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002770 vpic = kvm_create_pic(kvm);
2771 if (vpic) {
Carsten Otte1fe779f2007-10-29 16:08:35 +01002772 r = kvm_ioapic_init(kvm);
2773 if (r) {
Wei Yongjun72bb2fc2010-02-09 10:33:03 +08002774 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
2775 &vpic->dev);
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002776 kfree(vpic);
2777 goto create_irqchip_unlock;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002778 }
2779 } else
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002780 goto create_irqchip_unlock;
2781 smp_wmb();
2782 kvm->arch.vpic = vpic;
2783 smp_wmb();
Avi Kivity399ec802008-11-19 13:58:46 +02002784 r = kvm_setup_default_irq_routing(kvm);
2785 if (r) {
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002786 mutex_lock(&kvm->irq_lock);
Wei Yongjun72bb2fc2010-02-09 10:33:03 +08002787 kvm_ioapic_destroy(kvm);
2788 kvm_destroy_pic(kvm);
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002789 mutex_unlock(&kvm->irq_lock);
Avi Kivity399ec802008-11-19 13:58:46 +02002790 }
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002791 create_irqchip_unlock:
2792 mutex_unlock(&kvm->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002793 break;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002794 }
Sheng Yang78376992008-01-28 05:10:22 +08002795 case KVM_CREATE_PIT:
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02002796 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
2797 goto create_pit;
2798 case KVM_CREATE_PIT2:
2799 r = -EFAULT;
2800 if (copy_from_user(&u.pit_config, argp,
2801 sizeof(struct kvm_pit_config)))
2802 goto out;
2803 create_pit:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002804 mutex_lock(&kvm->slots_lock);
Avi Kivity269e05e2009-01-05 15:21:42 +02002805 r = -EEXIST;
2806 if (kvm->arch.vpit)
2807 goto create_pit_unlock;
Sheng Yang78376992008-01-28 05:10:22 +08002808 r = -ENOMEM;
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02002809 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
Sheng Yang78376992008-01-28 05:10:22 +08002810 if (kvm->arch.vpit)
2811 r = 0;
Avi Kivity269e05e2009-01-05 15:21:42 +02002812 create_pit_unlock:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002813 mutex_unlock(&kvm->slots_lock);
Sheng Yang78376992008-01-28 05:10:22 +08002814 break;
Gleb Natapov49256632009-02-04 17:28:14 +02002815 case KVM_IRQ_LINE_STATUS:
Carsten Otte1fe779f2007-10-29 16:08:35 +01002816 case KVM_IRQ_LINE: {
2817 struct kvm_irq_level irq_event;
2818
2819 r = -EFAULT;
2820 if (copy_from_user(&irq_event, argp, sizeof irq_event))
2821 goto out;
2822 if (irqchip_in_kernel(kvm)) {
Gleb Natapov49256632009-02-04 17:28:14 +02002823 __s32 status;
Gleb Natapov49256632009-02-04 17:28:14 +02002824 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2825 irq_event.irq, irq_event.level);
Gleb Natapov49256632009-02-04 17:28:14 +02002826 if (ioctl == KVM_IRQ_LINE_STATUS) {
2827 irq_event.status = status;
2828 if (copy_to_user(argp, &irq_event,
2829 sizeof irq_event))
2830 goto out;
2831 }
Carsten Otte1fe779f2007-10-29 16:08:35 +01002832 r = 0;
2833 }
2834 break;
2835 }
2836 case KVM_GET_IRQCHIP: {
2837 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
Dave Hansenf0d66272008-08-11 10:01:45 -07002838 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002839
Dave Hansenf0d66272008-08-11 10:01:45 -07002840 r = -ENOMEM;
2841 if (!chip)
Carsten Otte1fe779f2007-10-29 16:08:35 +01002842 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002843 r = -EFAULT;
2844 if (copy_from_user(chip, argp, sizeof *chip))
2845 goto get_irqchip_out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002846 r = -ENXIO;
2847 if (!irqchip_in_kernel(kvm))
Dave Hansenf0d66272008-08-11 10:01:45 -07002848 goto get_irqchip_out;
2849 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
2850 if (r)
2851 goto get_irqchip_out;
2852 r = -EFAULT;
2853 if (copy_to_user(argp, chip, sizeof *chip))
2854 goto get_irqchip_out;
2855 r = 0;
2856 get_irqchip_out:
2857 kfree(chip);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002858 if (r)
2859 goto out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002860 break;
2861 }
2862 case KVM_SET_IRQCHIP: {
2863 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
Dave Hansenf0d66272008-08-11 10:01:45 -07002864 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002865
Dave Hansenf0d66272008-08-11 10:01:45 -07002866 r = -ENOMEM;
2867 if (!chip)
Carsten Otte1fe779f2007-10-29 16:08:35 +01002868 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002869 r = -EFAULT;
2870 if (copy_from_user(chip, argp, sizeof *chip))
2871 goto set_irqchip_out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002872 r = -ENXIO;
2873 if (!irqchip_in_kernel(kvm))
Dave Hansenf0d66272008-08-11 10:01:45 -07002874 goto set_irqchip_out;
2875 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
2876 if (r)
2877 goto set_irqchip_out;
2878 r = 0;
2879 set_irqchip_out:
2880 kfree(chip);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002881 if (r)
2882 goto out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002883 break;
2884 }
Sheng Yange0f63cb2008-03-04 00:50:59 +08002885 case KVM_GET_PIT: {
Sheng Yange0f63cb2008-03-04 00:50:59 +08002886 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002887 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
Sheng Yange0f63cb2008-03-04 00:50:59 +08002888 goto out;
2889 r = -ENXIO;
2890 if (!kvm->arch.vpit)
2891 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002892 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002893 if (r)
2894 goto out;
2895 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002896 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
Sheng Yange0f63cb2008-03-04 00:50:59 +08002897 goto out;
2898 r = 0;
2899 break;
2900 }
2901 case KVM_SET_PIT: {
Sheng Yange0f63cb2008-03-04 00:50:59 +08002902 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002903 if (copy_from_user(&u.ps, argp, sizeof u.ps))
Sheng Yange0f63cb2008-03-04 00:50:59 +08002904 goto out;
2905 r = -ENXIO;
2906 if (!kvm->arch.vpit)
2907 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002908 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002909 if (r)
2910 goto out;
2911 r = 0;
2912 break;
2913 }
Beth Kone9f42752009-07-07 11:50:38 -04002914 case KVM_GET_PIT2: {
2915 r = -ENXIO;
2916 if (!kvm->arch.vpit)
2917 goto out;
2918 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
2919 if (r)
2920 goto out;
2921 r = -EFAULT;
2922 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
2923 goto out;
2924 r = 0;
2925 break;
2926 }
2927 case KVM_SET_PIT2: {
2928 r = -EFAULT;
2929 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
2930 goto out;
2931 r = -ENXIO;
2932 if (!kvm->arch.vpit)
2933 goto out;
2934 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
2935 if (r)
2936 goto out;
2937 r = 0;
2938 break;
2939 }
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002940 case KVM_REINJECT_CONTROL: {
2941 struct kvm_reinject_control control;
2942 r = -EFAULT;
2943 if (copy_from_user(&control, argp, sizeof(control)))
2944 goto out;
2945 r = kvm_vm_ioctl_reinject(kvm, &control);
2946 if (r)
2947 goto out;
2948 r = 0;
2949 break;
2950 }
Ed Swierkffde22a2009-10-15 15:21:43 -07002951 case KVM_XEN_HVM_CONFIG: {
2952 r = -EFAULT;
2953 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
2954 sizeof(struct kvm_xen_hvm_config)))
2955 goto out;
2956 r = -EINVAL;
2957 if (kvm->arch.xen_hvm_config.flags)
2958 goto out;
2959 r = 0;
2960 break;
2961 }
Glauber Costaafbcf7a2009-10-16 15:28:36 -04002962 case KVM_SET_CLOCK: {
2963 struct timespec now;
2964 struct kvm_clock_data user_ns;
2965 u64 now_ns;
2966 s64 delta;
2967
2968 r = -EFAULT;
2969 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
2970 goto out;
2971
2972 r = -EINVAL;
2973 if (user_ns.flags)
2974 goto out;
2975
2976 r = 0;
2977 ktime_get_ts(&now);
2978 now_ns = timespec_to_ns(&now);
2979 delta = user_ns.clock - now_ns;
2980 kvm->arch.kvmclock_offset = delta;
2981 break;
2982 }
2983 case KVM_GET_CLOCK: {
2984 struct timespec now;
2985 struct kvm_clock_data user_ns;
2986 u64 now_ns;
2987
2988 ktime_get_ts(&now);
2989 now_ns = timespec_to_ns(&now);
2990 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
2991 user_ns.flags = 0;
2992
2993 r = -EFAULT;
2994 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
2995 goto out;
2996 r = 0;
2997 break;
2998 }
2999
Carsten Otte1fe779f2007-10-29 16:08:35 +01003000 default:
3001 ;
3002 }
3003out:
3004 return r;
3005}
3006
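/*
 * Filter msrs_to_save down to the MSRs the host actually supports: each
 * entry is probed with rdmsr_safe() and unsupported ones are dropped by
 * compacting the array in place.  The first KVM_SAVE_MSRS_BEGIN entries
 * are KVM paravirtual MSRs and are kept without probing.  The final
 * count is recorded in num_msrs_to_save.
 */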
Zhang Xiantaoa16b0432007-11-16 14:38:21 +08003007static void kvm_init_msr_list(void)
Carsten Otte043405e2007-10-10 17:16:19 +02003008{
3009 u32 dummy[2];
3010 unsigned i, j;
3011
Glauber Costae3267cb2009-10-06 13:24:50 -04003012	/* skip probing the first MSRs in the list; they are KVM-specific */
3013 for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
Carsten Otte043405e2007-10-10 17:16:19 +02003014 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3015 continue;
3016 if (j < i)
3017 msrs_to_save[j] = msrs_to_save[i];
3018 j++;
3019 }
3020 num_msrs_to_save = j;
3021}
3022
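/*
 * MMIO dispatch helpers: an access is first offered to the vcpu's local
 * APIC page and then to the in-kernel devices on KVM_MMIO_BUS.  A
 * non-zero return means nothing in the kernel claimed the address and
 * the access has to be completed by userspace.
 */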
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003023static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3024 const void *v)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003025{
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003026 if (vcpu->arch.apic &&
3027 !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
3028 return 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003029
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003030 return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003031}
3032
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003033static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003034{
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003035 if (vcpu->arch.apic &&
3036 !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
3037 return 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003038
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003039 return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003040}
3041
Gleb Natapov1871c602010-02-10 14:21:32 +02003042gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3043{
3044 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3045 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3046}
3047
3048gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3049{
3050 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3051 access |= PFERR_FETCH_MASK;
3052 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3053}
3054
3055gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3056{
3057 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3058 access |= PFERR_WRITE_MASK;
3059 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3060}
3061
3062/* used to access any guest's mapped memory without checking CPL */
3063gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3064{
3065 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
3066}
3067
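/*
 * Copy from guest virtual memory, one page at a time: each page is
 * translated with gva_to_gpa() under the given access mask and read via
 * kvm_read_guest().  A failed translation yields X86EMUL_PROPAGATE_FAULT,
 * a failed physical read yields X86EMUL_UNHANDLEABLE.
 */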
3068static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3069 struct kvm_vcpu *vcpu, u32 access,
3070 u32 *error)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003071{
3072 void *data = val;
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003073 int r = X86EMUL_CONTINUE;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003074
3075 while (bytes) {
Gleb Natapov1871c602010-02-10 14:21:32 +02003076 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003077 unsigned offset = addr & (PAGE_SIZE-1);
Izik Eidus77c20022008-12-29 01:42:19 +02003078 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003079 int ret;
3080
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003081 if (gpa == UNMAPPED_GVA) {
3082 r = X86EMUL_PROPAGATE_FAULT;
3083 goto out;
3084 }
Izik Eidus77c20022008-12-29 01:42:19 +02003085 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003086 if (ret < 0) {
3087 r = X86EMUL_UNHANDLEABLE;
3088 goto out;
3089 }
Carsten Ottebbd9b642007-10-30 18:44:21 +01003090
Izik Eidus77c20022008-12-29 01:42:19 +02003091 bytes -= toread;
3092 data += toread;
3093 addr += toread;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003094 }
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003095out:
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003096 return r;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003097}
Izik Eidus77c20022008-12-29 01:42:19 +02003098
Gleb Natapov1871c602010-02-10 14:21:32 +02003099/* used for instruction fetching */
3100static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
3101 struct kvm_vcpu *vcpu, u32 *error)
3102{
3103 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3104 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3105 access | PFERR_FETCH_MASK, error);
3106}
3107
3108static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
3109 struct kvm_vcpu *vcpu, u32 *error)
3110{
3111 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3112 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3113 error);
3114}
3115
3116static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
3117 struct kvm_vcpu *vcpu, u32 *error)
3118{
3119 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
3120}
3121
Hannes Edercded19f2009-02-21 02:19:13 +01003122static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
Gleb Natapov1871c602010-02-10 14:21:32 +02003123 struct kvm_vcpu *vcpu, u32 *error)
Izik Eidus77c20022008-12-29 01:42:19 +02003124{
3125 void *data = val;
3126 int r = X86EMUL_CONTINUE;
3127
3128 while (bytes) {
Gleb Natapov1871c602010-02-10 14:21:32 +02003129 gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
Izik Eidus77c20022008-12-29 01:42:19 +02003130 unsigned offset = addr & (PAGE_SIZE-1);
3131 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3132 int ret;
3133
3134 if (gpa == UNMAPPED_GVA) {
3135 r = X86EMUL_PROPAGATE_FAULT;
3136 goto out;
3137 }
3138 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
3139 if (ret < 0) {
3140 r = X86EMUL_UNHANDLEABLE;
3141 goto out;
3142 }
3143
3144 bytes -= towrite;
3145 data += towrite;
3146 addr += towrite;
3147 }
3148out:
3149 return r;
3150}
3151
Carsten Ottebbd9b642007-10-30 18:44:21 +01003152
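/*
 * Emulator read callback.  Ordering: return the data of a previously
 * completed MMIO read, translate the GVA (injecting a page fault on
 * failure), force the APIC page onto the MMIO path, try ordinary guest
 * memory, then in-kernel MMIO devices; otherwise record the request in
 * vcpu->mmio_* and return X86EMUL_UNHANDLEABLE so userspace handles it.
 */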
Carsten Ottebbd9b642007-10-30 18:44:21 +01003153static int emulator_read_emulated(unsigned long addr,
3154 void *val,
3155 unsigned int bytes,
3156 struct kvm_vcpu *vcpu)
3157{
Carsten Ottebbd9b642007-10-30 18:44:21 +01003158 gpa_t gpa;
Gleb Natapov1871c602010-02-10 14:21:32 +02003159 u32 error_code;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003160
3161 if (vcpu->mmio_read_completed) {
3162 memcpy(val, vcpu->mmio_data, bytes);
Avi Kivityaec51dc2009-07-01 16:01:02 +03003163 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3164 vcpu->mmio_phys_addr, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003165 vcpu->mmio_read_completed = 0;
3166 return X86EMUL_CONTINUE;
3167 }
3168
Gleb Natapov1871c602010-02-10 14:21:32 +02003169 gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
3170
3171 if (gpa == UNMAPPED_GVA) {
3172 kvm_inject_page_fault(vcpu, addr, error_code);
3173 return X86EMUL_PROPAGATE_FAULT;
3174 }
Carsten Ottebbd9b642007-10-30 18:44:21 +01003175
3176 /* For APIC access vmexit */
3177 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3178 goto mmio;
3179
Gleb Natapov1871c602010-02-10 14:21:32 +02003180 if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
Izik Eidus77c20022008-12-29 01:42:19 +02003181 == X86EMUL_CONTINUE)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003182 return X86EMUL_CONTINUE;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003183
3184mmio:
3185 /*
3186 * Is this MMIO handled locally?
3187 */
Avi Kivityaec51dc2009-07-01 16:01:02 +03003188 if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
3189 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003190 return X86EMUL_CONTINUE;
3191 }
Avi Kivityaec51dc2009-07-01 16:01:02 +03003192
3193 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003194
3195 vcpu->mmio_needed = 1;
3196 vcpu->mmio_phys_addr = gpa;
3197 vcpu->mmio_size = bytes;
3198 vcpu->mmio_is_write = 0;
3199
3200 return X86EMUL_UNHANDLEABLE;
3201}
3202
Marcelo Tosatti3200f402008-03-29 20:17:59 -03003203int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
Avi Kivity9f811282008-03-02 14:06:05 +02003204 const void *val, int bytes)
3205{
3206 int ret;
3207
3208 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
3209 if (ret < 0)
3210 return 0;
Marcelo Tosattiad218f82008-12-01 22:32:05 -02003211 kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
Avi Kivity9f811282008-03-02 14:06:05 +02003212 return 1;
3213}
3214
Carsten Ottebbd9b642007-10-30 18:44:21 +01003215static int emulator_write_emulated_onepage(unsigned long addr,
3216 const void *val,
3217 unsigned int bytes,
3218 struct kvm_vcpu *vcpu)
3219{
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003220 gpa_t gpa;
Gleb Natapov1871c602010-02-10 14:21:32 +02003221 u32 error_code;
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003222
Gleb Natapov1871c602010-02-10 14:21:32 +02003223 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003224
3225 if (gpa == UNMAPPED_GVA) {
Gleb Natapov1871c602010-02-10 14:21:32 +02003226 kvm_inject_page_fault(vcpu, addr, error_code);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003227 return X86EMUL_PROPAGATE_FAULT;
3228 }
3229
3230 /* For APIC access vmexit */
3231 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3232 goto mmio;
3233
3234 if (emulator_write_phys(vcpu, gpa, val, bytes))
3235 return X86EMUL_CONTINUE;
3236
3237mmio:
Avi Kivityaec51dc2009-07-01 16:01:02 +03003238 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003239 /*
3240 * Is this MMIO handled locally?
3241 */
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003242 if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
Carsten Ottebbd9b642007-10-30 18:44:21 +01003243 return X86EMUL_CONTINUE;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003244
3245 vcpu->mmio_needed = 1;
3246 vcpu->mmio_phys_addr = gpa;
3247 vcpu->mmio_size = bytes;
3248 vcpu->mmio_is_write = 1;
3249 memcpy(vcpu->mmio_data, val, bytes);
3250
3251 return X86EMUL_CONTINUE;
3252}
3253
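/*
 * Writes that cross a page boundary are split so that each chunk is
 * translated and handled within a single guest page.
 */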
3254int emulator_write_emulated(unsigned long addr,
3255 const void *val,
3256 unsigned int bytes,
3257 struct kvm_vcpu *vcpu)
3258{
3259 /* Crossing a page boundary? */
3260 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3261 int rc, now;
3262
3263 now = -addr & ~PAGE_MASK;
3264 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
3265 if (rc != X86EMUL_CONTINUE)
3266 return rc;
3267 addr += now;
3268 val += now;
3269 bytes -= now;
3270 }
3271 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
3272}
3273EXPORT_SYMBOL_GPL(emulator_write_emulated);
3274
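/*
 * cmpxchg is emulated as a plain write (warned about once).  On 32-bit
 * hosts an 8-byte exchange to regular, page-contained guest memory is
 * still stored atomically via set_64bit() on a kmap'ed page, so the
 * guest cannot observe a torn 64-bit value.
 */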
3275static int emulator_cmpxchg_emulated(unsigned long addr,
3276 const void *old,
3277 const void *new,
3278 unsigned int bytes,
3279 struct kvm_vcpu *vcpu)
3280{
Marcin Slusarz9f51e242009-08-09 21:54:00 +02003281 printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003282#ifndef CONFIG_X86_64
3283 /* guests cmpxchg8b have to be emulated atomically */
3284 if (bytes == 8) {
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003285 gpa_t gpa;
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003286 struct page *page;
Andrew Mortonc0b49b02008-02-04 22:27:18 -08003287 char *kaddr;
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003288 u64 val;
3289
Gleb Natapov1871c602010-02-10 14:21:32 +02003290 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003291
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003292 if (gpa == UNMAPPED_GVA ||
3293 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3294 goto emul_write;
3295
3296 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
3297 goto emul_write;
3298
3299 val = *(u64 *)new;
Izik Eidus72dc67a2008-02-10 18:04:15 +02003300
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003301 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
Izik Eidus72dc67a2008-02-10 18:04:15 +02003302
Andrew Mortonc0b49b02008-02-04 22:27:18 -08003303 kaddr = kmap_atomic(page, KM_USER0);
3304 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
3305 kunmap_atomic(kaddr, KM_USER0);
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003306 kvm_release_page_dirty(page);
3307 }
Marcelo Tosatti3200f402008-03-29 20:17:59 -03003308emul_write:
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003309#endif
3310
Carsten Ottebbd9b642007-10-30 18:44:21 +01003311 return emulator_write_emulated(addr, new, bytes, vcpu);
3312}
3313
3314static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
3315{
3316 return kvm_x86_ops->get_segment_base(vcpu, seg);
3317}
3318
3319int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
3320{
Marcelo Tosattia7052892008-09-23 13:18:35 -03003321 kvm_mmu_invlpg(vcpu, address);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003322 return X86EMUL_CONTINUE;
3323}
3324
3325int emulate_clts(struct kvm_vcpu *vcpu)
3326{
Avi Kivity4d4ec082009-12-29 18:07:30 +02003327 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
Avi Kivity6b52d182010-01-21 15:31:47 +02003328 kvm_x86_ops->fpu_activate(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003329 return X86EMUL_CONTINUE;
3330}
3331
3332int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
3333{
Jan Kiszkac76de352010-01-20 18:20:20 +01003334 return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003335}
3336
3337int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
3338{
3339 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003340
Jan Kiszkac76de352010-01-20 18:20:20 +01003341 return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003342}
3343
3344void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
3345{
Carsten Ottebbd9b642007-10-30 18:44:21 +01003346 u8 opcodes[4];
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003347 unsigned long rip = kvm_rip_read(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003348 unsigned long rip_linear;
3349
Avi Kivityf76c7102008-06-13 22:45:42 +03003350 if (!printk_ratelimit())
Carsten Ottebbd9b642007-10-30 18:44:21 +01003351 return;
3352
Glauber Costa25be4602008-06-10 10:46:53 -03003353 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
3354
Gleb Natapov1871c602010-02-10 14:21:32 +02003355 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003356
3357 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
3358 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003359}
3360EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
3361
Harvey Harrison14af3f32008-02-19 10:25:50 -08003362static struct x86_emulate_ops emulate_ops = {
Gleb Natapov1871c602010-02-10 14:21:32 +02003363 .read_std = kvm_read_guest_virt_system,
3364 .fetch = kvm_fetch_guest_virt,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003365 .read_emulated = emulator_read_emulated,
3366 .write_emulated = emulator_write_emulated,
3367 .cmpxchg_emulated = emulator_cmpxchg_emulated,
3368};
3369
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003370static void cache_all_regs(struct kvm_vcpu *vcpu)
3371{
3372 kvm_register_read(vcpu, VCPU_REGS_RAX);
3373 kvm_register_read(vcpu, VCPU_REGS_RSP);
3374 kvm_register_read(vcpu, VCPU_REGS_RIP);
3375 vcpu->arch.regs_dirty = ~0;
3376}
3377
Carsten Ottebbd9b642007-10-30 18:44:21 +01003378int emulate_instruction(struct kvm_vcpu *vcpu,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003379 unsigned long cr2,
3380 u16 error_code,
Sheng Yang571008d2008-01-02 14:49:22 +08003381 int emulation_type)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003382{
Glauber Costa310b5d32009-05-12 16:21:06 -04003383 int r, shadow_mask;
Sheng Yang571008d2008-01-02 14:49:22 +08003384 struct decode_cache *c;
Avi Kivity851ba692009-08-24 11:10:17 +03003385 struct kvm_run *run = vcpu->run;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003386
Avi Kivity26eef702008-07-03 14:59:22 +03003387 kvm_clear_exception_queue(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003388 vcpu->arch.mmio_fault_cr2 = cr2;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003389 /*
Avi Kivity56e82312009-08-12 15:04:37 +03003390 * TODO: fix emulate.c to use guest_read/write_register
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003391	 * instead of direct ->regs accesses; this can save hundreds of cycles
3392	 * on Intel for instructions that don't read/change RSP,
3393	 * for example.
3394 */
3395 cache_all_regs(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003396
3397 vcpu->mmio_is_write = 0;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003398 vcpu->arch.pio.string = 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003399
Sheng Yang571008d2008-01-02 14:49:22 +08003400 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
Carsten Ottebbd9b642007-10-30 18:44:21 +01003401 int cs_db, cs_l;
3402 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
3403
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003404 vcpu->arch.emulate_ctxt.vcpu = vcpu;
Jan Kiszka91586a32009-10-05 13:07:21 +02003405 vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003406 vcpu->arch.emulate_ctxt.mode =
Gleb Natapova0044752010-02-10 14:21:31 +02003407 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003408 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
Gleb Natapova0044752010-02-10 14:21:31 +02003409 ? X86EMUL_MODE_VM86 : cs_l
Carsten Ottebbd9b642007-10-30 18:44:21 +01003410 ? X86EMUL_MODE_PROT64 : cs_db
3411 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
3412
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003413 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
Sheng Yang571008d2008-01-02 14:49:22 +08003414
Andre Przywara0cb57622009-06-17 15:50:31 +02003415 /* Only allow emulation of specific instructions on #UD
3416 * (namely VMMCALL, sysenter, sysexit, syscall)*/
Sheng Yang571008d2008-01-02 14:49:22 +08003417 c = &vcpu->arch.emulate_ctxt.decode;
Andre Przywara0cb57622009-06-17 15:50:31 +02003418 if (emulation_type & EMULTYPE_TRAP_UD) {
3419 if (!c->twobyte)
3420 return EMULATE_FAIL;
3421 switch (c->b) {
3422 case 0x01: /* VMMCALL */
3423 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3424 return EMULATE_FAIL;
3425 break;
3426 case 0x34: /* sysenter */
3427 case 0x35: /* sysexit */
3428 if (c->modrm_mod != 0 || c->modrm_rm != 0)
3429 return EMULATE_FAIL;
3430 break;
3431 case 0x05: /* syscall */
3432 if (c->modrm_mod != 0 || c->modrm_rm != 0)
3433 return EMULATE_FAIL;
3434 break;
3435 default:
3436 return EMULATE_FAIL;
3437 }
3438
3439 if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
3440 return EMULATE_FAIL;
3441 }
Sheng Yang571008d2008-01-02 14:49:22 +08003442
Avi Kivityf2b57562007-11-18 15:17:51 +02003443 ++vcpu->stat.insn_emulation;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003444 if (r) {
Avi Kivityf2b57562007-11-18 15:17:51 +02003445 ++vcpu->stat.insn_emulation_fail;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003446 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
3447 return EMULATE_DONE;
3448 return EMULATE_FAIL;
3449 }
3450 }
3451
Gleb Natapovba8afb62009-04-12 13:36:57 +03003452 if (emulation_type & EMULTYPE_SKIP) {
3453 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
3454 return EMULATE_DONE;
3455 }
3456
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003457 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
Glauber Costa310b5d32009-05-12 16:21:06 -04003458 shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
3459
3460 if (r == 0)
3461 kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003462
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003463 if (vcpu->arch.pio.string)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003464 return EMULATE_DO_MMIO;
3465
3466 if ((r || vcpu->mmio_is_write) && run) {
3467 run->exit_reason = KVM_EXIT_MMIO;
3468 run->mmio.phys_addr = vcpu->mmio_phys_addr;
3469 memcpy(run->mmio.data, vcpu->mmio_data, 8);
3470 run->mmio.len = vcpu->mmio_size;
3471 run->mmio.is_write = vcpu->mmio_is_write;
3472 }
3473
3474 if (r) {
3475 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
3476 return EMULATE_DONE;
3477 if (!vcpu->mmio_needed) {
3478 kvm_report_emulation_failure(vcpu, "mmio");
3479 return EMULATE_FAIL;
3480 }
3481 return EMULATE_DO_MMIO;
3482 }
3483
Jan Kiszka91586a32009-10-05 13:07:21 +02003484 kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003485
3486 if (vcpu->mmio_is_write) {
3487 vcpu->mmio_needed = 0;
3488 return EMULATE_DO_MMIO;
3489 }
3490
3491 return EMULATE_DONE;
3492}
3493EXPORT_SYMBOL_GPL(emulate_instruction);
3494
Carsten Ottede7d7892007-10-30 18:44:25 +01003495static int pio_copy_data(struct kvm_vcpu *vcpu)
3496{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003497 void *p = vcpu->arch.pio_data;
Izik Eidus0f346072008-12-29 01:42:20 +02003498 gva_t q = vcpu->arch.pio.guest_gva;
Carsten Ottede7d7892007-10-30 18:44:25 +01003499 unsigned bytes;
Izik Eidus0f346072008-12-29 01:42:20 +02003500 int ret;
Gleb Natapov1871c602010-02-10 14:21:32 +02003501 u32 error_code;
Carsten Ottede7d7892007-10-30 18:44:25 +01003502
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003503 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
3504 if (vcpu->arch.pio.in)
Gleb Natapov1871c602010-02-10 14:21:32 +02003505 ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
Carsten Ottede7d7892007-10-30 18:44:25 +01003506 else
Gleb Natapov1871c602010-02-10 14:21:32 +02003507 ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
3508
3509 if (ret == X86EMUL_PROPAGATE_FAULT)
3510 kvm_inject_page_fault(vcpu, q, error_code);
3511
Izik Eidus0f346072008-12-29 01:42:20 +02003512 return ret;
Carsten Ottede7d7892007-10-30 18:44:25 +01003513}
3514
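/*
 * Finish a port I/O operation once pio_data has been filled in (by an
 * in-kernel device or by userspace).  Non-string IN copies the result
 * into RAX; string I/O copies between pio_data and guest memory, then
 * advances RSI/RDI (or rewinds them when io->down is set) and, for REP,
 * decrements RCX by the completed count.
 */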
3515int complete_pio(struct kvm_vcpu *vcpu)
3516{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003517 struct kvm_pio_request *io = &vcpu->arch.pio;
Carsten Ottede7d7892007-10-30 18:44:25 +01003518 long delta;
3519 int r;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003520 unsigned long val;
Carsten Ottede7d7892007-10-30 18:44:25 +01003521
3522 if (!io->string) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003523 if (io->in) {
3524 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
3525 memcpy(&val, vcpu->arch.pio_data, io->size);
3526 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
3527 }
Carsten Ottede7d7892007-10-30 18:44:25 +01003528 } else {
3529 if (io->in) {
3530 r = pio_copy_data(vcpu);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003531 if (r)
Gleb Natapov1871c602010-02-10 14:21:32 +02003532 goto out;
Carsten Ottede7d7892007-10-30 18:44:25 +01003533 }
3534
3535 delta = 1;
3536 if (io->rep) {
3537 delta *= io->cur_count;
3538 /*
3539 * The size of the register should really depend on
3540 * current address size.
3541 */
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003542 val = kvm_register_read(vcpu, VCPU_REGS_RCX);
3543 val -= delta;
3544 kvm_register_write(vcpu, VCPU_REGS_RCX, val);
Carsten Ottede7d7892007-10-30 18:44:25 +01003545 }
3546 if (io->down)
3547 delta = -delta;
3548 delta *= io->size;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003549 if (io->in) {
3550 val = kvm_register_read(vcpu, VCPU_REGS_RDI);
3551 val += delta;
3552 kvm_register_write(vcpu, VCPU_REGS_RDI, val);
3553 } else {
3554 val = kvm_register_read(vcpu, VCPU_REGS_RSI);
3555 val += delta;
3556 kvm_register_write(vcpu, VCPU_REGS_RSI, val);
3557 }
Carsten Ottede7d7892007-10-30 18:44:25 +01003558 }
Gleb Natapov1871c602010-02-10 14:21:32 +02003559out:
Carsten Ottede7d7892007-10-30 18:44:25 +01003560 io->count -= io->cur_count;
3561 io->cur_count = 0;
3562
3563 return 0;
3564}
3565
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003566static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
Carsten Ottede7d7892007-10-30 18:44:25 +01003567{
3568 /* TODO: String I/O for in kernel device */
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003569 int r;
Carsten Ottede7d7892007-10-30 18:44:25 +01003570
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003571 if (vcpu->arch.pio.in)
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003572 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003573 vcpu->arch.pio.size, pd);
Carsten Ottede7d7892007-10-30 18:44:25 +01003574 else
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003575 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3576 vcpu->arch.pio.port, vcpu->arch.pio.size,
3577 pd);
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003578 return r;
Carsten Ottede7d7892007-10-30 18:44:25 +01003579}
3580
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003581static int pio_string_write(struct kvm_vcpu *vcpu)
Carsten Ottede7d7892007-10-30 18:44:25 +01003582{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003583 struct kvm_pio_request *io = &vcpu->arch.pio;
3584 void *pd = vcpu->arch.pio_data;
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003585 int i, r = 0;
Carsten Ottede7d7892007-10-30 18:44:25 +01003586
Carsten Ottede7d7892007-10-30 18:44:25 +01003587 for (i = 0; i < io->cur_count; i++) {
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003588 if (kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003589 io->port, io->size, pd)) {
3590 r = -EOPNOTSUPP;
3591 break;
3592 }
Carsten Ottede7d7892007-10-30 18:44:25 +01003593 pd += io->size;
3594 }
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003595 return r;
Carsten Ottede7d7892007-10-30 18:44:25 +01003596}
3597
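/*
 * Single (non-string) port access.  The run structure is prepared for a
 * KVM_EXIT_IO exit and, for OUT, the value from RAX is placed in the
 * shared pio_data page.  If an in-kernel KVM_PIO_BUS device handles the
 * port, complete_pio() finishes immediately and 1 is returned; a return
 * of 0 means userspace must complete the I/O.
 */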
Avi Kivity851ba692009-08-24 11:10:17 +03003598int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
Carsten Ottede7d7892007-10-30 18:44:25 +01003599{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003600 unsigned long val;
Carsten Ottede7d7892007-10-30 18:44:25 +01003601
Gleb Natapovf850e2e2010-02-10 14:21:33 +02003602 trace_kvm_pio(!in, port, size, 1);
3603
Carsten Ottede7d7892007-10-30 18:44:25 +01003604 vcpu->run->exit_reason = KVM_EXIT_IO;
3605 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003606 vcpu->run->io.size = vcpu->arch.pio.size = size;
Carsten Ottede7d7892007-10-30 18:44:25 +01003607 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003608 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
3609 vcpu->run->io.port = vcpu->arch.pio.port = port;
3610 vcpu->arch.pio.in = in;
3611 vcpu->arch.pio.string = 0;
3612 vcpu->arch.pio.down = 0;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003613 vcpu->arch.pio.rep = 0;
Carsten Ottede7d7892007-10-30 18:44:25 +01003614
Takuya Yoshikawa1976d2d2010-02-05 17:52:46 +09003615 if (!vcpu->arch.pio.in) {
3616 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
3617 memcpy(vcpu->arch.pio_data, &val, 4);
3618 }
Carsten Ottede7d7892007-10-30 18:44:25 +01003619
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003620 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
Carsten Ottede7d7892007-10-30 18:44:25 +01003621 complete_pio(vcpu);
3622 return 1;
3623 }
3624 return 0;
3625}
3626EXPORT_SYMBOL_GPL(kvm_emulate_pio);
3627
Avi Kivity851ba692009-08-24 11:10:17 +03003628int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
Carsten Ottede7d7892007-10-30 18:44:25 +01003629 int size, unsigned long count, int down,
3630 gva_t address, int rep, unsigned port)
3631{
3632 unsigned now, in_page;
Izik Eidus0f346072008-12-29 01:42:20 +02003633 int ret = 0;
Carsten Ottede7d7892007-10-30 18:44:25 +01003634
Gleb Natapovf850e2e2010-02-10 14:21:33 +02003635 trace_kvm_pio(!in, port, size, count);
3636
Carsten Ottede7d7892007-10-30 18:44:25 +01003637 vcpu->run->exit_reason = KVM_EXIT_IO;
3638 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003639 vcpu->run->io.size = vcpu->arch.pio.size = size;
Carsten Ottede7d7892007-10-30 18:44:25 +01003640 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003641 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
3642 vcpu->run->io.port = vcpu->arch.pio.port = port;
3643 vcpu->arch.pio.in = in;
3644 vcpu->arch.pio.string = 1;
3645 vcpu->arch.pio.down = down;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003646 vcpu->arch.pio.rep = rep;
Carsten Ottede7d7892007-10-30 18:44:25 +01003647
3648 if (!count) {
3649 kvm_x86_ops->skip_emulated_instruction(vcpu);
3650 return 1;
3651 }
3652
3653 if (!down)
3654 in_page = PAGE_SIZE - offset_in_page(address);
3655 else
3656 in_page = offset_in_page(address) + size;
3657 now = min(count, (unsigned long)in_page / size);
Izik Eidus0f346072008-12-29 01:42:20 +02003658 if (!now)
Carsten Ottede7d7892007-10-30 18:44:25 +01003659 now = 1;
Carsten Ottede7d7892007-10-30 18:44:25 +01003660 if (down) {
3661 /*
3662 * String I/O in reverse. Yuck. Kill the guest, fix later.
3663 */
3664 pr_unimpl(vcpu, "guest string pio down\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02003665 kvm_inject_gp(vcpu, 0);
Carsten Ottede7d7892007-10-30 18:44:25 +01003666 return 1;
3667 }
3668 vcpu->run->io.count = now;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003669 vcpu->arch.pio.cur_count = now;
Carsten Ottede7d7892007-10-30 18:44:25 +01003670
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003671 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
Carsten Ottede7d7892007-10-30 18:44:25 +01003672 kvm_x86_ops->skip_emulated_instruction(vcpu);
3673
Izik Eidus0f346072008-12-29 01:42:20 +02003674 vcpu->arch.pio.guest_gva = address;
Carsten Ottede7d7892007-10-30 18:44:25 +01003675
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003676 if (!vcpu->arch.pio.in) {
Carsten Ottede7d7892007-10-30 18:44:25 +01003677 /* string PIO write */
3678 ret = pio_copy_data(vcpu);
Gleb Natapov1871c602010-02-10 14:21:32 +02003679 if (ret == X86EMUL_PROPAGATE_FAULT)
Izik Eidus0f346072008-12-29 01:42:20 +02003680 return 1;
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003681 if (ret == 0 && !pio_string_write(vcpu)) {
Carsten Ottede7d7892007-10-30 18:44:25 +01003682 complete_pio(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003683 if (vcpu->arch.pio.count == 0)
Carsten Ottede7d7892007-10-30 18:44:25 +01003684 ret = 1;
3685 }
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003686 }
3687 /* no string PIO read support yet */
Carsten Ottede7d7892007-10-30 18:44:25 +01003688
3689 return ret;
3690}
3691EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
3692
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003693static void bounce_off(void *info)
3694{
3695 /* nothing */
3696}
3697
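/*
 * cpufreq transitions change the TSC rate on hosts without a constant
 * TSC.  Frequency increases are handled on PRECHANGE and decreases on
 * POSTCHANGE; the new rate is recorded in cpu_tsc_khz and a kvmclock
 * update is requested for every vcpu on the affected cpu.  When raising
 * the frequency of a remote cpu, that cpu is kicked with an IPI so a
 * running guest cannot keep using the stale clock parameters.
 */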
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003698static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
3699 void *data)
3700{
3701 struct cpufreq_freqs *freq = data;
3702 struct kvm *kvm;
3703 struct kvm_vcpu *vcpu;
3704 int i, send_ipi = 0;
3705
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003706 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
3707 return 0;
3708 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
3709 return 0;
Zachary Amsden0cca7902009-09-29 11:38:35 -10003710 per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003711
3712 spin_lock(&kvm_lock);
3713 list_for_each_entry(kvm, &vm_list, vm_list) {
Gleb Natapov988a2ca2009-06-09 15:56:29 +03003714 kvm_for_each_vcpu(i, vcpu, kvm) {
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003715 if (vcpu->cpu != freq->cpu)
3716 continue;
3717 if (!kvm_request_guest_time_update(vcpu))
3718 continue;
3719 if (vcpu->cpu != smp_processor_id())
3720 send_ipi++;
3721 }
3722 }
3723 spin_unlock(&kvm_lock);
3724
3725 if (freq->old < freq->new && send_ipi) {
3726 /*
3727		 * We upscale the frequency.  We must make sure the guest
3728		 * doesn't see old kvmclock values while running with
3729		 * the new frequency; otherwise we risk that the guest sees
3730		 * time go backwards.
3731 *
3732 * In case we update the frequency for another cpu
3733 * (which might be in guest context) send an interrupt
3734 * to kick the cpu out of guest context. Next time
3735 * guest context is entered kvmclock will be updated,
3736 * so the guest will not see stale values.
3737 */
3738 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
3739 }
3740 return 0;
3741}
3742
3743static struct notifier_block kvmclock_cpufreq_notifier_block = {
3744 .notifier_call = kvmclock_cpufreq_notifier
3745};
3746
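/*
 * Seed the per-cpu TSC frequency: without a constant TSC, register the
 * cpufreq notifier and take each online cpu's current rate from
 * cpufreq_get() (falling back to tsc_khz); with a constant TSC, tsc_khz
 * applies to every possible cpu.
 */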
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003747static void kvm_timer_init(void)
3748{
3749 int cpu;
3750
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003751 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003752 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
3753 CPUFREQ_TRANSITION_NOTIFIER);
Zachary Amsden6b7d7e72009-10-09 16:26:08 -10003754 for_each_online_cpu(cpu) {
3755 unsigned long khz = cpufreq_get(cpu);
3756 if (!khz)
3757 khz = tsc_khz;
3758 per_cpu(cpu_tsc_khz, cpu) = khz;
3759 }
Zachary Amsden0cca7902009-09-29 11:38:35 -10003760 } else {
3761 for_each_possible_cpu(cpu)
3762 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003763 }
3764}
3765
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003766int kvm_arch_init(void *opaque)
Carsten Otte043405e2007-10-10 17:16:19 +02003767{
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003768 int r;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003769 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
3770
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003771 if (kvm_x86_ops) {
3772 printk(KERN_ERR "kvm: already loaded the other module\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003773 r = -EEXIST;
3774 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003775 }
3776
3777 if (!ops->cpu_has_kvm_support()) {
3778 printk(KERN_ERR "kvm: no hardware support\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003779 r = -EOPNOTSUPP;
3780 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003781 }
3782 if (ops->disabled_by_bios()) {
3783 printk(KERN_ERR "kvm: disabled by bios\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003784 r = -EOPNOTSUPP;
3785 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003786 }
3787
Avi Kivity97db56c2008-01-13 13:23:56 +02003788 r = kvm_mmu_module_init();
3789 if (r)
3790 goto out;
3791
3792 kvm_init_msr_list();
3793
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003794 kvm_x86_ops = ops;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003795 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
Sheng Yang7b523452008-04-25 21:13:50 +08003796 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
3797 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
Sheng Yang4b12f0d2009-04-27 20:35:42 +08003798 PT_DIRTY_MASK, PT64_NX_MASK, 0);
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003799
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003800 kvm_timer_init();
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003801
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003802 return 0;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003803
3804out:
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003805 return r;
Carsten Otte043405e2007-10-10 17:16:19 +02003806}
Hollis Blanchard8776e512007-10-31 17:24:24 -05003807
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003808void kvm_arch_exit(void)
3809{
Jan Kiszka888d2562009-04-17 19:24:58 +02003810 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
3811 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
3812 CPUFREQ_TRANSITION_NOTIFIER);
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003813 kvm_x86_ops = NULL;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003814 kvm_mmu_module_exit();
3815}
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003816
Hollis Blanchard8776e512007-10-31 17:24:24 -05003817int kvm_emulate_halt(struct kvm_vcpu *vcpu)
3818{
3819 ++vcpu->stat.halt_exits;
3820 if (irqchip_in_kernel(vcpu->kvm)) {
Avi Kivitya4535292008-04-13 17:54:35 +03003821 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003822 return 1;
3823 } else {
3824 vcpu->run->exit_reason = KVM_EXIT_HLT;
3825 return 0;
3826 }
3827}
3828EXPORT_SYMBOL_GPL(kvm_emulate_halt);
3829
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003830static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
3831 unsigned long a1)
3832{
3833 if (is_long_mode(vcpu))
3834 return a0;
3835 else
3836 return a0 | ((gpa_t)a1 << 32);
3837}
3838
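/*
 * Hyper-V hypercall entry point.  The input registers differ between
 * 32-bit mode (EDX:EAX, EBX:ECX, EDI:ESI) and long mode (RCX, RDX, R8);
 * the call code, fast flag and rep fields are unpacked from the
 * parameter.  Only HV_X64_HV_NOTIFY_LONG_SPIN_WAIT is implemented (it
 * yields the vcpu); everything else returns
 * HV_STATUS_INVALID_HYPERCALL_CODE.  The result is written back in RAX,
 * or in EDX:EAX outside long mode.
 */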
Gleb Natapov55cd8e52010-01-17 15:51:22 +02003839int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
3840{
3841 u64 param, ingpa, outgpa, ret;
3842 uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
3843 bool fast, longmode;
3844 int cs_db, cs_l;
3845
3846 /*
3847	 * a hypercall generates #UD when issued from non-zero CPL or from
3848	 * real mode, per the Hyper-V spec
3849 */
Avi Kivity3eeb3282010-01-21 15:31:48 +02003850 if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
Gleb Natapov55cd8e52010-01-17 15:51:22 +02003851 kvm_queue_exception(vcpu, UD_VECTOR);
3852 return 0;
3853 }
3854
3855 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
3856 longmode = is_long_mode(vcpu) && cs_l == 1;
3857
3858 if (!longmode) {
Gleb Natapovccd46932010-01-19 15:06:38 +02003859 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
3860 (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
3861 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
3862 (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
3863 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
3864 (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
Gleb Natapov55cd8e52010-01-17 15:51:22 +02003865 }
3866#ifdef CONFIG_X86_64
3867 else {
3868 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
3869 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
3870 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
3871 }
3872#endif
3873
3874 code = param & 0xffff;
3875 fast = (param >> 16) & 0x1;
3876 rep_cnt = (param >> 32) & 0xfff;
3877 rep_idx = (param >> 48) & 0xfff;
3878
3879 trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
3880
Gleb Natapovc25bc162010-01-17 15:51:24 +02003881 switch (code) {
3882 case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
3883 kvm_vcpu_on_spin(vcpu);
3884 break;
3885 default:
3886 res = HV_STATUS_INVALID_HYPERCALL_CODE;
3887 break;
3888 }
Gleb Natapov55cd8e52010-01-17 15:51:22 +02003889
3890 ret = res | (((u64)rep_done & 0xfff) << 32);
3891 if (longmode) {
3892 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
3893 } else {
3894 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
3895 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
3896 }
3897
3898 return 1;
3899}
3900
Hollis Blanchard8776e512007-10-31 17:24:24 -05003901int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
3902{
3903 unsigned long nr, a0, a1, a2, a3, ret;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003904 int r = 1;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003905
Gleb Natapov55cd8e52010-01-17 15:51:22 +02003906 if (kvm_hv_hypercall_enabled(vcpu->kvm))
3907 return kvm_hv_hypercall(vcpu);
3908
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003909 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
3910 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
3911 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
3912 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
3913 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003914
Marcelo Tosatti229456f2009-06-17 09:22:14 -03003915 trace_kvm_hypercall(nr, a0, a1, a2, a3);
Feng (Eric) Liu2714d1d2008-04-10 15:31:10 -04003916
Hollis Blanchard8776e512007-10-31 17:24:24 -05003917 if (!is_long_mode(vcpu)) {
3918 nr &= 0xFFFFFFFF;
3919 a0 &= 0xFFFFFFFF;
3920 a1 &= 0xFFFFFFFF;
3921 a2 &= 0xFFFFFFFF;
3922 a3 &= 0xFFFFFFFF;
3923 }
3924
Jan Kiszka07708c42009-08-03 18:43:28 +02003925 if (kvm_x86_ops->get_cpl(vcpu) != 0) {
3926 ret = -KVM_EPERM;
3927 goto out;
3928 }
3929
Hollis Blanchard8776e512007-10-31 17:24:24 -05003930 switch (nr) {
Avi Kivityb93463a2007-10-25 16:52:32 +02003931 case KVM_HC_VAPIC_POLL_IRQ:
3932 ret = 0;
3933 break;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003934 case KVM_HC_MMU_OP:
3935 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
3936 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003937 default:
3938 ret = -KVM_ENOSYS;
3939 break;
3940 }
Jan Kiszka07708c42009-08-03 18:43:28 +02003941out:
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003942 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
Amit Shahf11c3a82008-02-21 01:00:30 +05303943 ++vcpu->stat.hypercalls;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003944 return r;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003945}
3946EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
3947
3948int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
3949{
3950 char instruction[3];
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003951 unsigned long rip = kvm_rip_read(vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003952
Hollis Blanchard8776e512007-10-31 17:24:24 -05003953 /*
3954 * Blow out the MMU to ensure that no other VCPU has an active mapping
3955 * to ensure that the updated hypercall appears atomically across all
3956 * VCPUs.
3957 */
3958 kvm_mmu_zap_all(vcpu->kvm);
3959
Hollis Blanchard8776e512007-10-31 17:24:24 -05003960 kvm_x86_ops->patch_hypercall(vcpu, instruction);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003961
Takuya Yoshikawa7edcfac2010-02-01 22:11:52 +09003962 return emulator_write_emulated(rip, instruction, 3, vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003963}
3964
3965static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3966{
3967 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3968}
3969
3970void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3971{
3972 struct descriptor_table dt = { limit, base };
3973
3974 kvm_x86_ops->set_gdt(vcpu, &dt);
3975}
3976
3977void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3978{
3979 struct descriptor_table dt = { limit, base };
3980
3981 kvm_x86_ops->set_idt(vcpu, &dt);
3982}
3983
3984void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
3985 unsigned long *rflags)
3986{
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02003987 kvm_lmsw(vcpu, msw);
Jan Kiszka91586a32009-10-05 13:07:21 +02003988 *rflags = kvm_get_rflags(vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003989}
3990
3991unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3992{
Joerg Roedel54e445c2008-04-30 17:56:02 +02003993 unsigned long value;
3994
Hollis Blanchard8776e512007-10-31 17:24:24 -05003995 switch (cr) {
3996 case 0:
Avi Kivity4d4ec082009-12-29 18:07:30 +02003997 value = kvm_read_cr0(vcpu);
Joerg Roedel54e445c2008-04-30 17:56:02 +02003998 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003999 case 2:
Joerg Roedel54e445c2008-04-30 17:56:02 +02004000 value = vcpu->arch.cr2;
4001 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004002 case 3:
Joerg Roedel54e445c2008-04-30 17:56:02 +02004003 value = vcpu->arch.cr3;
4004 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004005 case 4:
Avi Kivityfc78f512009-12-07 12:16:48 +02004006 value = kvm_read_cr4(vcpu);
Joerg Roedel54e445c2008-04-30 17:56:02 +02004007 break;
Joerg Roedel152ff9b2007-12-06 15:46:52 +01004008 case 8:
Joerg Roedel54e445c2008-04-30 17:56:02 +02004009 value = kvm_get_cr8(vcpu);
4010 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004011 default:
Harvey Harrisonb8688d52008-03-03 12:59:56 -08004012 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004013 return 0;
4014 }
Joerg Roedel54e445c2008-04-30 17:56:02 +02004015
4016 return value;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004017}
4018
4019void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
4020 unsigned long *rflags)
4021{
4022 switch (cr) {
4023 case 0:
Avi Kivity4d4ec082009-12-29 18:07:30 +02004024 kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
Jan Kiszka91586a32009-10-05 13:07:21 +02004025 *rflags = kvm_get_rflags(vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004026 break;
4027 case 2:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004028 vcpu->arch.cr2 = val;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004029 break;
4030 case 3:
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004031 kvm_set_cr3(vcpu, val);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004032 break;
4033 case 4:
Avi Kivityfc78f512009-12-07 12:16:48 +02004034 kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
Hollis Blanchard8776e512007-10-31 17:24:24 -05004035 break;
Joerg Roedel152ff9b2007-12-06 15:46:52 +01004036 case 8:
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004037 kvm_set_cr8(vcpu, val & 0xfUL);
Joerg Roedel152ff9b2007-12-06 15:46:52 +01004038 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004039 default:
Harvey Harrisonb8688d52008-03-03 12:59:56 -08004040 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004041 }
4042}
4043
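/*
 * For stateful CPUID leaves, advance the KVM_CPUID_FLAG_STATE_READ_NEXT
 * marker to the next entry with the same function number, wrapping
 * around the table.
 */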
Dan Kenigsberg07716712007-11-21 17:10:04 +02004044static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
4045{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004046 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
4047 int j, nent = vcpu->arch.cpuid_nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02004048
4049 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
4050 /* when no next entry is found, the current entry[i] is reselected */
Nitin A Kamble0fdf8e52008-11-05 15:56:21 -08004051 for (j = i + 1; ; j = (j + 1) % nent) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004052 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
Dan Kenigsberg07716712007-11-21 17:10:04 +02004053 if (ej->function == e->function) {
4054 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
4055 return j;
4056 }
4057 }
4058 return 0; /* silence gcc, even though control never reaches here */
4059}
4060
4061/* find an entry with matching function, matching index (if needed), and that
4062 * should be read next (if it's stateful) */
4063static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
4064 u32 function, u32 index)
4065{
4066 if (e->function != function)
4067 return 0;
4068 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
4069 return 0;
4070 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
Amit Shah19355472009-01-14 16:56:00 +00004071 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
Dan Kenigsberg07716712007-11-21 17:10:04 +02004072 return 0;
4073 return 1;
4074}
4075
Alexander Grafd8017472008-11-25 20:17:11 +01004076struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
4077 u32 function, u32 index)
Hollis Blanchard8776e512007-10-31 17:24:24 -05004078{
4079 int i;
Alexander Grafd8017472008-11-25 20:17:11 +01004080 struct kvm_cpuid_entry2 *best = NULL;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004081
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004082 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
Alexander Grafd8017472008-11-25 20:17:11 +01004083 struct kvm_cpuid_entry2 *e;
4084
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004085 e = &vcpu->arch.cpuid_entries[i];
Dan Kenigsberg07716712007-11-21 17:10:04 +02004086 if (is_matching_cpuid_entry(e, function, index)) {
4087 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
4088 move_to_next_stateful_cpuid_entry(vcpu, i);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004089 best = e;
4090 break;
4091 }
4092 /*
4093 * Both basic or both extended?
4094 */
4095 if (((e->function ^ function) & 0x80000000) == 0)
4096 if (!best || e->function > best->function)
4097 best = e;
4098 }
Alexander Grafd8017472008-11-25 20:17:11 +01004099 return best;
4100}
Sheng Yang0e851882009-12-18 16:48:46 +08004101EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
Alexander Grafd8017472008-11-25 20:17:11 +01004102
Dong, Eddie82725b22009-03-30 16:21:08 +08004103int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
4104{
4105 struct kvm_cpuid_entry2 *best;
4106
4107 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
4108 if (best)
4109 return best->eax & 0xff;
4110 return 36;
4111}
4112
Alexander Grafd8017472008-11-25 20:17:11 +01004113void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
4114{
4115 u32 function, index;
4116 struct kvm_cpuid_entry2 *best;
4117
4118 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
4119 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
4120 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
4121 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
4122 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
4123 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
4124 best = kvm_find_cpuid_entry(vcpu, function, index);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004125 if (best) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004126 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
4127 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
4128 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
4129 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004130 }
Hollis Blanchard8776e512007-10-31 17:24:24 -05004131 kvm_x86_ops->skip_emulated_instruction(vcpu);
Marcelo Tosatti229456f2009-06-17 09:22:14 -03004132 trace_kvm_cpuid(function,
4133 kvm_register_read(vcpu, VCPU_REGS_RAX),
4134 kvm_register_read(vcpu, VCPU_REGS_RBX),
4135 kvm_register_read(vcpu, VCPU_REGS_RCX),
4136 kvm_register_read(vcpu, VCPU_REGS_RDX));
Hollis Blanchard8776e512007-10-31 17:24:24 -05004137}
4138EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
Hollis Blanchardd0752062007-10-31 17:24:25 -05004139
4140/*
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004141 * Check if userspace requested an interrupt window, and that the
4142 * interrupt window is open.
4143 *
4144 * No need to exit to userspace if we already have an interrupt queued.
4145 */
Avi Kivity851ba692009-08-24 11:10:17 +03004146static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004147{
Gleb Natapov80618232009-04-21 17:44:56 +03004148 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
Avi Kivity851ba692009-08-24 11:10:17 +03004149 vcpu->run->request_interrupt_window &&
Gleb Natapov5df56642009-04-21 17:44:59 +03004150 kvm_arch_interrupt_allowed(vcpu));
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004151}
4152
Avi Kivity851ba692009-08-24 11:10:17 +03004153static void post_kvm_run_save(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004154{
Avi Kivity851ba692009-08-24 11:10:17 +03004155 struct kvm_run *kvm_run = vcpu->run;
4156
Jan Kiszka91586a32009-10-05 13:07:21 +02004157 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004158 kvm_run->cr8 = kvm_get_cr8(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004159 kvm_run->apic_base = kvm_get_apic_base(vcpu);
Jan Kiszka45312202008-12-11 16:54:54 +01004160 if (irqchip_in_kernel(vcpu->kvm))
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004161 kvm_run->ready_for_interrupt_injection = 1;
Jan Kiszka45312202008-12-11 16:54:54 +01004162 else
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004163 kvm_run->ready_for_interrupt_injection =
Gleb Natapovfa9726b2009-05-11 13:35:47 +03004164 kvm_arch_interrupt_allowed(vcpu) &&
4165 !kvm_cpu_has_interrupt(vcpu) &&
4166 !kvm_event_needs_reinjection(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004167}
4168
Avi Kivityb93463a2007-10-25 16:52:32 +02004169static void vapic_enter(struct kvm_vcpu *vcpu)
4170{
4171 struct kvm_lapic *apic = vcpu->arch.apic;
4172 struct page *page;
4173
4174 if (!apic || !apic->vapic_addr)
4175 return;
4176
4177 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
Izik Eidus72dc67a2008-02-10 18:04:15 +02004178
4179 vcpu->arch.apic->vapic_page = page;
Avi Kivityb93463a2007-10-25 16:52:32 +02004180}
4181
4182static void vapic_exit(struct kvm_vcpu *vcpu)
4183{
4184 struct kvm_lapic *apic = vcpu->arch.apic;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004185 int idx;
Avi Kivityb93463a2007-10-25 16:52:32 +02004186
4187 if (!apic || !apic->vapic_addr)
4188 return;
4189
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004190 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivityb93463a2007-10-25 16:52:32 +02004191 kvm_release_page_dirty(apic->vapic_page);
4192 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004193 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivityb93463a2007-10-25 16:52:32 +02004194}
4195
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004196static void update_cr8_intercept(struct kvm_vcpu *vcpu)
4197{
4198 int max_irr, tpr;
4199
4200 if (!kvm_x86_ops->update_cr8_intercept)
4201 return;
4202
Avi Kivity88c808f2009-08-17 22:49:40 +03004203 if (!vcpu->arch.apic)
4204 return;
4205
Gleb Natapov8db3baa2009-05-11 13:35:54 +03004206 if (!vcpu->arch.apic->vapic_addr)
4207 max_irr = kvm_lapic_find_highest_irr(vcpu);
4208 else
4209 max_irr = -1;
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004210
4211 if (max_irr != -1)
4212 max_irr >>= 4;
4213
4214 tpr = kvm_lapic_get_cr8(vcpu);
4215
4216 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
4217}
4218
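/*
 * Re-inject any event left pending from the last exit (exception, NMI
 * or interrupt); otherwise inject a newly pending NMI or external
 * interrupt if the guest can currently accept one.
 */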
Avi Kivity851ba692009-08-24 11:10:17 +03004219static void inject_pending_event(struct kvm_vcpu *vcpu)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004220{
4221 /* try to reinject previous events if any */
Gleb Natapovb59bb7b2009-07-09 15:33:51 +03004222 if (vcpu->arch.exception.pending) {
4223 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
4224 vcpu->arch.exception.has_error_code,
4225 vcpu->arch.exception.error_code);
4226 return;
4227 }
4228
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004229 if (vcpu->arch.nmi_injected) {
4230 kvm_x86_ops->set_nmi(vcpu);
4231 return;
4232 }
4233
4234 if (vcpu->arch.interrupt.pending) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004235 kvm_x86_ops->set_irq(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004236 return;
4237 }
4238
4239 /* try to inject new event if pending */
4240 if (vcpu->arch.nmi_pending) {
4241 if (kvm_x86_ops->nmi_allowed(vcpu)) {
4242 vcpu->arch.nmi_pending = false;
4243 vcpu->arch.nmi_injected = true;
4244 kvm_x86_ops->set_nmi(vcpu);
4245 }
4246 } else if (kvm_cpu_has_interrupt(vcpu)) {
4247 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004248 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
4249 false);
4250 kvm_x86_ops->set_irq(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004251 }
4252 }
4253}
4254
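/*
 * Perform one guest entry/exit cycle: process pending vcpu requests,
 * inject pending events, run the guest with interrupts disabled and
 * handle the resulting exit.  Returns > 0 to keep looping in
 * __vcpu_run(), 0 to exit to userspace, or a negative error code.
 */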
Avi Kivity851ba692009-08-24 11:10:17 +03004255static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004256{
4257 int r;
Gleb Natapov6a8b1d12009-05-11 13:35:51 +03004258 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
Avi Kivity851ba692009-08-24 11:10:17 +03004259 vcpu->run->request_interrupt_window;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004260
Marcelo Tosatti2e53d632008-02-20 14:47:24 -05004261 if (vcpu->requests)
4262 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
4263 kvm_mmu_unload(vcpu);
4264
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004265 r = kvm_mmu_reload(vcpu);
4266 if (unlikely(r))
4267 goto out;
4268
Avi Kivity2f52d582008-01-16 12:49:30 +02004269 if (vcpu->requests) {
4270 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
Marcelo Tosatti2f599712008-05-27 12:10:20 -03004271 __kvm_migrate_timers(vcpu);
Gerd Hoffmannc8076602009-02-04 17:52:04 +01004272 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
4273 kvm_write_guest_time(vcpu);
Marcelo Tosatti4731d4c2008-09-23 13:18:39 -03004274 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
4275 kvm_mmu_sync_roots(vcpu);
Marcelo Tosattid4acf7e2008-06-06 16:37:35 -03004276 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
4277 kvm_x86_ops->tlb_flush(vcpu);
Avi Kivityb93463a2007-10-25 16:52:32 +02004278 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
4279 &vcpu->requests)) {
Avi Kivity851ba692009-08-24 11:10:17 +03004280 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
Avi Kivityb93463a2007-10-25 16:52:32 +02004281 r = 0;
4282 goto out;
4283 }
Joerg Roedel71c4dfa2008-02-26 16:49:16 +01004284 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
Avi Kivity851ba692009-08-24 11:10:17 +03004285 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
Joerg Roedel71c4dfa2008-02-26 16:49:16 +01004286 r = 0;
4287 goto out;
4288 }
Avi Kivity02daab22009-12-30 12:40:26 +02004289 if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
4290 vcpu->fpu_active = 0;
4291 kvm_x86_ops->fpu_deactivate(vcpu);
4292 }
Avi Kivity2f52d582008-01-16 12:49:30 +02004293 }
Avi Kivityb93463a2007-10-25 16:52:32 +02004294
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004295 preempt_disable();
4296
4297 kvm_x86_ops->prepare_guest_switch(vcpu);
Avi Kivity2608d7a2010-01-21 15:31:45 +02004298 if (vcpu->fpu_active)
4299 kvm_load_guest_fpu(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004300
4301 local_irq_disable();
4302
Marcelo Tosatti32f88402009-05-07 17:55:12 -03004303 clear_bit(KVM_REQ_KICK, &vcpu->requests);
4304 smp_mb__after_clear_bit();
4305
Marcelo Tosattid7690172008-09-08 15:23:48 -03004306 if (vcpu->requests || need_resched() || signal_pending(current)) {
Gleb Natapovc7f0f242009-07-07 15:27:32 +03004307 set_bit(KVM_REQ_KICK, &vcpu->requests);
Avi Kivity6c1428012008-01-15 18:27:32 +02004308 local_irq_enable();
4309 preempt_enable();
4310 r = 1;
4311 goto out;
4312 }
4313
Avi Kivity851ba692009-08-24 11:10:17 +03004314 inject_pending_event(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004315
Gleb Natapov6a8b1d12009-05-11 13:35:51 +03004316 /* enable NMI/IRQ window open exits if needed */
4317 if (vcpu->arch.nmi_pending)
4318 kvm_x86_ops->enable_nmi_window(vcpu);
4319 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
4320 kvm_x86_ops->enable_irq_window(vcpu);
4321
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004322 if (kvm_lapic_enabled(vcpu)) {
Gleb Natapov8db3baa2009-05-11 13:35:54 +03004323 update_cr8_intercept(vcpu);
4324 kvm_lapic_sync_to_vapic(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004325 }
Avi Kivityb93463a2007-10-25 16:52:32 +02004326
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004327 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Marcelo Tosatti3200f402008-03-29 20:17:59 -03004328
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004329 kvm_guest_enter();
4330
Jan Kiszka42dbaa52008-12-15 13:52:10 +01004331 if (unlikely(vcpu->arch.switch_db_regs)) {
Jan Kiszka42dbaa52008-12-15 13:52:10 +01004332 set_debugreg(0, 7);
4333 set_debugreg(vcpu->arch.eff_db[0], 0);
4334 set_debugreg(vcpu->arch.eff_db[1], 1);
4335 set_debugreg(vcpu->arch.eff_db[2], 2);
4336 set_debugreg(vcpu->arch.eff_db[3], 3);
4337 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004338
Marcelo Tosatti229456f2009-06-17 09:22:14 -03004339 trace_kvm_entry(vcpu->vcpu_id);
Avi Kivity851ba692009-08-24 11:10:17 +03004340 kvm_x86_ops->run(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004341
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004342 /*
4343 * If the guest has used debug registers, at least dr7
4344 * will be disabled while returning to the host.
4345 * If we don't have active breakpoints in the host, we don't
4346 * care about the messed up debug address registers. But if
4347 * we have some of them active, restore the old state.
4348 */
Frederic Weisbecker59d8eb52009-11-10 11:03:12 +01004349 if (hw_breakpoint_active())
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004350 hw_breakpoint_restore();
Jan Kiszka42dbaa52008-12-15 13:52:10 +01004351
Marcelo Tosatti32f88402009-05-07 17:55:12 -03004352 set_bit(KVM_REQ_KICK, &vcpu->requests);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004353 local_irq_enable();
4354
4355 ++vcpu->stat.exits;
4356
4357 /*
4358 * We must have an instruction between local_irq_enable() and
4359 * kvm_guest_exit(), so the timer interrupt isn't delayed by
4360 * the interrupt shadow. The stat.exits increment will do nicely.
4361 * But we need to prevent reordering, hence this barrier():
4362 */
4363 barrier();
4364
4365 kvm_guest_exit();
4366
4367 preempt_enable();
4368
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004369 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Marcelo Tosatti3200f402008-03-29 20:17:59 -03004370
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004371 /*
4372 * Profile KVM exit RIPs:
4373 */
4374 if (unlikely(prof_on == KVM_PROFILING)) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004375 unsigned long rip = kvm_rip_read(vcpu);
4376 profile_hit(KVM_PROFILING, (void *)rip);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004377 }
4378
Avi Kivity298101d2007-11-25 13:41:11 +02004379
Avi Kivityb93463a2007-10-25 16:52:32 +02004380 kvm_lapic_sync_from_vapic(vcpu);
4381
Avi Kivity851ba692009-08-24 11:10:17 +03004382 r = kvm_x86_ops->handle_exit(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004383out:
Marcelo Tosattid7690172008-09-08 15:23:48 -03004384 return r;
4385}
4386
Gleb Natapov09cec752009-03-23 15:11:44 +02004387
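/*
 * Main vcpu execution loop: keeps calling vcpu_enter_guest() while the
 * vcpu is runnable, blocks while it is halted, and drops back to
 * userspace on signals, interrupt-window requests or errors.
 */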
Avi Kivity851ba692009-08-24 11:10:17 +03004388static int __vcpu_run(struct kvm_vcpu *vcpu)
Marcelo Tosattid7690172008-09-08 15:23:48 -03004389{
4390 int r;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004391 struct kvm *kvm = vcpu->kvm;
Marcelo Tosattid7690172008-09-08 15:23:48 -03004392
4393 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
Jan Kiszka1b10bf32008-09-30 10:41:06 +02004394 pr_debug("vcpu %d received sipi with vector # %x\n",
4395 vcpu->vcpu_id, vcpu->arch.sipi_vector);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004396 kvm_lapic_reset(vcpu);
Gleb Natapov5f179282008-10-07 15:42:33 +02004397 r = kvm_arch_vcpu_reset(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004398 if (r)
4399 return r;
4400 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004401 }
4402
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004403 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004404 vapic_enter(vcpu);
4405
4406 r = 1;
4407 while (r > 0) {
Gleb Natapovaf2152f2008-09-22 14:28:53 +03004408 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
Avi Kivity851ba692009-08-24 11:10:17 +03004409 r = vcpu_enter_guest(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004410 else {
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004411 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004412 kvm_vcpu_block(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004413 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004414 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
Gleb Natapov09cec752009-03-23 15:11:44 +02004415 {
4416 switch (vcpu->arch.mp_state) {
4417 case KVM_MP_STATE_HALTED:
Marcelo Tosattid7690172008-09-08 15:23:48 -03004418 vcpu->arch.mp_state =
Gleb Natapov09cec752009-03-23 15:11:44 +02004419 KVM_MP_STATE_RUNNABLE;
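				/* fall through */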
4420 case KVM_MP_STATE_RUNNABLE:
4421 break;
4422 case KVM_MP_STATE_SIPI_RECEIVED:
4423 default:
4424 r = -EINTR;
4425 break;
4426 }
4427 }
Marcelo Tosattid7690172008-09-08 15:23:48 -03004428 }
4429
Gleb Natapov09cec752009-03-23 15:11:44 +02004430 if (r <= 0)
4431 break;
4432
4433 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
4434 if (kvm_cpu_has_pending_timer(vcpu))
4435 kvm_inject_pending_timer_irqs(vcpu);
4436
Avi Kivity851ba692009-08-24 11:10:17 +03004437 if (dm_request_for_irq_injection(vcpu)) {
Gleb Natapov09cec752009-03-23 15:11:44 +02004438 r = -EINTR;
Avi Kivity851ba692009-08-24 11:10:17 +03004439 vcpu->run->exit_reason = KVM_EXIT_INTR;
Gleb Natapov09cec752009-03-23 15:11:44 +02004440 ++vcpu->stat.request_irq_exits;
4441 }
4442 if (signal_pending(current)) {
4443 r = -EINTR;
Avi Kivity851ba692009-08-24 11:10:17 +03004444 vcpu->run->exit_reason = KVM_EXIT_INTR;
Gleb Natapov09cec752009-03-23 15:11:44 +02004445 ++vcpu->stat.signal_exits;
4446 }
4447 if (need_resched()) {
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004448 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
Gleb Natapov09cec752009-03-23 15:11:44 +02004449 kvm_resched(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004450 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004451 }
4452 }
4453
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004454 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
Avi Kivity851ba692009-08-24 11:10:17 +03004455 post_kvm_run_save(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004456
Avi Kivityb93463a2007-10-25 16:52:32 +02004457 vapic_exit(vcpu);
4458
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004459 return r;
4460}
4461
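/*
 * Handle the KVM_RUN ioctl: finish any outstanding PIO or MMIO
 * emulation left over from the previous exit, then enter the vcpu
 * run loop.
 */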
4462int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4463{
4464 int r;
4465 sigset_t sigsaved;
4466
4467 vcpu_load(vcpu);
4468
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004469 if (vcpu->sigset_active)
4470 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
4471
Avi Kivityac9f6dc2008-07-06 15:48:31 +03004472 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
4473 kvm_vcpu_block(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004474 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
Avi Kivityac9f6dc2008-07-06 15:48:31 +03004475 r = -EAGAIN;
4476 goto out;
4477 }
4478
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004479 /* re-sync apic's tpr */
4480 if (!irqchip_in_kernel(vcpu->kvm))
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004481 kvm_set_cr8(vcpu, kvm_run->cr8);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004482
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004483 if (vcpu->arch.pio.cur_count) {
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004484 r = complete_pio(vcpu);
4485 if (r)
4486 goto out;
4487 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004488 if (vcpu->mmio_needed) {
4489 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
4490 vcpu->mmio_read_completed = 1;
4491 vcpu->mmio_needed = 0;
Marcelo Tosatti3200f402008-03-29 20:17:59 -03004492
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004493 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivity851ba692009-08-24 11:10:17 +03004494 r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
Sheng Yang571008d2008-01-02 14:49:22 +08004495 EMULTYPE_NO_DECODE);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004496 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004497 if (r == EMULATE_DO_MMIO) {
4498 /*
4499 * Read-modify-write. Back to userspace.
4500 */
4501 r = 0;
4502 goto out;
4503 }
4504 }
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004505 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
4506 kvm_register_write(vcpu, VCPU_REGS_RAX,
4507 kvm_run->hypercall.ret);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004508
Avi Kivity851ba692009-08-24 11:10:17 +03004509 r = __vcpu_run(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004510
4511out:
4512 if (vcpu->sigset_active)
4513 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
4514
4515 vcpu_put(vcpu);
4516 return r;
4517}
4518
4519int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4520{
4521 vcpu_load(vcpu);
4522
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004523 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4524 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4525 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4526 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4527 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4528 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
4529 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4530 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004531#ifdef CONFIG_X86_64
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004532 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
4533 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
4534 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
4535 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
4536 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
4537 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
4538 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
4539 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004540#endif
4541
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004542 regs->rip = kvm_rip_read(vcpu);
Jan Kiszka91586a32009-10-05 13:07:21 +02004543 regs->rflags = kvm_get_rflags(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004544
4545 vcpu_put(vcpu);
4546
4547 return 0;
4548}
4549
4550int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4551{
4552 vcpu_load(vcpu);
4553
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004554 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
4555 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
4556 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
4557 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
4558 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
4559 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
4560 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
4561 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004562#ifdef CONFIG_X86_64
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004563 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
4564 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
4565 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
4566 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
4567 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
4568 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
4569 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
4570 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004571#endif
4572
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004573 kvm_rip_write(vcpu, regs->rip);
Jan Kiszka91586a32009-10-05 13:07:21 +02004574 kvm_set_rflags(vcpu, regs->rflags);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004575
Jan Kiszkab4f14ab2008-04-30 17:59:04 +02004576 vcpu->arch.exception.pending = false;
4577
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004578 vcpu_put(vcpu);
4579
4580 return 0;
4581}
4582
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004583void kvm_get_segment(struct kvm_vcpu *vcpu,
4584 struct kvm_segment *var, int seg)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004585{
Harvey Harrison14af3f32008-02-19 10:25:50 -08004586 kvm_x86_ops->get_segment(vcpu, var, seg);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004587}
4588
4589void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
4590{
4591 struct kvm_segment cs;
4592
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004593 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004594 *db = cs.db;
4595 *l = cs.l;
4596}
4597EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
4598
4599int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4600 struct kvm_sregs *sregs)
4601{
4602 struct descriptor_table dt;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004603
4604 vcpu_load(vcpu);
4605
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004606 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
4607 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
4608 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
4609 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
4610 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
4611 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004612
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004613 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
4614 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004615
4616 kvm_x86_ops->get_idt(vcpu, &dt);
4617 sregs->idt.limit = dt.limit;
4618 sregs->idt.base = dt.base;
4619 kvm_x86_ops->get_gdt(vcpu, &dt);
4620 sregs->gdt.limit = dt.limit;
4621 sregs->gdt.base = dt.base;
4622
Avi Kivity4d4ec082009-12-29 18:07:30 +02004623 sregs->cr0 = kvm_read_cr0(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004624 sregs->cr2 = vcpu->arch.cr2;
4625 sregs->cr3 = vcpu->arch.cr3;
Avi Kivityfc78f512009-12-07 12:16:48 +02004626 sregs->cr4 = kvm_read_cr4(vcpu);
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004627 sregs->cr8 = kvm_get_cr8(vcpu);
Avi Kivityf6801df2010-01-21 15:31:50 +02004628 sregs->efer = vcpu->arch.efer;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004629 sregs->apic_base = kvm_get_apic_base(vcpu);
4630
Gleb Natapov923c61b2009-05-11 13:35:48 +03004631 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004632
Gleb Natapov36752c92009-05-11 13:35:53 +03004633 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
Gleb Natapov14d0bc12009-04-21 17:45:11 +03004634 set_bit(vcpu->arch.interrupt.nr,
4635 (unsigned long *)sregs->interrupt_bitmap);
Gleb Natapov16d7a192009-04-21 17:45:10 +03004636
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004637 vcpu_put(vcpu);
4638
4639 return 0;
4640}
4641
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03004642int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4643 struct kvm_mp_state *mp_state)
4644{
4645 vcpu_load(vcpu);
4646 mp_state->mp_state = vcpu->arch.mp_state;
4647 vcpu_put(vcpu);
4648 return 0;
4649}
4650
4651int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4652 struct kvm_mp_state *mp_state)
4653{
4654 vcpu_load(vcpu);
4655 vcpu->arch.mp_state = mp_state->mp_state;
4656 vcpu_put(vcpu);
4657 return 0;
4658}
4659
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004660static void kvm_set_segment(struct kvm_vcpu *vcpu,
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004661 struct kvm_segment *var, int seg)
4662{
Harvey Harrison14af3f32008-02-19 10:25:50 -08004663 kvm_x86_ops->set_segment(vcpu, var, seg);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004664}
4665
Izik Eidus37817f22008-03-24 23:14:53 +02004666static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
4667 struct kvm_segment *kvm_desct)
4668{
Akinobu Mita46a359e2009-07-18 23:58:32 +09004669 kvm_desct->base = get_desc_base(seg_desc);
4670 kvm_desct->limit = get_desc_limit(seg_desc);
Marcelo Tosattic93cd3a2008-07-19 19:08:07 -03004671 if (seg_desc->g) {
4672 kvm_desct->limit <<= 12;
4673 kvm_desct->limit |= 0xfff;
4674 }
Izik Eidus37817f22008-03-24 23:14:53 +02004675 kvm_desct->selector = selector;
4676 kvm_desct->type = seg_desc->type;
4677 kvm_desct->present = seg_desc->p;
4678 kvm_desct->dpl = seg_desc->dpl;
4679 kvm_desct->db = seg_desc->d;
4680 kvm_desct->s = seg_desc->s;
4681 kvm_desct->l = seg_desc->l;
4682 kvm_desct->g = seg_desc->g;
4683 kvm_desct->avl = seg_desc->avl;
4684 if (!selector)
4685 kvm_desct->unusable = 1;
4686 else
4687 kvm_desct->unusable = 0;
4688 kvm_desct->padding = 0;
4689}
4690
Amit Shahb8222ad2008-10-22 16:39:47 +05304691static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
4692 u16 selector,
4693 struct descriptor_table *dtable)
Izik Eidus37817f22008-03-24 23:14:53 +02004694{
4695 if (selector & 1 << 2) {
4696 struct kvm_segment kvm_seg;
4697
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004698 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
Izik Eidus37817f22008-03-24 23:14:53 +02004699
4700 if (kvm_seg.unusable)
4701 dtable->limit = 0;
4702 else
4703 dtable->limit = kvm_seg.limit;
4704 dtable->base = kvm_seg.base;
4705 }
4706 else
4707 kvm_x86_ops->get_gdt(vcpu, dtable);
4708}
4709
4710/* allowed only for 8-byte segment descriptors */
4711static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4712 struct desc_struct *seg_desc)
4713{
4714 struct descriptor_table dtable;
4715 u16 index = selector >> 3;
Takuya Yoshikawa6f550482010-02-18 12:15:00 +02004716 int ret;
4717 u32 err;
4718 gva_t addr;
Izik Eidus37817f22008-03-24 23:14:53 +02004719
Amit Shahb8222ad2008-10-22 16:39:47 +05304720 get_segment_descriptor_dtable(vcpu, selector, &dtable);
Izik Eidus37817f22008-03-24 23:14:53 +02004721
4722 if (dtable.limit < index * 8 + 7) {
4723 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
Takuya Yoshikawac125c602010-02-01 22:11:04 +09004724 return X86EMUL_PROPAGATE_FAULT;
Izik Eidus37817f22008-03-24 23:14:53 +02004725 }
Takuya Yoshikawa6f550482010-02-18 12:15:00 +02004726 addr = dtable.base + index * 8;
4727 ret = kvm_read_guest_virt_system(addr, seg_desc, sizeof(*seg_desc),
4728 vcpu, &err);
4729 if (ret == X86EMUL_PROPAGATE_FAULT)
4730 kvm_inject_page_fault(vcpu, addr, err);
4731
4732 return ret;
Izik Eidus37817f22008-03-24 23:14:53 +02004733}
4734
4735/* allowed only for 8-byte segment descriptors */
4736static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4737 struct desc_struct *seg_desc)
4738{
4739 struct descriptor_table dtable;
4740 u16 index = selector >> 3;
4741
Amit Shahb8222ad2008-10-22 16:39:47 +05304742 get_segment_descriptor_dtable(vcpu, selector, &dtable);
Izik Eidus37817f22008-03-24 23:14:53 +02004743
4744 if (dtable.limit < index * 8 + 7)
4745 return 1;
Gleb Natapov1871c602010-02-10 14:21:32 +02004746 return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
Izik Eidus37817f22008-03-24 23:14:53 +02004747}
4748
Gleb Natapov1871c602010-02-10 14:21:32 +02004749static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
4750 struct desc_struct *seg_desc)
4751{
4752 u32 base_addr = get_desc_base(seg_desc);
4753
4754 return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
4755}
4756
4757static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
Izik Eidus37817f22008-03-24 23:14:53 +02004758 struct desc_struct *seg_desc)
4759{
Akinobu Mita46a359e2009-07-18 23:58:32 +09004760 u32 base_addr = get_desc_base(seg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02004761
Gleb Natapov1871c602010-02-10 14:21:32 +02004762 return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
Izik Eidus37817f22008-03-24 23:14:53 +02004763}
4764
Izik Eidus37817f22008-03-24 23:14:53 +02004765static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
4766{
4767 struct kvm_segment kvm_seg;
4768
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004769 kvm_get_segment(vcpu, &kvm_seg, seg);
Izik Eidus37817f22008-03-24 23:14:53 +02004770 return kvm_seg.selector;
4771}
4772
Harvey Harrison2259e3a2008-08-22 13:29:17 -07004773static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
Avi Kivityf4bbd9a2008-08-20 15:51:42 +03004774{
4775 struct kvm_segment segvar = {
4776 .base = selector << 4,
4777 .limit = 0xffff,
4778 .selector = selector,
4779 .type = 3,
4780 .present = 1,
4781 .dpl = 3,
4782 .db = 0,
4783 .s = 1,
4784 .l = 0,
4785 .g = 0,
4786 .avl = 0,
4787 .unusable = 0,
4788 };
4789 kvm_x86_ops->set_segment(vcpu, &segvar, seg);
4790 return 0;
4791}
4792
Anthony Liguoric0c7c042009-08-11 15:57:59 -05004793static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
4794{
4795 return (seg != VCPU_SREG_LDTR) &&
4796 (seg != VCPU_SREG_TR) &&
Jan Kiszka91586a32009-10-05 13:07:21 +02004797 (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
Anthony Liguoric0c7c042009-08-11 15:57:59 -05004798}
4799
Marcelo Tosatticb84b552009-11-11 17:29:49 -02004800static void kvm_check_segment_descriptor(struct kvm_vcpu *vcpu, int seg,
4801 u16 selector)
4802{
4803 /* NULL selector is not valid for CS and SS */
4804 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
4805 if (!selector)
4806 kvm_queue_exception_e(vcpu, TS_VECTOR, selector >> 3);
4807}
4808
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004809int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4810 int type_bits, int seg)
Izik Eidus37817f22008-03-24 23:14:53 +02004811{
4812 struct kvm_segment kvm_seg;
Gleb Natapove01c2422010-01-25 12:01:04 +02004813 struct desc_struct seg_desc;
Izik Eidus37817f22008-03-24 23:14:53 +02004814
Avi Kivity3eeb3282010-01-21 15:31:48 +02004815 if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
Avi Kivityf4bbd9a2008-08-20 15:51:42 +03004816 return kvm_load_realmode_segment(vcpu, selector, seg);
Gleb Natapove01c2422010-01-25 12:01:04 +02004817
4818 if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
Izik Eidus37817f22008-03-24 23:14:53 +02004819 return 1;
Gleb Natapove01c2422010-01-25 12:01:04 +02004820 seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
Marcelo Tosatticb84b552009-11-11 17:29:49 -02004821
4822 kvm_check_segment_descriptor(vcpu, seg, selector);
Izik Eidus37817f22008-03-24 23:14:53 +02004823 kvm_seg.type |= type_bits;
4824
4825 if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
4826 seg != VCPU_SREG_LDTR)
4827 if (!kvm_seg.s)
4828 kvm_seg.unusable = 1;
4829
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004830 kvm_set_segment(vcpu, &kvm_seg, seg);
Gleb Natapove01c2422010-01-25 12:01:04 +02004831 if (selector && !kvm_seg.unusable && kvm_seg.s) {
4832 /* mark segment as accessed */
4833 seg_desc.type |= 1;
4834 save_guest_segment_descriptor(vcpu, selector, &seg_desc);
4835 }
Izik Eidus37817f22008-03-24 23:14:53 +02004836 return 0;
4837}
4838
4839static void save_state_to_tss32(struct kvm_vcpu *vcpu,
4840 struct tss_segment_32 *tss)
4841{
4842 tss->cr3 = vcpu->arch.cr3;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004843 tss->eip = kvm_rip_read(vcpu);
Jan Kiszka91586a32009-10-05 13:07:21 +02004844 tss->eflags = kvm_get_rflags(vcpu);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004845 tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4846 tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4847 tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4848 tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4849 tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4850 tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4851 tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4852 tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
Izik Eidus37817f22008-03-24 23:14:53 +02004853 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4854 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4855 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4856 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4857 tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
4858 tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
4859 tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
Izik Eidus37817f22008-03-24 23:14:53 +02004860}
4861
4862static int load_state_from_tss32(struct kvm_vcpu *vcpu,
4863 struct tss_segment_32 *tss)
4864{
4865 kvm_set_cr3(vcpu, tss->cr3);
4866
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004867 kvm_rip_write(vcpu, tss->eip);
Jan Kiszka91586a32009-10-05 13:07:21 +02004868 kvm_set_rflags(vcpu, tss->eflags | 2);
Izik Eidus37817f22008-03-24 23:14:53 +02004869
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004870 kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
4871 kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
4872 kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
4873 kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
4874 kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
4875 kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
4876 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
4877 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
Izik Eidus37817f22008-03-24 23:14:53 +02004878
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004879 if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
Izik Eidus37817f22008-03-24 23:14:53 +02004880 return 1;
4881
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004882 if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
Izik Eidus37817f22008-03-24 23:14:53 +02004883 return 1;
4884
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004885 if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
Izik Eidus37817f22008-03-24 23:14:53 +02004886 return 1;
4887
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004888 if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
Izik Eidus37817f22008-03-24 23:14:53 +02004889 return 1;
4890
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004891 if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
Izik Eidus37817f22008-03-24 23:14:53 +02004892 return 1;
4893
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004894 if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
Izik Eidus37817f22008-03-24 23:14:53 +02004895 return 1;
4896
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004897 if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
Izik Eidus37817f22008-03-24 23:14:53 +02004898 return 1;
4899 return 0;
4900}
4901
4902static void save_state_to_tss16(struct kvm_vcpu *vcpu,
4903 struct tss_segment_16 *tss)
4904{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004905 tss->ip = kvm_rip_read(vcpu);
Jan Kiszka91586a32009-10-05 13:07:21 +02004906 tss->flag = kvm_get_rflags(vcpu);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004907 tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4908 tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4909 tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4910 tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4911 tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4912 tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4913 tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
4914 tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
Izik Eidus37817f22008-03-24 23:14:53 +02004915
4916 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4917 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4918 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4919 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4920 tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
Izik Eidus37817f22008-03-24 23:14:53 +02004921}
4922
4923static int load_state_from_tss16(struct kvm_vcpu *vcpu,
4924 struct tss_segment_16 *tss)
4925{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004926 kvm_rip_write(vcpu, tss->ip);
Jan Kiszka91586a32009-10-05 13:07:21 +02004927 kvm_set_rflags(vcpu, tss->flag | 2);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004928 kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
4929 kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
4930 kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
4931 kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
4932 kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
4933 kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
4934 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
4935 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
Izik Eidus37817f22008-03-24 23:14:53 +02004936
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004937 if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
Izik Eidus37817f22008-03-24 23:14:53 +02004938 return 1;
4939
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004940 if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
Izik Eidus37817f22008-03-24 23:14:53 +02004941 return 1;
4942
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004943 if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
Izik Eidus37817f22008-03-24 23:14:53 +02004944 return 1;
4945
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004946 if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
Izik Eidus37817f22008-03-24 23:14:53 +02004947 return 1;
4948
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004949 if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
Izik Eidus37817f22008-03-24 23:14:53 +02004950 return 1;
4951 return 0;
4952}
4953
Harvey Harrison8b2cf732008-04-27 12:14:13 -07004954static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
Gleb Natapovb237ac32009-03-30 16:03:24 +03004955 u16 old_tss_sel, u32 old_tss_base,
4956 struct desc_struct *nseg_desc)
Izik Eidus37817f22008-03-24 23:14:53 +02004957{
4958 struct tss_segment_16 tss_segment_16;
4959 int ret = 0;
4960
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03004961 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4962 sizeof tss_segment_16))
Izik Eidus37817f22008-03-24 23:14:53 +02004963 goto out;
4964
4965 save_state_to_tss16(vcpu, &tss_segment_16);
Izik Eidus37817f22008-03-24 23:14:53 +02004966
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03004967 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4968 sizeof tss_segment_16))
Izik Eidus37817f22008-03-24 23:14:53 +02004969 goto out;
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03004970
Gleb Natapov1871c602010-02-10 14:21:32 +02004971 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03004972 &tss_segment_16, sizeof tss_segment_16))
4973 goto out;
4974
Gleb Natapovb237ac32009-03-30 16:03:24 +03004975 if (old_tss_sel != 0xffff) {
4976 tss_segment_16.prev_task_link = old_tss_sel;
4977
4978 if (kvm_write_guest(vcpu->kvm,
Gleb Natapov1871c602010-02-10 14:21:32 +02004979 get_tss_base_addr_write(vcpu, nseg_desc),
Gleb Natapovb237ac32009-03-30 16:03:24 +03004980 &tss_segment_16.prev_task_link,
4981 sizeof tss_segment_16.prev_task_link))
4982 goto out;
4983 }
4984
Izik Eidus37817f22008-03-24 23:14:53 +02004985 if (load_state_from_tss16(vcpu, &tss_segment_16))
4986 goto out;
4987
4988 ret = 1;
4989out:
4990 return ret;
4991}
4992
Harvey Harrison8b2cf732008-04-27 12:14:13 -07004993static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
Gleb Natapovb237ac32009-03-30 16:03:24 +03004994 u16 old_tss_sel, u32 old_tss_base,
Izik Eidus37817f22008-03-24 23:14:53 +02004995 struct desc_struct *nseg_desc)
4996{
4997 struct tss_segment_32 tss_segment_32;
4998 int ret = 0;
4999
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03005000 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
5001 sizeof tss_segment_32))
Izik Eidus37817f22008-03-24 23:14:53 +02005002 goto out;
5003
5004 save_state_to_tss32(vcpu, &tss_segment_32);
Izik Eidus37817f22008-03-24 23:14:53 +02005005
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03005006 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
5007 sizeof tss_segment_32))
Izik Eidus37817f22008-03-24 23:14:53 +02005008 goto out;
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03005009
Gleb Natapov1871c602010-02-10 14:21:32 +02005010 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03005011 &tss_segment_32, sizeof tss_segment_32))
5012 goto out;
5013
Gleb Natapovb237ac32009-03-30 16:03:24 +03005014 if (old_tss_sel != 0xffff) {
5015 tss_segment_32.prev_task_link = old_tss_sel;
5016
5017 if (kvm_write_guest(vcpu->kvm,
Gleb Natapov1871c602010-02-10 14:21:32 +02005018 get_tss_base_addr_write(vcpu, nseg_desc),
Gleb Natapovb237ac32009-03-30 16:03:24 +03005019 &tss_segment_32.prev_task_link,
5020 sizeof tss_segment_32.prev_task_link))
5021 goto out;
5022 }
5023
Izik Eidus37817f22008-03-24 23:14:53 +02005024 if (load_state_from_tss32(vcpu, &tss_segment_32))
5025 goto out;
5026
5027 ret = 1;
5028out:
5029 return ret;
5030}
5031
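/*
 * Emulate a hardware task switch: save the current register state into
 * the outgoing TSS (16- or 32-bit), load the incoming TSS, and update
 * the busy bits and EFLAGS.NT according to the switch reason.
 */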
5032int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
5033{
5034 struct kvm_segment tr_seg;
5035 struct desc_struct cseg_desc;
5036 struct desc_struct nseg_desc;
5037 int ret = 0;
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03005038 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
5039 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
Izik Eidus37817f22008-03-24 23:14:53 +02005040
Gleb Natapov1871c602010-02-10 14:21:32 +02005041 old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
Izik Eidus37817f22008-03-24 23:14:53 +02005042
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03005043 /* FIXME: Handle errors. Failure to read either TSS or its
5044 * descriptor should generate a page fault.
5045 */
Izik Eidus37817f22008-03-24 23:14:53 +02005046 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
5047 goto out;
5048
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03005049 if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
Izik Eidus37817f22008-03-24 23:14:53 +02005050 goto out;
5051
Izik Eidus37817f22008-03-24 23:14:53 +02005052 if (reason != TASK_SWITCH_IRET) {
5053 int cpl;
5054
5055 cpl = kvm_x86_ops->get_cpl(vcpu);
5056 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
5057 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
5058 return 1;
5059 }
5060 }
5061
Akinobu Mita46a359e2009-07-18 23:58:32 +09005062 if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
Izik Eidus37817f22008-03-24 23:14:53 +02005063 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
5064 return 1;
5065 }
5066
5067 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
Izik Eidus3fe913e2008-04-28 18:23:52 +03005068 cseg_desc.type &= ~(1 << 1); /* clear the B (busy) flag */
Marcelo Tosatti34198bf82008-07-16 19:07:11 -03005069 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02005070 }
5071
5072 if (reason == TASK_SWITCH_IRET) {
Jan Kiszka91586a32009-10-05 13:07:21 +02005073 u32 eflags = kvm_get_rflags(vcpu);
5074 kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
Izik Eidus37817f22008-03-24 23:14:53 +02005075 }
5076
Gleb Natapov64a7ec02009-03-30 16:03:29 +03005077 /* set back link to prev task only if NT bit is set in eflags;
5078 note that old_tss_sel is not used after this point */
5079 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
5080 old_tss_sel = 0xffff;
Izik Eidus37817f22008-03-24 23:14:53 +02005081
5082 if (nseg_desc.type & 8)
Gleb Natapovb237ac32009-03-30 16:03:24 +03005083 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
5084 old_tss_base, &nseg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02005085 else
Gleb Natapovb237ac32009-03-30 16:03:24 +03005086 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
5087 old_tss_base, &nseg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02005088
5089 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
Jan Kiszka91586a32009-10-05 13:07:21 +02005090 u32 eflags = kvm_get_rflags(vcpu);
5091 kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT);
Izik Eidus37817f22008-03-24 23:14:53 +02005092 }
5093
5094 if (reason != TASK_SWITCH_IRET) {
Izik Eidus3fe913e2008-04-28 18:23:52 +03005095 nseg_desc.type |= (1 << 1);
Izik Eidus37817f22008-03-24 23:14:53 +02005096 save_guest_segment_descriptor(vcpu, tss_selector,
5097 &nseg_desc);
5098 }
5099
Avi Kivity4d4ec082009-12-29 18:07:30 +02005100 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
Izik Eidus37817f22008-03-24 23:14:53 +02005101 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
5102 tr_seg.type = 11;
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02005103 kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
Izik Eidus37817f22008-03-24 23:14:53 +02005104out:
Izik Eidus37817f22008-03-24 23:14:53 +02005105 return ret;
5106}
5107EXPORT_SYMBOL_GPL(kvm_task_switch);
5108
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005109int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5110 struct kvm_sregs *sregs)
5111{
5112 int mmu_reset_needed = 0;
Gleb Natapov923c61b2009-05-11 13:35:48 +03005113 int pending_vec, max_bits;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005114 struct descriptor_table dt;
5115
5116 vcpu_load(vcpu);
5117
5118 dt.limit = sregs->idt.limit;
5119 dt.base = sregs->idt.base;
5120 kvm_x86_ops->set_idt(vcpu, &dt);
5121 dt.limit = sregs->gdt.limit;
5122 dt.base = sregs->gdt.base;
5123 kvm_x86_ops->set_gdt(vcpu, &dt);
5124
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005125 vcpu->arch.cr2 = sregs->cr2;
5126 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
Jan Kiszkadc7e7952009-07-01 20:52:03 +02005127 vcpu->arch.cr3 = sregs->cr3;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005128
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02005129 kvm_set_cr8(vcpu, sregs->cr8);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005130
Avi Kivityf6801df2010-01-21 15:31:50 +02005131 mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005132 kvm_x86_ops->set_efer(vcpu, sregs->efer);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005133 kvm_set_apic_base(vcpu, sregs->apic_base);
5134
Avi Kivity4d4ec082009-12-29 18:07:30 +02005135 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005136 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
Paul Knowlesd7306162008-02-06 11:02:35 +00005137 vcpu->arch.cr0 = sregs->cr0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005138
Avi Kivityfc78f512009-12-07 12:16:48 +02005139 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005140 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
Marcelo Tosatti7c93be442009-10-26 16:48:33 -02005141 if (!is_long_mode(vcpu) && is_pae(vcpu)) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005142 load_pdptrs(vcpu, vcpu->arch.cr3);
Marcelo Tosatti7c93be442009-10-26 16:48:33 -02005143 mmu_reset_needed = 1;
5144 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005145
5146 if (mmu_reset_needed)
5147 kvm_mmu_reset_context(vcpu);
5148
Gleb Natapov923c61b2009-05-11 13:35:48 +03005149 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
5150 pending_vec = find_first_bit(
5151 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
5152 if (pending_vec < max_bits) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03005153 kvm_queue_interrupt(vcpu, pending_vec, false);
Gleb Natapov923c61b2009-05-11 13:35:48 +03005154 pr_debug("Set back pending irq %d\n", pending_vec);
5155 if (irqchip_in_kernel(vcpu->kvm))
5156 kvm_pic_clear_isr_ack(vcpu->kvm);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005157 }
5158
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02005159 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5160 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5161 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5162 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5163 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5164 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005165
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02005166 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5167 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005168
Mikhail Ershov5f0269f2009-08-03 14:58:25 +03005169 update_cr8_intercept(vcpu);
5170
Marcelo Tosatti9c3e4aa2008-09-10 16:40:55 -03005171 /* Older userspace won't unhalt the vcpu on reset. */
Gleb Natapovc5af89b2009-06-09 15:56:26 +03005172 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
Marcelo Tosatti9c3e4aa2008-09-10 16:40:55 -03005173 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
Avi Kivity3eeb3282010-01-21 15:31:48 +02005174 !is_protmode(vcpu))
Marcelo Tosatti9c3e4aa2008-09-10 16:40:55 -03005175 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5176
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005177 vcpu_put(vcpu);
5178
5179 return 0;
5180}
5181
Jan Kiszkad0bfb942008-12-15 13:52:10 +01005182int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
5183 struct kvm_guest_debug *dbg)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005184{
Jan Kiszka355be0b2009-10-03 00:31:21 +02005185 unsigned long rflags;
Jan Kiszkaae675ef2008-12-15 13:52:10 +01005186 int i, r;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005187
5188 vcpu_load(vcpu);
5189
Jan Kiszka4f926bf22009-10-30 12:46:59 +01005190 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
5191 r = -EBUSY;
5192 if (vcpu->arch.exception.pending)
5193 goto unlock_out;
5194 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
5195 kvm_queue_exception(vcpu, DB_VECTOR);
5196 else
5197 kvm_queue_exception(vcpu, BP_VECTOR);
5198 }
5199
Jan Kiszka91586a32009-10-05 13:07:21 +02005200 /*
5201 * Read rflags while the potentially injected trace flags are still
5202 * filtered out.
5203 */
5204 rflags = kvm_get_rflags(vcpu);
Jan Kiszka355be0b2009-10-03 00:31:21 +02005205
5206 vcpu->guest_debug = dbg->control;
5207 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
5208 vcpu->guest_debug = 0;
5209
5210 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
Jan Kiszkaae675ef2008-12-15 13:52:10 +01005211 for (i = 0; i < KVM_NR_DB_REGS; ++i)
5212 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
5213 vcpu->arch.switch_db_regs =
5214 (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
5215 } else {
5216 for (i = 0; i < KVM_NR_DB_REGS; i++)
5217 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
5218 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
5219 }
5220
Jan Kiszka94fe45d2009-10-18 13:24:44 +02005221 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
5222 vcpu->arch.singlestep_cs =
5223 get_segment_selector(vcpu, VCPU_SREG_CS);
5224 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu);
5225 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005226
Jan Kiszka91586a32009-10-05 13:07:21 +02005227 /*
5228 * Trigger an rflags update that will inject or remove the trace
5229 * flags.
5230 */
5231 kvm_set_rflags(vcpu, rflags);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01005232
Jan Kiszka355be0b2009-10-03 00:31:21 +02005233 kvm_x86_ops->set_guest_debug(vcpu, dbg);
5234
Jan Kiszka4f926bf22009-10-30 12:46:59 +01005235 r = 0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005236
Jan Kiszka4f926bf22009-10-30 12:46:59 +01005237unlock_out:
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005238 vcpu_put(vcpu);
5239
5240 return r;
5241}
5242
5243/*
Hollis Blanchardd0752062007-10-31 17:24:25 -05005244 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
5245 * we have asm/x86/processor.h
5246 */
5247struct fxsave {
5248 u16 cwd;
5249 u16 swd;
5250 u16 twd;
5251 u16 fop;
5252 u64 rip;
5253 u64 rdp;
5254 u32 mxcsr;
5255 u32 mxcsr_mask;
5256 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
5257#ifdef CONFIG_X86_64
5258 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
5259#else
5260 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
5261#endif
5262};
5263
Zhang Xiantao8b006792007-11-16 13:05:55 +08005264/*
5265 * Translate a guest virtual address to a guest physical address.
5266 */
5267int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
5268 struct kvm_translation *tr)
5269{
5270 unsigned long vaddr = tr->linear_address;
5271 gpa_t gpa;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005272 int idx;
Zhang Xiantao8b006792007-11-16 13:05:55 +08005273
5274 vcpu_load(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005275 idx = srcu_read_lock(&vcpu->kvm->srcu);
Gleb Natapov1871c602010-02-10 14:21:32 +02005276 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005277 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Zhang Xiantao8b006792007-11-16 13:05:55 +08005278 tr->physical_address = gpa;
5279 tr->valid = gpa != UNMAPPED_GVA;
5280 tr->writeable = 1;
5281 tr->usermode = 0;
Zhang Xiantao8b006792007-11-16 13:05:55 +08005282 vcpu_put(vcpu);
5283
5284 return 0;
5285}
5286
Hollis Blanchardd0752062007-10-31 17:24:25 -05005287int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5288{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005289 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
Hollis Blanchardd0752062007-10-31 17:24:25 -05005290
5291 vcpu_load(vcpu);
5292
5293 memcpy(fpu->fpr, fxsave->st_space, 128);
5294 fpu->fcw = fxsave->cwd;
5295 fpu->fsw = fxsave->swd;
5296 fpu->ftwx = fxsave->twd;
5297 fpu->last_opcode = fxsave->fop;
5298 fpu->last_ip = fxsave->rip;
5299 fpu->last_dp = fxsave->rdp;
5300 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
5301
5302 vcpu_put(vcpu);
5303
5304 return 0;
5305}
5306
5307int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5308{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005309 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
Hollis Blanchardd0752062007-10-31 17:24:25 -05005310
5311 vcpu_load(vcpu);
5312
5313 memcpy(fxsave->st_space, fpu->fpr, 128);
5314 fxsave->cwd = fpu->fcw;
5315 fxsave->swd = fpu->fsw;
5316 fxsave->twd = fpu->ftwx;
5317 fxsave->fop = fpu->last_opcode;
5318 fxsave->rip = fpu->last_ip;
5319 fxsave->rdp = fpu->last_dp;
5320 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
5321
5322 vcpu_put(vcpu);
5323
5324 return 0;
5325}
5326
5327void fx_init(struct kvm_vcpu *vcpu)
5328{
5329 unsigned after_mxcsr_mask;
5330
Andrea Arcangelibc1a34f2008-05-01 18:43:33 +02005331 /*
5332 * Touch the FPU the first time in a non-atomic context: if this is
5333 * the first FPU instruction, the exception handler will fire before
5334 * the instruction returns and will have to allocate RAM with
5335 * GFP_KERNEL.
5336 */
5337 if (!used_math())
Avi Kivityd6e88ae2008-07-10 16:53:33 +03005338 kvm_fx_save(&vcpu->arch.host_fx_image);
Andrea Arcangelibc1a34f2008-05-01 18:43:33 +02005339
Hollis Blanchardd0752062007-10-31 17:24:25 -05005340 /* Initialize guest FPU by resetting ours and saving into guest's */
5341 preempt_disable();
Avi Kivityd6e88ae2008-07-10 16:53:33 +03005342 kvm_fx_save(&vcpu->arch.host_fx_image);
5343 kvm_fx_finit();
5344 kvm_fx_save(&vcpu->arch.guest_fx_image);
5345 kvm_fx_restore(&vcpu->arch.host_fx_image);
Hollis Blanchardd0752062007-10-31 17:24:25 -05005346 preempt_enable();
5347
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005348 vcpu->arch.cr0 |= X86_CR0_ET;
Hollis Blanchardd0752062007-10-31 17:24:25 -05005349 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005350 vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
5351 memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
Hollis Blanchardd0752062007-10-31 17:24:25 -05005352 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
5353}
5354EXPORT_SYMBOL_GPL(fx_init);
5355
5356void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
5357{
Avi Kivity2608d7a2010-01-21 15:31:45 +02005358 if (vcpu->guest_fpu_loaded)
Hollis Blanchardd0752062007-10-31 17:24:25 -05005359 return;
5360
5361 vcpu->guest_fpu_loaded = 1;
Avi Kivityd6e88ae2008-07-10 16:53:33 +03005362 kvm_fx_save(&vcpu->arch.host_fx_image);
5363 kvm_fx_restore(&vcpu->arch.guest_fx_image);
Avi Kivity0c048512010-01-21 15:31:52 +02005364 trace_kvm_fpu(1);
Hollis Blanchardd0752062007-10-31 17:24:25 -05005365}
Hollis Blanchardd0752062007-10-31 17:24:25 -05005366
5367void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
5368{
5369 if (!vcpu->guest_fpu_loaded)
5370 return;
5371
5372 vcpu->guest_fpu_loaded = 0;
Avi Kivityd6e88ae2008-07-10 16:53:33 +03005373 kvm_fx_save(&vcpu->arch.guest_fx_image);
5374 kvm_fx_restore(&vcpu->arch.host_fx_image);
Avi Kivityf096ed82007-11-18 13:54:33 +02005375 ++vcpu->stat.fpu_reload;
Avi Kivity02daab22009-12-30 12:40:26 +02005376 set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
Avi Kivity0c048512010-01-21 15:31:52 +02005377 trace_kvm_fpu(0);
Hollis Blanchardd0752062007-10-31 17:24:25 -05005378}
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005379
5380void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
5381{
Joerg Roedel7f1ea202009-02-25 16:08:31 +01005382 if (vcpu->arch.time_page) {
5383 kvm_release_page_dirty(vcpu->arch.time_page);
5384 vcpu->arch.time_page = NULL;
5385 }
5386
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005387 kvm_x86_ops->vcpu_free(vcpu);
5388}
5389
5390struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5391 unsigned int id)
5392{
Avi Kivity26e52152007-11-20 15:30:24 +02005393 return kvm_x86_ops->vcpu_create(kvm, id);
5394}
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005395
Avi Kivity26e52152007-11-20 15:30:24 +02005396int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
5397{
5398 int r;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005399
5400 /* We do fxsave: this must be aligned. */
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005401 BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005402
Sheng Yang0bed3b52008-10-09 16:01:54 +08005403 vcpu->arch.mtrr_state.have_fixed = 1;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005404 vcpu_load(vcpu);
5405 r = kvm_arch_vcpu_reset(vcpu);
5406 if (r == 0)
5407 r = kvm_mmu_setup(vcpu);
5408 vcpu_put(vcpu);
5409 if (r < 0)
5410 goto free_vcpu;
5411
Avi Kivity26e52152007-11-20 15:30:24 +02005412 return 0;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005413free_vcpu:
5414 kvm_x86_ops->vcpu_free(vcpu);
Avi Kivity26e52152007-11-20 15:30:24 +02005415 return r;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005416}
5417
Hollis Blanchardd40ccc62007-11-19 14:04:43 -06005418void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005419{
5420 vcpu_load(vcpu);
5421 kvm_mmu_unload(vcpu);
5422 vcpu_put(vcpu);
5423
5424 kvm_x86_ops->vcpu_free(vcpu);
5425}
5426
5427int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5428{
Jan Kiszka448fa4a2008-09-26 09:30:48 +02005429 vcpu->arch.nmi_pending = false;
5430 vcpu->arch.nmi_injected = false;
5431
Jan Kiszka42dbaa52008-12-15 13:52:10 +01005432 vcpu->arch.switch_db_regs = 0;
5433 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
5434 vcpu->arch.dr6 = DR6_FIXED_1;
5435 vcpu->arch.dr7 = DR7_FIXED_1;
5436
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005437 return kvm_x86_ops->vcpu_reset(vcpu);
5438}
5439
Alexander Graf10474ae2009-09-15 11:37:46 +02005440int kvm_arch_hardware_enable(void *garbage)
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005441{
Zachary Amsden0cca7902009-09-29 11:38:35 -10005442 /*
5443 * Since this may be called from a hotplug notification,
5444 * we can't get the CPU frequency directly.
5445 */
5446 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5447 int cpu = raw_smp_processor_id();
5448 per_cpu(cpu_tsc_khz, cpu) = 0;
5449 }
Avi Kivity18863bd2009-09-07 11:12:18 +03005450
5451 kvm_shared_msr_cpu_online();
5452
Alexander Graf10474ae2009-09-15 11:37:46 +02005453 return kvm_x86_ops->hardware_enable(garbage);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005454}
5455
5456void kvm_arch_hardware_disable(void *garbage)
5457{
5458 kvm_x86_ops->hardware_disable(garbage);
Avi Kivity3548bab2009-11-28 14:18:47 +02005459 drop_user_return_notifiers(garbage);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005460}
5461
5462int kvm_arch_hardware_setup(void)
5463{
5464 return kvm_x86_ops->hardware_setup();
5465}
5466
5467void kvm_arch_hardware_unsetup(void)
5468{
5469 kvm_x86_ops->hardware_unsetup();
5470}
5471
5472void kvm_arch_check_processor_compat(void *rtn)
5473{
5474 kvm_x86_ops->check_processor_compatibility(rtn);
5475}
5476
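/*
 * Arch-side initialization of a newly allocated vcpu: choose the initial
 * MP state (only the BSP starts RUNNABLE when the irqchip is in the
 * kernel), then allocate the pio_data page, the MMU, the in-kernel local
 * APIC and the MCE bank array, unwinding in reverse order on failure.
 */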
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
		goto fail_free_lapic;
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

	return 0;
fail_free_lapic:
	kvm_free_lapic(vcpu);
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int idx;

	kfree(vcpu->arch.mce_banks);
	kvm_free_lapic(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_mmu_destroy(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	free_page((unsigned long)vcpu->arch.pio_data);
}

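/*
 * Allocate and initialize the arch-specific part of a VM: the memory
 * alias table, the active MMU page and assigned-device lists, the
 * reserved userspace irq source bit and the VM creation-time TSC value.
 */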
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
	if (!kvm->arch.aliases) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	rdtscll(kvm->arch.vm_init_tsc);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

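/*
 * Tear down all vcpus of a dying VM: unload their MMUs first, since they
 * may still pin guest pages, then free each vcpu and clear the vcpu array.
 */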
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/*
	 * Unpin any mmu pages first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_unload_vcpu_mmu(vcpu);
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
}

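/*
 * Final arch teardown of a VM: detach IOMMU mappings, free the in-kernel
 * PIT, PIC and IOAPIC, the vcpus and guest memory, drop the APIC access
 * and EPT identity pages, and release the SRCU structure and alias table.
 */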
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm->arch.aliases);
	kfree(kvm);
}

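/*
 * First half of a memory slot update, run before the new slot is
 * installed; for legacy slots without user-allocated memory the backing
 * anonymous mapping is created here on behalf of userspace.
 */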
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc)
{
	int npages = memslot->npages;

	/*
	 * To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			memslot->userspace_addr = userspace_addr;
		}
	}

	return 0;
}

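/*
 * Second half of a memory slot update, run after the new slot is
 * installed: unmap the legacy kernel-created backing store when a slot
 * is deleted, recompute the MMU page budget and drop write access to
 * the slot's existing mappings.
 */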
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;

	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
		int ret;

		down_write(&current->mm->mmap_sem);
		ret = do_munmap(current->mm, old.userspace_addr,
				old.npages * PAGE_SIZE);
		up_write(&current->mm->mmap_sem);
		if (ret < 0)
			printk(KERN_WARNING
			       "kvm_vm_ioctl_set_memory_region: "
			       "failed to munmap memory\n");
	}

	spin_lock(&kvm->mmu_lock);
	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
	kvm_reload_remote_mmus(kvm);
}

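/*
 * A vcpu is runnable if it is in the running state, has received a SIPI,
 * has an NMI pending, or has an interrupt pending that it can accept.
 */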
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
		|| vcpu->arch.nmi_pending ||
		(kvm_arch_interrupt_allowed(vcpu) &&
		 kvm_cpu_has_interrupt(vcpu));
}

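/*
 * Kick a vcpu: wake it if it is blocked in halt and, if it appears to be
 * executing guest code on another CPU, send a reschedule IPI to pull it
 * out of guest mode; the KVM_REQ_KICK bit suppresses the IPI when the
 * vcpu is not actually in guest mode.
 */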
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
			smp_send_reschedule(cpu);
	put_cpu();
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

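/*
 * RFLAGS accessors: hide the TF and RF bits that the host sets while
 * single-stepping the guest on behalf of userspace (KVM_GUESTDBG_SINGLESTEP),
 * and restore them when userspace writes RFLAGS back at the recorded
 * single-step location.
 */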
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    vcpu->arch.singlestep_cs ==
			get_segment_selector(vcpu, VCPU_SREG_CS) &&
	    vcpu->arch.singlestep_rip == kvm_rip_read(vcpu))
		rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);