1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/frame.h>
4#include <linux/percpu.h>
5
6#include <asm/debugreg.h>
7#include <asm/mmu_context.h>
8
9#include "cpuid.h"
10#include "hyperv.h"
11#include "mmu.h"
12#include "nested.h"
13#include "trace.h"
14#include "x86.h"
15
16static bool __read_mostly enable_shadow_vmcs = 1;
17module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
18
19static bool __read_mostly nested_early_check = 0;
20module_param(nested_early_check, bool, S_IRUGO);
21
22extern const ulong vmx_early_consistency_check_return;
23
24/*
25 * Hyper-V requires all of these, so mark them as supported even though
26 * they are just treated the same as all-context.
27 */
28#define VMX_VPID_EXTENT_SUPPORTED_MASK \
29 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
30 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
31 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
32 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
33
34#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
35
36enum {
37 VMX_VMREAD_BITMAP,
38 VMX_VMWRITE_BITMAP,
39 VMX_BITMAP_NR
40};
41static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
42
43#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
44#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
45
46static u16 shadow_read_only_fields[] = {
47#define SHADOW_FIELD_RO(x) x,
48#include "vmcs_shadow_fields.h"
49};
50static int max_shadow_read_only_fields =
51 ARRAY_SIZE(shadow_read_only_fields);
52
53static u16 shadow_read_write_fields[] = {
54#define SHADOW_FIELD_RW(x) x,
55#include "vmcs_shadow_fields.h"
56};
57static int max_shadow_read_write_fields =
58 ARRAY_SIZE(shadow_read_write_fields);
59
60void init_vmcs_shadow_fields(void)
61{
62 int i, j;
63
64 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
65 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
66
67 for (i = j = 0; i < max_shadow_read_only_fields; i++) {
68 u16 field = shadow_read_only_fields[i];
69
70 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
71 (i + 1 == max_shadow_read_only_fields ||
72 shadow_read_only_fields[i + 1] != field + 1))
73 pr_err("Missing field from shadow_read_only_field %x\n",
74 field + 1);
75
76 clear_bit(field, vmx_vmread_bitmap);
77#ifdef CONFIG_X86_64
78 if (field & 1)
79 continue;
80#endif
81 if (j < i)
82 shadow_read_only_fields[j] = field;
83 j++;
84 }
85 max_shadow_read_only_fields = j;
86
87 for (i = j = 0; i < max_shadow_read_write_fields; i++) {
88 u16 field = shadow_read_write_fields[i];
89
90 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
91 (i + 1 == max_shadow_read_write_fields ||
92 shadow_read_write_fields[i + 1] != field + 1))
93 pr_err("Missing field from shadow_read_write_field %x\n",
94 field + 1);
95
96 /*
97 * PML and the preemption timer can be emulated, but the
98 * processor cannot vmwrite to fields that don't exist
99 * on bare metal.
100 */
101 switch (field) {
102 case GUEST_PML_INDEX:
103 if (!cpu_has_vmx_pml())
104 continue;
105 break;
106 case VMX_PREEMPTION_TIMER_VALUE:
107 if (!cpu_has_vmx_preemption_timer())
108 continue;
109 break;
110 case GUEST_INTR_STATUS:
111 if (!cpu_has_vmx_apicv())
112 continue;
113 break;
114 default:
115 break;
116 }
117
118 clear_bit(field, vmx_vmwrite_bitmap);
119 clear_bit(field, vmx_vmread_bitmap);
120#ifdef CONFIG_X86_64
121 if (field & 1)
122 continue;
123#endif
124 if (j < i)
125 shadow_read_write_fields[j] = field;
126 j++;
127 }
128 max_shadow_read_write_fields = j;
129}
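/*
 * Note on init_vmcs_shadow_fields(): 64-bit VMCS fields are expected to
 * appear in the lists together with their "high" companion (field + 1),
 * which is what the "Missing field" warnings above check for. On 64-bit
 * kernels the full field is accessed directly, so the odd-numbered high
 * halves are dropped when the arrays are compacted, although their bits
 * in the vmread/vmwrite bitmaps are still cleared.
 */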
130
131/*
132 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
133 * set the success or error code of an emulated VMX instruction (as specified
134 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
135 * instruction.
136 */
137static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
138{
139 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
140 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
141 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
142 return kvm_skip_emulated_instruction(vcpu);
143}
144
145static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
146{
147 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
148 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
149 X86_EFLAGS_SF | X86_EFLAGS_OF))
150 | X86_EFLAGS_CF);
151 return kvm_skip_emulated_instruction(vcpu);
152}
153
154static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
155 u32 vm_instruction_error)
156{
157 struct vcpu_vmx *vmx = to_vmx(vcpu);
158
159 /*
160 * failValid writes the error number to the current VMCS, which
161 * can't be done if there isn't a current VMCS.
162 */
163 if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
164 return nested_vmx_failInvalid(vcpu);
165
166 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
167 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
168 X86_EFLAGS_SF | X86_EFLAGS_OF))
169 | X86_EFLAGS_ZF);
170 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
171 /*
172 * We don't need to force a shadow sync because
173 * VM_INSTRUCTION_ERROR is not shadowed
174 */
175 return kvm_skip_emulated_instruction(vcpu);
176}
177
178static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
179{
180 /* TODO: don't just reset the guest here. */
181 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
182 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
183}
184
185static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
186{
187 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
188 vmcs_write64(VMCS_LINK_POINTER, -1ull);
189}
190
191static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
192{
193 struct vcpu_vmx *vmx = to_vmx(vcpu);
194
195 if (!vmx->nested.hv_evmcs)
196 return;
197
198 kunmap(vmx->nested.hv_evmcs_page);
199 kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
200 vmx->nested.hv_evmcs_vmptr = -1ull;
201 vmx->nested.hv_evmcs_page = NULL;
202 vmx->nested.hv_evmcs = NULL;
203}
204
205/*
206 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
207 * just stops using VMX.
208 */
209static void free_nested(struct kvm_vcpu *vcpu)
210{
211 struct vcpu_vmx *vmx = to_vmx(vcpu);
212
213 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
214 return;
215
216 vmx->nested.vmxon = false;
217 vmx->nested.smm.vmxon = false;
218 free_vpid(vmx->nested.vpid02);
219 vmx->nested.posted_intr_nv = -1;
220 vmx->nested.current_vmptr = -1ull;
221 if (enable_shadow_vmcs) {
222 vmx_disable_shadow_vmcs(vmx);
223 vmcs_clear(vmx->vmcs01.shadow_vmcs);
224 free_vmcs(vmx->vmcs01.shadow_vmcs);
225 vmx->vmcs01.shadow_vmcs = NULL;
226 }
227 kfree(vmx->nested.cached_vmcs12);
228 kfree(vmx->nested.cached_shadow_vmcs12);
229 /* Unpin physical memory we referred to in the vmcs02 */
230 if (vmx->nested.apic_access_page) {
231 kvm_release_page_dirty(vmx->nested.apic_access_page);
232 vmx->nested.apic_access_page = NULL;
233 }
234 if (vmx->nested.virtual_apic_page) {
235 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
236 vmx->nested.virtual_apic_page = NULL;
237 }
238 if (vmx->nested.pi_desc_page) {
239 kunmap(vmx->nested.pi_desc_page);
240 kvm_release_page_dirty(vmx->nested.pi_desc_page);
241 vmx->nested.pi_desc_page = NULL;
242 vmx->nested.pi_desc = NULL;
243 }
244
245 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
246
247 nested_release_evmcs(vcpu);
248
249 free_loaded_vmcs(&vmx->nested.vmcs02);
250}
251
252static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
253{
254 struct vcpu_vmx *vmx = to_vmx(vcpu);
255 int cpu;
256
257 if (vmx->loaded_vmcs == vmcs)
258 return;
259
260 cpu = get_cpu();
261 vmx_vcpu_put(vcpu);
262 vmx->loaded_vmcs = vmcs;
263 vmx_vcpu_load(vcpu, cpu);
264 put_cpu();
265
266 vm_entry_controls_reset_shadow(vmx);
267 vm_exit_controls_reset_shadow(vmx);
268 vmx_segment_cache_clear(vmx);
269}
270
271/*
272 * Ensure that the current vmcs of the logical processor is the
273 * vmcs01 of the vcpu before calling free_nested().
274 */
275void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
276{
277 vcpu_load(vcpu);
278 vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
279 free_nested(vcpu);
280 vcpu_put(vcpu);
281}
282
283static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
284 struct x86_exception *fault)
285{
286 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
287 struct vcpu_vmx *vmx = to_vmx(vcpu);
288 u32 exit_reason;
289 unsigned long exit_qualification = vcpu->arch.exit_qualification;
290
291 if (vmx->nested.pml_full) {
292 exit_reason = EXIT_REASON_PML_FULL;
293 vmx->nested.pml_full = false;
294 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
295 } else if (fault->error_code & PFERR_RSVD_MASK)
296 exit_reason = EXIT_REASON_EPT_MISCONFIG;
297 else
298 exit_reason = EXIT_REASON_EPT_VIOLATION;
299
300 nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
301 vmcs12->guest_physical_address = fault->address;
302}
303
304static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
305{
306 WARN_ON(mmu_is_nested(vcpu));
307
308 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
309 kvm_init_shadow_ept_mmu(vcpu,
310 to_vmx(vcpu)->nested.msrs.ept_caps &
311 VMX_EPT_EXECUTE_ONLY_BIT,
312 nested_ept_ad_enabled(vcpu),
313 nested_ept_get_cr3(vcpu));
314 vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
315 vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
316 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
317 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
318
319 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
320}
321
322static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
323{
324 vcpu->arch.mmu = &vcpu->arch.root_mmu;
325 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
326}
327
328static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
329 u16 error_code)
330{
331 bool inequality, bit;
332
333 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
334 inequality =
335 (error_code & vmcs12->page_fault_error_code_mask) !=
336 vmcs12->page_fault_error_code_match;
337 return inequality ^ bit;
338}
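/*
 * The check above follows the SDM's page-fault filtering rule: with bit 14
 * (#PF) set in the exception bitmap, a page fault causes a VM-exit to L1
 * only if (error_code & PFEC_MASK) == PFEC_MATCH; with the bit clear, it
 * exits only if the masked error code does NOT match. The XOR of
 * "inequality" with the bitmap bit encodes exactly that.
 */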
339
340
341/*
342 * KVM wants to inject page faults it received into the guest. This function
343 * checks whether, in a nested guest, they need to be injected into L1 or L2.
344 */
345static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
346{
347 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
348 unsigned int nr = vcpu->arch.exception.nr;
349 bool has_payload = vcpu->arch.exception.has_payload;
350 unsigned long payload = vcpu->arch.exception.payload;
351
352 if (nr == PF_VECTOR) {
353 if (vcpu->arch.exception.nested_apf) {
354 *exit_qual = vcpu->arch.apf.nested_apf_token;
355 return 1;
356 }
357 if (nested_vmx_is_page_fault_vmexit(vmcs12,
358 vcpu->arch.exception.error_code)) {
359 *exit_qual = has_payload ? payload : vcpu->arch.cr2;
360 return 1;
361 }
362 } else if (vmcs12->exception_bitmap & (1u << nr)) {
363 if (nr == DB_VECTOR) {
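			/*
			 * For #DB, the payload is expected in the
			 * exit-qualification style layout rather than raw
			 * DR6: the always-one DR6 bits and DR6.BT are
			 * stripped, and DR6.RTM is inverted (in DR6 it reads
			 * as 1 when no RTM-related debug event occurred).
			 */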
364 if (!has_payload) {
365 payload = vcpu->arch.dr6;
366 payload &= ~(DR6_FIXED_1 | DR6_BT);
367 payload ^= DR6_RTM;
368 }
369 *exit_qual = payload;
370 } else
371 *exit_qual = 0;
372 return 1;
373 }
374
375 return 0;
376}
377
378
379static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
380 struct x86_exception *fault)
381{
382 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
383
384 WARN_ON(!is_guest_mode(vcpu));
385
386 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
387 !to_vmx(vcpu)->nested.nested_run_pending) {
388 vmcs12->vm_exit_intr_error_code = fault->error_code;
389 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
390 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
391 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
392 fault->address);
393 } else {
394 kvm_inject_page_fault(vcpu, fault);
395 }
396}
397
398static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
399{
400 return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
401}
402
403static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
404 struct vmcs12 *vmcs12)
405{
406 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
407 return 0;
408
409 if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
410 !page_address_valid(vcpu, vmcs12->io_bitmap_b))
411 return -EINVAL;
412
413 return 0;
414}
415
416static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
417 struct vmcs12 *vmcs12)
418{
419 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
420 return 0;
421
422 if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
423 return -EINVAL;
424
425 return 0;
426}
427
428static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
429 struct vmcs12 *vmcs12)
430{
431 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
432 return 0;
433
434 if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
435 return -EINVAL;
436
437 return 0;
438}
439
440/*
441 * Check if a write to the given MSR is intercepted by the L01 MSR bitmap.
442 */
443static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
444{
445 unsigned long *msr_bitmap;
446 int f = sizeof(unsigned long);
447
448 if (!cpu_has_vmx_msr_bitmap())
449 return true;
450
451 msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
452
453 if (msr <= 0x1fff) {
454 return !!test_bit(msr, msr_bitmap + 0x800 / f);
455 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
456 msr &= 0x1fff;
457 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
458 }
459
460 return true;
461}
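/*
 * For reference, the 4-KByte VMX MSR bitmap used above is laid out as four
 * 1-KByte regions (a set bit means the access is intercepted):
 *
 *   0x000 - 0x3ff: read  bitmap for MSRs 0x00000000 - 0x00001fff
 *   0x400 - 0x7ff: read  bitmap for MSRs 0xc0000000 - 0xc0001fff
 *   0x800 - 0xbff: write bitmap for MSRs 0x00000000 - 0x00001fff
 *   0xc00 - 0xfff: write bitmap for MSRs 0xc0000000 - 0xc0001fff
 *
 * which is why the write check above starts at byte offset 0x800 (low MSRs)
 * or 0xc00 (high MSRs).
 */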
462
463/*
464 * If an MSR is allowed by L0, we should check whether it is also allowed by L1.
465 * The corresponding bit will be cleared unless both L0 and L1 allow it.
466 */
467static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
468 unsigned long *msr_bitmap_nested,
469 u32 msr, int type)
470{
471 int f = sizeof(unsigned long);
472
473 /*
474 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
475 * have the write-low and read-high bitmap offsets the wrong way round.
476 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
477 */
478 if (msr <= 0x1fff) {
479 if (type & MSR_TYPE_R &&
480 !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
481 /* read-low */
482 __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
483
484 if (type & MSR_TYPE_W &&
485 !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
486 /* write-low */
487 __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
488
489 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
490 msr &= 0x1fff;
491 if (type & MSR_TYPE_R &&
492 !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
493 /* read-high */
494 __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
495
496 if (type & MSR_TYPE_W &&
497 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
498 /* write-high */
499 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
500
501 }
502}
503
504/*
505 * Merge L0's and L1's MSR bitmaps; return false to indicate that
506 * the hardware MSR bitmap should not be used for L2.
507 */
508static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
509 struct vmcs12 *vmcs12)
510{
511 int msr;
512 struct page *page;
513 unsigned long *msr_bitmap_l1;
514 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
515 /*
516 * pred_cmd & spec_ctrl are trying to verify two things:
517 *
518 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
519 * ensures that we do not accidentally generate an L02 MSR bitmap
520 * from the L12 MSR bitmap that is too permissive.
521 * 2. That L1 or L2s have actually used the MSR. This avoids
522 * unnecessarily merging of the bitmap if the MSR is unused. This
523 * works properly because we only update the L01 MSR bitmap lazily.
524 * So even if L0 should pass L1 these MSRs, the L01 bitmap is only
525 * updated to reflect this when L1 (or its L2s) actually write to
526 * the MSR.
527 */
528 bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
529 bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
530
531 /* Nothing to do if the MSR bitmap is not in use. */
532 if (!cpu_has_vmx_msr_bitmap() ||
533 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
534 return false;
535
536 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
537 !pred_cmd && !spec_ctrl)
538 return false;
539
540 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
541 if (is_error_page(page))
542 return false;
543
544 msr_bitmap_l1 = (unsigned long *)kmap(page);
545 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
546 /*
547 * L0 need not intercept reads for MSRs between 0x800 and 0x8ff; it
548 * just lets the processor take the value from the virtual-APIC page,
549 * so take those 256 bits directly from the L1 bitmap.
550 */
551 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
552 unsigned word = msr / BITS_PER_LONG;
553 msr_bitmap_l0[word] = msr_bitmap_l1[word];
554 msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
555 }
556 } else {
557 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
558 unsigned word = msr / BITS_PER_LONG;
559 msr_bitmap_l0[word] = ~0;
560 msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
561 }
562 }
563
564 nested_vmx_disable_intercept_for_msr(
565 msr_bitmap_l1, msr_bitmap_l0,
566 X2APIC_MSR(APIC_TASKPRI),
567 MSR_TYPE_W);
568
569 if (nested_cpu_has_vid(vmcs12)) {
570 nested_vmx_disable_intercept_for_msr(
571 msr_bitmap_l1, msr_bitmap_l0,
572 X2APIC_MSR(APIC_EOI),
573 MSR_TYPE_W);
574 nested_vmx_disable_intercept_for_msr(
575 msr_bitmap_l1, msr_bitmap_l0,
576 X2APIC_MSR(APIC_SELF_IPI),
577 MSR_TYPE_W);
578 }
579
580 if (spec_ctrl)
581 nested_vmx_disable_intercept_for_msr(
582 msr_bitmap_l1, msr_bitmap_l0,
583 MSR_IA32_SPEC_CTRL,
584 MSR_TYPE_R | MSR_TYPE_W);
585
586 if (pred_cmd)
587 nested_vmx_disable_intercept_for_msr(
588 msr_bitmap_l1, msr_bitmap_l0,
589 MSR_IA32_PRED_CMD,
590 MSR_TYPE_W);
591
592 kunmap(page);
593 kvm_release_page_clean(page);
594
595 return true;
596}
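/*
 * For reference: X2APIC_MSR(reg) maps an APIC MMIO register offset to its
 * x2APIC MSR, 0x800 + (reg >> 4). APIC_TASKPRI (0x80) therefore becomes
 * MSR 0x808 (TPR), APIC_EOI (0xb0) becomes 0x80b, and APIC_SELF_IPI (0x3f0)
 * becomes 0x83f, all within the 0x800-0x8ff range handled above.
 */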
597
598static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
599 struct vmcs12 *vmcs12)
600{
601 struct vmcs12 *shadow;
602 struct page *page;
603
604 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
605 vmcs12->vmcs_link_pointer == -1ull)
606 return;
607
608 shadow = get_shadow_vmcs12(vcpu);
609 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
610
611 memcpy(shadow, kmap(page), VMCS12_SIZE);
612
613 kunmap(page);
614 kvm_release_page_clean(page);
615}
616
617static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
618 struct vmcs12 *vmcs12)
619{
620 struct vcpu_vmx *vmx = to_vmx(vcpu);
621
622 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
623 vmcs12->vmcs_link_pointer == -1ull)
624 return;
625
626 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
627 get_shadow_vmcs12(vcpu), VMCS12_SIZE);
628}
629
630/*
631 * In nested virtualization, check if L1 has set
632 * VM_EXIT_ACK_INTR_ON_EXIT
633 */
634static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
635{
636 return get_vmcs12(vcpu)->vm_exit_controls &
637 VM_EXIT_ACK_INTR_ON_EXIT;
638}
639
640static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
641{
642 return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
643}
644
645static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
646 struct vmcs12 *vmcs12)
647{
648 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
649 !page_address_valid(vcpu, vmcs12->apic_access_addr))
650 return -EINVAL;
651 else
652 return 0;
653}
654
655static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
656 struct vmcs12 *vmcs12)
657{
658 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
659 !nested_cpu_has_apic_reg_virt(vmcs12) &&
660 !nested_cpu_has_vid(vmcs12) &&
661 !nested_cpu_has_posted_intr(vmcs12))
662 return 0;
663
664 /*
665 * If virtualize x2apic mode is enabled,
666 * virtualize apic access must be disabled.
667 */
668 if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
669 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
670 return -EINVAL;
671
672 /*
673 * If virtual interrupt delivery is enabled,
674 * we must exit on external interrupts.
675 */
676 if (nested_cpu_has_vid(vmcs12) &&
677 !nested_exit_on_intr(vcpu))
678 return -EINVAL;
679
680 /*
681 * bits 15:8 should be zero in posted_intr_nv;
682 * the descriptor address has already been checked
683 * in nested_get_vmcs12_pages.
684 *
685 * bits 5:0 of posted_intr_desc_addr should be zero.
686 */
687 if (nested_cpu_has_posted_intr(vmcs12) &&
688 (!nested_cpu_has_vid(vmcs12) ||
689 !nested_exit_intr_ack_set(vcpu) ||
690 (vmcs12->posted_intr_nv & 0xff00) ||
691 (vmcs12->posted_intr_desc_addr & 0x3f) ||
692 (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
693 return -EINVAL;
694
695 /* tpr shadow is needed by all apicv features. */
696 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
697 return -EINVAL;
698
699 return 0;
700}
701
702static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
703 u32 count, u64 addr)
704{
705 int maxphyaddr;
706
707 if (count == 0)
708 return 0;
709 maxphyaddr = cpuid_maxphyaddr(vcpu);
710 if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
711 (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
712 return -EINVAL;
713
714 return 0;
715}
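/*
 * Illustrative example for the check above: struct vmx_msr_entry is 16
 * bytes, so with maxphyaddr = 36 an MSR area at addr 0xf_ffff_fff0 with
 * count = 2 is rejected even though addr itself is aligned and below 2^36,
 * because addr + 2 * 16 - 1 = 0x10_0000_000f spills past the guest's
 * physical address width.
 */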
716
717static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
718 struct vmcs12 *vmcs12)
719{
720 if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
721 vmcs12->vm_exit_msr_load_addr) ||
722 nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
723 vmcs12->vm_exit_msr_store_addr) ||
724 nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
725 vmcs12->vm_entry_msr_load_addr))
726 return -EINVAL;
727
728 return 0;
729}
730
731static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
732 struct vmcs12 *vmcs12)
733{
734 if (!nested_cpu_has_pml(vmcs12))
735 return 0;
736
737 if (!nested_cpu_has_ept(vmcs12) ||
738 !page_address_valid(vcpu, vmcs12->pml_address))
739 return -EINVAL;
740
741 return 0;
742}
743
744static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
745 struct vmcs12 *vmcs12)
746{
747 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
748 !nested_cpu_has_ept(vmcs12))
749 return -EINVAL;
750 return 0;
751}
752
753static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
754 struct vmcs12 *vmcs12)
755{
756 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
757 !nested_cpu_has_ept(vmcs12))
758 return -EINVAL;
759 return 0;
760}
761
762static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
763 struct vmcs12 *vmcs12)
764{
765 if (!nested_cpu_has_shadow_vmcs(vmcs12))
766 return 0;
767
768 if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
769 !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
770 return -EINVAL;
771
772 return 0;
773}
774
775static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
776 struct vmx_msr_entry *e)
777{
778 /* x2APIC MSR accesses are not allowed */
779 if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
780 return -EINVAL;
781 if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
782 e->index == MSR_IA32_UCODE_REV)
783 return -EINVAL;
784 if (e->reserved != 0)
785 return -EINVAL;
786 return 0;
787}
788
789static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
790 struct vmx_msr_entry *e)
791{
792 if (e->index == MSR_FS_BASE ||
793 e->index == MSR_GS_BASE ||
794 e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
795 nested_vmx_msr_check_common(vcpu, e))
796 return -EINVAL;
797 return 0;
798}
799
800static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
801 struct vmx_msr_entry *e)
802{
803 if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
804 nested_vmx_msr_check_common(vcpu, e))
805 return -EINVAL;
806 return 0;
807}
808
809/*
810 * Load the guest's/host's MSRs at nested entry/exit.
811 * Return 0 for success, or the (1-based) index of the failing entry on failure.
812 */
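/*
 * Each entry in the guest's MSR-load/store areas is a 16-byte
 * struct vmx_msr_entry { u32 index; u32 reserved; u64 value; }, which is
 * why nested_vmx_store_msr() below reads only the first 2 * sizeof(u32)
 * bytes (index + reserved) and writes the value back at
 * offsetof(struct vmx_msr_entry, value).
 */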
813static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
814{
815 u32 i;
816 struct vmx_msr_entry e;
817 struct msr_data msr;
818
819 msr.host_initiated = false;
820 for (i = 0; i < count; i++) {
821 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
822 &e, sizeof(e))) {
823 pr_debug_ratelimited(
824 "%s cannot read MSR entry (%u, 0x%08llx)\n",
825 __func__, i, gpa + i * sizeof(e));
826 goto fail;
827 }
828 if (nested_vmx_load_msr_check(vcpu, &e)) {
829 pr_debug_ratelimited(
830 "%s check failed (%u, 0x%x, 0x%x)\n",
831 __func__, i, e.index, e.reserved);
832 goto fail;
833 }
834 msr.index = e.index;
835 msr.data = e.value;
836 if (kvm_set_msr(vcpu, &msr)) {
837 pr_debug_ratelimited(
838 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
839 __func__, i, e.index, e.value);
840 goto fail;
841 }
842 }
843 return 0;
844fail:
845 return i + 1;
846}
847
848static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
849{
850 u32 i;
851 struct vmx_msr_entry e;
852
853 for (i = 0; i < count; i++) {
854 struct msr_data msr_info;
855 if (kvm_vcpu_read_guest(vcpu,
856 gpa + i * sizeof(e),
857 &e, 2 * sizeof(u32))) {
858 pr_debug_ratelimited(
859 "%s cannot read MSR entry (%u, 0x%08llx)\n",
860 __func__, i, gpa + i * sizeof(e));
861 return -EINVAL;
862 }
863 if (nested_vmx_store_msr_check(vcpu, &e)) {
864 pr_debug_ratelimited(
865 "%s check failed (%u, 0x%x, 0x%x)\n",
866 __func__, i, e.index, e.reserved);
867 return -EINVAL;
868 }
869 msr_info.host_initiated = false;
870 msr_info.index = e.index;
871 if (kvm_get_msr(vcpu, &msr_info)) {
872 pr_debug_ratelimited(
873 "%s cannot read MSR (%u, 0x%x)\n",
874 __func__, i, e.index);
875 return -EINVAL;
876 }
877 if (kvm_vcpu_write_guest(vcpu,
878 gpa + i * sizeof(e) +
879 offsetof(struct vmx_msr_entry, value),
880 &msr_info.data, sizeof(msr_info.data))) {
881 pr_debug_ratelimited(
882 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
883 __func__, i, e.index, msr_info.data);
884 return -EINVAL;
885 }
886 }
887 return 0;
888}
889
890static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
891{
892 unsigned long invalid_mask;
893
894 invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
895 return (val & invalid_mask) == 0;
896}
897
898/*
899 * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
900 * emulating VM entry into a guest with EPT enabled.
901 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
902 * is assigned to entry_failure_code on failure.
903 */
904static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
905 u32 *entry_failure_code)
906{
907 if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
908 if (!nested_cr3_valid(vcpu, cr3)) {
909 *entry_failure_code = ENTRY_FAIL_DEFAULT;
910 return 1;
911 }
912
913 /*
914 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
915 * must not be dereferenced.
916 */
917 if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
918 !nested_ept) {
919 if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
920 *entry_failure_code = ENTRY_FAIL_PDPTE;
921 return 1;
922 }
923 }
924 }
925
926 if (!nested_ept)
927 kvm_mmu_new_cr3(vcpu, cr3, false);
928
929 vcpu->arch.cr3 = cr3;
930 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
931
932 kvm_init_mmu(vcpu, false);
933
934 return 0;
935}
936
937/*
938 * Returns true if KVM is able to configure the CPU to tag TLB entries
939 * populated by L2 differently than TLB entries populated
940 * by L1.
941 *
942 * If L1 uses EPT, then TLB entries are tagged with different EPTP.
943 *
944 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
945 * with different VPID (L1 entries are tagged with vmx->vpid
946 * while L2 entries are tagged with vmx->nested.vpid02).
947 */
948static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
949{
950 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
951
952 return nested_cpu_has_ept(vmcs12) ||
953 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
954}
955
956static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
957{
958 struct vcpu_vmx *vmx = to_vmx(vcpu);
959
960 return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
961}
962
963
964static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
965{
966 return fixed_bits_valid(control, low, high);
967}
968
969static inline u64 vmx_control_msr(u32 low, u32 high)
970{
971 return low | ((u64)high << 32);
972}
973
974static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
975{
976 superset &= mask;
977 subset &= mask;
978
979 return (superset | subset) == superset;
980}
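/*
 * is_bitwise_subset() returns true when, within 'mask', every bit set in
 * 'subset' is also set in 'superset'. For example,
 * is_bitwise_subset(0b1010, 0b0010, 0xf) is true, while
 * is_bitwise_subset(0b1010, 0b0110, 0xf) is not.
 */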
981
982static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
983{
984 const u64 feature_and_reserved =
985 /* feature (except bit 48; see below) */
986 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
987 /* reserved */
988 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
989 u64 vmx_basic = vmx->nested.msrs.basic;
990
991 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
992 return -EINVAL;
993
994 /*
995 * KVM does not emulate a version of VMX that constrains physical
996 * addresses of VMX structures (e.g. VMCS) to 32-bits.
997 */
998 if (data & BIT_ULL(48))
999 return -EINVAL;
1000
1001 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1002 vmx_basic_vmcs_revision_id(data))
1003 return -EINVAL;
1004
1005 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1006 return -EINVAL;
1007
1008 vmx->nested.msrs.basic = data;
1009 return 0;
1010}
1011
1012static int
1013vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1014{
1015 u64 supported;
1016 u32 *lowp, *highp;
1017
1018 switch (msr_index) {
1019 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1020 lowp = &vmx->nested.msrs.pinbased_ctls_low;
1021 highp = &vmx->nested.msrs.pinbased_ctls_high;
1022 break;
1023 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1024 lowp = &vmx->nested.msrs.procbased_ctls_low;
1025 highp = &vmx->nested.msrs.procbased_ctls_high;
1026 break;
1027 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1028 lowp = &vmx->nested.msrs.exit_ctls_low;
1029 highp = &vmx->nested.msrs.exit_ctls_high;
1030 break;
1031 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1032 lowp = &vmx->nested.msrs.entry_ctls_low;
1033 highp = &vmx->nested.msrs.entry_ctls_high;
1034 break;
1035 case MSR_IA32_VMX_PROCBASED_CTLS2:
1036 lowp = &vmx->nested.msrs.secondary_ctls_low;
1037 highp = &vmx->nested.msrs.secondary_ctls_high;
1038 break;
1039 default:
1040 BUG();
1041 }
1042
1043 supported = vmx_control_msr(*lowp, *highp);
1044
1045 /* Check must-be-1 bits are still 1. */
1046 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
1047 return -EINVAL;
1048
1049 /* Check must-be-0 bits are still 0. */
1050 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
1051 return -EINVAL;
1052
1053 *lowp = data;
1054 *highp = data >> 32;
1055 return 0;
1056}
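/*
 * Reminder of the VMX capability MSR format relied on above: bits 31:0
 * report the allowed 0-settings (a control bit must be 1 if the
 * corresponding bit here is 1) and bits 63:32 report the allowed 1-settings
 * (a control bit may be 1 only if the corresponding bit here is 1). Hence
 * the "must-be-1" check on the low half and the "must-be-0" check on the
 * high half.
 */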
1057
1058static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
1059{
1060 const u64 feature_and_reserved_bits =
1061 /* feature */
1062 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1063 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1064 /* reserved */
1065 GENMASK_ULL(13, 9) | BIT_ULL(31);
1066 u64 vmx_misc;
1067
1068 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
1069 vmx->nested.msrs.misc_high);
1070
1071 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
1072 return -EINVAL;
1073
1074 if ((vmx->nested.msrs.pinbased_ctls_high &
1075 PIN_BASED_VMX_PREEMPTION_TIMER) &&
1076 vmx_misc_preemption_timer_rate(data) !=
1077 vmx_misc_preemption_timer_rate(vmx_misc))
1078 return -EINVAL;
1079
1080 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1081 return -EINVAL;
1082
1083 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1084 return -EINVAL;
1085
1086 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1087 return -EINVAL;
1088
1089 vmx->nested.msrs.misc_low = data;
1090 vmx->nested.msrs.misc_high = data >> 32;
1091
1092 /*
1093 * If L1 has read-only VM-exit information fields, use the
1094 * less permissive vmx_vmwrite_bitmap to specify write
1095 * permissions for the shadow VMCS.
1096 */
1097 if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
1098 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
1099
1100 return 0;
1101}
1102
1103static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
1104{
1105 u64 vmx_ept_vpid_cap;
1106
1107 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
1108 vmx->nested.msrs.vpid_caps);
1109
1110 /* Every bit is either reserved or a feature bit. */
1111 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
1112 return -EINVAL;
1113
1114 vmx->nested.msrs.ept_caps = data;
1115 vmx->nested.msrs.vpid_caps = data >> 32;
1116 return 0;
1117}
1118
1119static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1120{
1121 u64 *msr;
1122
1123 switch (msr_index) {
1124 case MSR_IA32_VMX_CR0_FIXED0:
1125 msr = &vmx->nested.msrs.cr0_fixed0;
1126 break;
1127 case MSR_IA32_VMX_CR4_FIXED0:
1128 msr = &vmx->nested.msrs.cr4_fixed0;
1129 break;
1130 default:
1131 BUG();
1132 }
1133
1134 /*
1135 * 1 bits (which indicate bits which "must-be-1" during VMX operation)
1136 * must be 1 in the restored value.
1137 */
1138 if (!is_bitwise_subset(data, *msr, -1ULL))
1139 return -EINVAL;
1140
1141 *msr = data;
1142 return 0;
1143}
1144
1145/*
1146 * Called when userspace is restoring VMX MSRs.
1147 *
1148 * Returns 0 on success, non-0 otherwise.
1149 */
1150int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1151{
1152 struct vcpu_vmx *vmx = to_vmx(vcpu);
1153
1154 /*
1155 * Don't allow changes to the VMX capability MSRs while the vCPU
1156 * is in VMX operation.
1157 */
1158 if (vmx->nested.vmxon)
1159 return -EBUSY;
1160
1161 switch (msr_index) {
1162 case MSR_IA32_VMX_BASIC:
1163 return vmx_restore_vmx_basic(vmx, data);
1164 case MSR_IA32_VMX_PINBASED_CTLS:
1165 case MSR_IA32_VMX_PROCBASED_CTLS:
1166 case MSR_IA32_VMX_EXIT_CTLS:
1167 case MSR_IA32_VMX_ENTRY_CTLS:
1168 /*
1169 * The "non-true" VMX capability MSRs are generated from the
1170 * "true" MSRs, so we do not support restoring them directly.
1171 *
1172 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1173 * should restore the "true" MSRs with the must-be-1 bits
1174 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1175 * DEFAULT SETTINGS".
1176 */
1177 return -EINVAL;
1178 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1179 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1180 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1181 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1182 case MSR_IA32_VMX_PROCBASED_CTLS2:
1183 return vmx_restore_control_msr(vmx, msr_index, data);
1184 case MSR_IA32_VMX_MISC:
1185 return vmx_restore_vmx_misc(vmx, data);
1186 case MSR_IA32_VMX_CR0_FIXED0:
1187 case MSR_IA32_VMX_CR4_FIXED0:
1188 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1189 case MSR_IA32_VMX_CR0_FIXED1:
1190 case MSR_IA32_VMX_CR4_FIXED1:
1191 /*
1192 * These MSRs are generated based on the vCPU's CPUID, so we
1193 * do not support restoring them directly.
1194 */
1195 return -EINVAL;
1196 case MSR_IA32_VMX_EPT_VPID_CAP:
1197 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1198 case MSR_IA32_VMX_VMCS_ENUM:
1199 vmx->nested.msrs.vmcs_enum = data;
1200 return 0;
1201 default:
1202 /*
1203 * The rest of the VMX capability MSRs do not support restore.
1204 */
1205 return -EINVAL;
1206 }
1207}
1208
1209/* Returns 0 on success, non-0 otherwise. */
1210int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
1211{
1212 switch (msr_index) {
1213 case MSR_IA32_VMX_BASIC:
1214 *pdata = msrs->basic;
1215 break;
1216 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1217 case MSR_IA32_VMX_PINBASED_CTLS:
1218 *pdata = vmx_control_msr(
1219 msrs->pinbased_ctls_low,
1220 msrs->pinbased_ctls_high);
1221 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1222 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1223 break;
1224 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1225 case MSR_IA32_VMX_PROCBASED_CTLS:
1226 *pdata = vmx_control_msr(
1227 msrs->procbased_ctls_low,
1228 msrs->procbased_ctls_high);
1229 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1230 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1231 break;
1232 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1233 case MSR_IA32_VMX_EXIT_CTLS:
1234 *pdata = vmx_control_msr(
1235 msrs->exit_ctls_low,
1236 msrs->exit_ctls_high);
1237 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1238 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1239 break;
1240 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1241 case MSR_IA32_VMX_ENTRY_CTLS:
1242 *pdata = vmx_control_msr(
1243 msrs->entry_ctls_low,
1244 msrs->entry_ctls_high);
1245 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1246 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1247 break;
1248 case MSR_IA32_VMX_MISC:
1249 *pdata = vmx_control_msr(
1250 msrs->misc_low,
1251 msrs->misc_high);
1252 break;
1253 case MSR_IA32_VMX_CR0_FIXED0:
1254 *pdata = msrs->cr0_fixed0;
1255 break;
1256 case MSR_IA32_VMX_CR0_FIXED1:
1257 *pdata = msrs->cr0_fixed1;
1258 break;
1259 case MSR_IA32_VMX_CR4_FIXED0:
1260 *pdata = msrs->cr4_fixed0;
1261 break;
1262 case MSR_IA32_VMX_CR4_FIXED1:
1263 *pdata = msrs->cr4_fixed1;
1264 break;
1265 case MSR_IA32_VMX_VMCS_ENUM:
1266 *pdata = msrs->vmcs_enum;
1267 break;
1268 case MSR_IA32_VMX_PROCBASED_CTLS2:
1269 *pdata = vmx_control_msr(
1270 msrs->secondary_ctls_low,
1271 msrs->secondary_ctls_high);
1272 break;
1273 case MSR_IA32_VMX_EPT_VPID_CAP:
1274 *pdata = msrs->ept_caps |
1275 ((u64)msrs->vpid_caps << 32);
1276 break;
1277 case MSR_IA32_VMX_VMFUNC:
1278 *pdata = msrs->vmfunc_controls;
1279 break;
1280 default:
1281 return 1;
1282 }
1283
1284 return 0;
1285}
1286
1287/*
1288 * Copy the writable VMCS shadow fields back to the VMCS12, in case
1289 * they have been modified by the L1 guest. Note that the "read-only"
1290 * VM-exit information fields are actually writable if the vCPU is
1291 * configured to support "VMWRITE to any supported field in the VMCS."
1292 */
1293static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
1294{
1295 const u16 *fields[] = {
1296 shadow_read_write_fields,
1297 shadow_read_only_fields
1298 };
1299 const int max_fields[] = {
1300 max_shadow_read_write_fields,
1301 max_shadow_read_only_fields
1302 };
1303 int i, q;
1304 unsigned long field;
1305 u64 field_value;
1306 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1307
1308 preempt_disable();
1309
1310 vmcs_load(shadow_vmcs);
1311
1312 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1313 for (i = 0; i < max_fields[q]; i++) {
1314 field = fields[q][i];
1315 field_value = __vmcs_readl(field);
1316 vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
1317 }
1318 /*
1319 * Skip the VM-exit information fields if they are read-only.
1320 */
1321 if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
1322 break;
1323 }
1324
1325 vmcs_clear(shadow_vmcs);
1326 vmcs_load(vmx->loaded_vmcs->vmcs);
1327
1328 preempt_enable();
1329}
1330
1331static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
1332{
1333 const u16 *fields[] = {
1334 shadow_read_write_fields,
1335 shadow_read_only_fields
1336 };
1337 const int max_fields[] = {
1338 max_shadow_read_write_fields,
1339 max_shadow_read_only_fields
1340 };
1341 int i, q;
1342 unsigned long field;
1343 u64 field_value = 0;
1344 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1345
1346 vmcs_load(shadow_vmcs);
1347
1348 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1349 for (i = 0; i < max_fields[q]; i++) {
1350 field = fields[q][i];
1351 vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
1352 __vmcs_writel(field, field_value);
1353 }
1354 }
1355
1356 vmcs_clear(shadow_vmcs);
1357 vmcs_load(vmx->loaded_vmcs->vmcs);
1358}
1359
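/*
 * hv_clean_fields, used throughout copy_enlightened_to_vmcs12() below, is a
 * bitmask in which a set bit indicates that the corresponding group of
 * eVMCS fields has not been modified since the eVMCS was last processed;
 * only groups whose clean bit is clear are copied into the cached vmcs12.
 */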
1360static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
1361{
1362 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1363 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1364
1365 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1366 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1367 vmcs12->guest_rip = evmcs->guest_rip;
1368
1369 if (unlikely(!(evmcs->hv_clean_fields &
1370 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1371 vmcs12->guest_rsp = evmcs->guest_rsp;
1372 vmcs12->guest_rflags = evmcs->guest_rflags;
1373 vmcs12->guest_interruptibility_info =
1374 evmcs->guest_interruptibility_info;
1375 }
1376
1377 if (unlikely(!(evmcs->hv_clean_fields &
1378 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1379 vmcs12->cpu_based_vm_exec_control =
1380 evmcs->cpu_based_vm_exec_control;
1381 }
1382
1383 if (unlikely(!(evmcs->hv_clean_fields &
1384 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1385 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1386 }
1387
1388 if (unlikely(!(evmcs->hv_clean_fields &
1389 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1390 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1391 }
1392
1393 if (unlikely(!(evmcs->hv_clean_fields &
1394 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1395 vmcs12->vm_entry_intr_info_field =
1396 evmcs->vm_entry_intr_info_field;
1397 vmcs12->vm_entry_exception_error_code =
1398 evmcs->vm_entry_exception_error_code;
1399 vmcs12->vm_entry_instruction_len =
1400 evmcs->vm_entry_instruction_len;
1401 }
1402
1403 if (unlikely(!(evmcs->hv_clean_fields &
1404 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1405 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1406 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1407 vmcs12->host_cr0 = evmcs->host_cr0;
1408 vmcs12->host_cr3 = evmcs->host_cr3;
1409 vmcs12->host_cr4 = evmcs->host_cr4;
1410 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1411 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1412 vmcs12->host_rip = evmcs->host_rip;
1413 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1414 vmcs12->host_es_selector = evmcs->host_es_selector;
1415 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1416 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1417 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1418 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1419 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1420 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1421 }
1422
1423 if (unlikely(!(evmcs->hv_clean_fields &
1424 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1425 vmcs12->pin_based_vm_exec_control =
1426 evmcs->pin_based_vm_exec_control;
1427 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1428 vmcs12->secondary_vm_exec_control =
1429 evmcs->secondary_vm_exec_control;
1430 }
1431
1432 if (unlikely(!(evmcs->hv_clean_fields &
1433 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1434 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1435 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1436 }
1437
1438 if (unlikely(!(evmcs->hv_clean_fields &
1439 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1440 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1441 }
1442
1443 if (unlikely(!(evmcs->hv_clean_fields &
1444 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1445 vmcs12->guest_es_base = evmcs->guest_es_base;
1446 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1447 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1448 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1449 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1450 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1451 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1452 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1453 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1454 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1455 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1456 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1457 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1458 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1459 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1460 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1461 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1462 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1463 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1464 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1465 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1466 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1467 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1468 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1469 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1470 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1471 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1472 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1473 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1474 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1475 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1476 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1477 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1478 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1479 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1480 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1481 }
1482
1483 if (unlikely(!(evmcs->hv_clean_fields &
1484 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1485 vmcs12->tsc_offset = evmcs->tsc_offset;
1486 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1487 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1488 }
1489
1490 if (unlikely(!(evmcs->hv_clean_fields &
1491 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1492 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1493 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1494 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1495 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1496 vmcs12->guest_cr0 = evmcs->guest_cr0;
1497 vmcs12->guest_cr3 = evmcs->guest_cr3;
1498 vmcs12->guest_cr4 = evmcs->guest_cr4;
1499 vmcs12->guest_dr7 = evmcs->guest_dr7;
1500 }
1501
1502 if (unlikely(!(evmcs->hv_clean_fields &
1503 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1504 vmcs12->host_fs_base = evmcs->host_fs_base;
1505 vmcs12->host_gs_base = evmcs->host_gs_base;
1506 vmcs12->host_tr_base = evmcs->host_tr_base;
1507 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1508 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1509 vmcs12->host_rsp = evmcs->host_rsp;
1510 }
1511
1512 if (unlikely(!(evmcs->hv_clean_fields &
1513 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1514 vmcs12->ept_pointer = evmcs->ept_pointer;
1515 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1516 }
1517
1518 if (unlikely(!(evmcs->hv_clean_fields &
1519 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1520 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1521 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1522 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1523 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1524 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1525 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1526 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1527 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1528 vmcs12->guest_pending_dbg_exceptions =
1529 evmcs->guest_pending_dbg_exceptions;
1530 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1531 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1532 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1533 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1534 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1535 }
1536
1537 /*
1538 * Not used?
1539 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1540 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1541 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1542 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
1543 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
1544 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
1545 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
1546 * vmcs12->page_fault_error_code_mask =
1547 * evmcs->page_fault_error_code_mask;
1548 * vmcs12->page_fault_error_code_match =
1549 * evmcs->page_fault_error_code_match;
1550 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1551 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1552 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1553 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1554 */
1555
1556 /*
1557 * Read only fields:
1558 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1559 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1560 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1561 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1562 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1563 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1564 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1565 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1566 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1567 * vmcs12->exit_qualification = evmcs->exit_qualification;
1568 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1569 *
1570 * Not present in struct vmcs12:
1571 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1572 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1573 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1574 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1575 */
1576
1577 return 0;
1578}
1579
1580static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
1581{
1582 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1583 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1584
1585 /*
1586 * Should not be changed by KVM:
1587 *
1588 * evmcs->host_es_selector = vmcs12->host_es_selector;
1589 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1590 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1591 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1592 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1593 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1594 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1595 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1596 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1597 * evmcs->host_cr0 = vmcs12->host_cr0;
1598 * evmcs->host_cr3 = vmcs12->host_cr3;
1599 * evmcs->host_cr4 = vmcs12->host_cr4;
1600 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1601 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1602 * evmcs->host_rip = vmcs12->host_rip;
1603 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1604 * evmcs->host_fs_base = vmcs12->host_fs_base;
1605 * evmcs->host_gs_base = vmcs12->host_gs_base;
1606 * evmcs->host_tr_base = vmcs12->host_tr_base;
1607 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1608 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1609 * evmcs->host_rsp = vmcs12->host_rsp;
1610 * sync_vmcs12() doesn't read these:
1611 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1612 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1613 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1614 * evmcs->ept_pointer = vmcs12->ept_pointer;
1615 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1616 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1617 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1618 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1619 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
1620 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
1621 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
1622 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
1623 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1624 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1625 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1626 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1627 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1628 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1629 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1630 * evmcs->page_fault_error_code_mask =
1631 * vmcs12->page_fault_error_code_mask;
1632 * evmcs->page_fault_error_code_match =
1633 * vmcs12->page_fault_error_code_match;
1634 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1635 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1636 * evmcs->tsc_offset = vmcs12->tsc_offset;
1637 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1638 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1639 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1640 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1641 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1642 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1643 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1644 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1645 *
1646 * Not present in struct vmcs12:
1647 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1648 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1649 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1650 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1651 */
1652
1653 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1654 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1655 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1656 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1657 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1658 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1659 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1660 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1661
1662 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1663 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1664 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1665 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1666 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1667 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1668 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1669 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1670 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1671 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1672
1673 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1674 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1675 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1676 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1677 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1678 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1679 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1680 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1681
1682 evmcs->guest_es_base = vmcs12->guest_es_base;
1683 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1684 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1685 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1686 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1687 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1688 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1689 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1690 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1691 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1692
1693 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1694 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1695
1696 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1697 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1698 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1699 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1700
1701 evmcs->guest_pending_dbg_exceptions =
1702 vmcs12->guest_pending_dbg_exceptions;
1703 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1704 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1705
1706 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1707 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1708
1709 evmcs->guest_cr0 = vmcs12->guest_cr0;
1710 evmcs->guest_cr3 = vmcs12->guest_cr3;
1711 evmcs->guest_cr4 = vmcs12->guest_cr4;
1712 evmcs->guest_dr7 = vmcs12->guest_dr7;
1713
1714 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1715
1716 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1717 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1718 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1719 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1720 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1721 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1722 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1723 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1724
1725 evmcs->exit_qualification = vmcs12->exit_qualification;
1726
1727 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1728 evmcs->guest_rsp = vmcs12->guest_rsp;
1729 evmcs->guest_rflags = vmcs12->guest_rflags;
1730
1731 evmcs->guest_interruptibility_info =
1732 vmcs12->guest_interruptibility_info;
1733 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1734 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1735 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1736 evmcs->vm_entry_exception_error_code =
1737 vmcs12->vm_entry_exception_error_code;
1738 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1739
1740 evmcs->guest_rip = vmcs12->guest_rip;
1741
1742 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1743
1744 return 0;
1745}
1746
1747/*
1748 * This is the equivalent of the nested hypervisor executing the vmptrld
1749 * instruction.
1750 */
1751static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
1752 bool from_launch)
1753{
1754 struct vcpu_vmx *vmx = to_vmx(vcpu);
1755 struct hv_vp_assist_page assist_page;
1756
1757 if (likely(!vmx->nested.enlightened_vmcs_enabled))
1758 return 1;
1759
1760 if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
1761 return 1;
1762
1763 if (unlikely(!assist_page.enlighten_vmentry))
1764 return 1;
1765
1766 if (unlikely(assist_page.current_nested_vmcs !=
1767 vmx->nested.hv_evmcs_vmptr)) {
1768
1769 if (!vmx->nested.hv_evmcs)
1770 vmx->nested.current_vmptr = -1ull;
1771
1772 nested_release_evmcs(vcpu);
1773
1774 vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
1775 vcpu, assist_page.current_nested_vmcs);
1776
1777 if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
1778 return 0;
1779
1780 vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
1781
1782 /*
1783 * Currently, KVM only supports eVMCS version 1
1784 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set the
1785 * first u32 field of the eVMCS to this value, which specifies the
1786 * eVMCS VersionNumber.
1787 *
1788 * The guest should learn the eVMCS versions supported by the host
1789 * by examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM
1790 * is expected to set this CPUID leaf according to the value
1791 * returned in vmcs_version from nested_enable_evmcs().
1792 *
1793 * However, it turns out that Microsoft Hyper-V fails to comply
1794 * with its own invented interface: when Hyper-V uses eVMCS, it
1795 * just sets the first u32 field of the eVMCS to the revision_id
1796 * specified in MSR_IA32_VMX_BASIC, instead of the eVMCS version
1797 * number, which is one of the supported versions specified in
1798 * CPUID.0x4000000A.EAX[0:15].
1799 *
1800 * To work around this Hyper-V bug, accept either a supported
1801 * eVMCS version or the VMCS12 revision_id as valid values for the
1802 * first u32 field of the eVMCS.
1803 */
1804 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
1805 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
1806 nested_release_evmcs(vcpu);
1807 return 0;
1808 }
1809
1810 vmx->nested.dirty_vmcs12 = true;
1811 /*
1812 * As we keep L2 state for only one guest, the 'hv_clean_fields'
1813 * mask can't be used when switching between different eVMCSes.
1814 * Reset it here for simplicity.
1815 */
1816 vmx->nested.hv_evmcs->hv_clean_fields &=
1817 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
1818 vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
1819
1820 /*
1821 * Unlike normal vmcs12, enlightened vmcs12 is not fully
1822 * reloaded from guest's memory (read only fields, fields not
1823 * present in struct hv_enlightened_vmcs, ...). Make sure there
1824 * are no leftovers.
1825 */
1826 if (from_launch) {
1827 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1828 memset(vmcs12, 0, sizeof(*vmcs12));
1829 vmcs12->hdr.revision_id = VMCS12_REVISION;
1830 }
1831
1832 }
1833 return 1;
1834}
1835
1836void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu)
1837{
1838 struct vcpu_vmx *vmx = to_vmx(vcpu);
1839
1840 /*
1841 * hv_evmcs may end up not being mapped after migration (when
1842 * L2 was running), map it here to make sure vmcs12 changes are
1843 * properly reflected.
1844 */
1845 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
1846 nested_vmx_handle_enlightened_vmptrld(vcpu, false);
1847
1848 if (vmx->nested.hv_evmcs) {
1849 copy_vmcs12_to_enlightened(vmx);
1850 /* All fields are clean */
1851 vmx->nested.hv_evmcs->hv_clean_fields |=
1852 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
1853 } else {
1854 copy_vmcs12_to_shadow(vmx);
1855 }
1856
1857 vmx->nested.need_vmcs12_sync = false;
1858}
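
/*
 * Note on the clean-fields handling above (per the enlightened-VMCS
 * convention, as understood here): once L0 has copied vmcs12 back into
 * the eVMCS, setting every bit in hv_clean_fields tells L1 that the
 * eVMCS is fully in sync; L1 is then expected to clear the bit for any
 * field group it modifies so that L0 re-reads that group on the next
 * nested VM-entry.
 */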
1859
1860static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
1861{
1862 struct vcpu_vmx *vmx =
1863 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
1864
1865 vmx->nested.preemption_timer_expired = true;
1866 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
1867 kvm_vcpu_kick(&vmx->vcpu);
1868
1869 return HRTIMER_NORESTART;
1870}
1871
1872static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
1873{
1874 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
1875 struct vcpu_vmx *vmx = to_vmx(vcpu);
1876
1877 /*
1878 * A timer value of zero is architecturally guaranteed to cause
1879 * a VMExit prior to executing any instructions in the guest.
1880 */
1881 if (preemption_timeout == 0) {
1882 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
1883 return;
1884 }
1885
1886 if (vcpu->arch.virtual_tsc_khz == 0)
1887 return;
1888
1889 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
1890 preemption_timeout *= 1000000;
1891 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
1892 hrtimer_start(&vmx->nested.preemption_timer,
1893 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
1894}
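
/*
 * Worked example of the conversion above, with hypothetical numbers:
 * the emulated timer ticks once every
 * 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE TSC cycles, so for
 * virtual_tsc_khz = 2000000 (a 2 GHz TSC) and a vmcs12 timer value of
 * 1000:
 *
 *	TSC cycles  = 1000 << 5                   = 32000
 *	nanoseconds = 32000 * 1000000 / 2000000   = 16000 (16 us)
 *
 * i.e. the hrtimer armed above fires roughly 16 us after VM-entry.
 */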
1895
1896static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
1897{
1898 if (vmx->nested.nested_run_pending &&
1899 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
1900 return vmcs12->guest_ia32_efer;
1901 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
1902 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
1903 else
1904 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
1905}
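
/*
 * For illustration, with hypothetical register values: if L1 does not
 * ask to load IA32_EFER on entry but does set VM_ENTRY_IA32E_MODE, and
 * vcpu->arch.efer is currently 0xd01 (SCE | LME | LMA | NXE), the
 * helper above returns 0xd01 unchanged; with VM_ENTRY_IA32E_MODE clear
 * it returns 0xd01 & ~(EFER_LMA | EFER_LME) = 0x801, i.e. L2 is entered
 * with long mode disabled.
 */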
1906
1907static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
1908{
1909 /*
1910 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
1911 * according to L0's settings (vmcs12 is irrelevant here). Host
1912 * fields that come from L0 and are not constant, e.g. HOST_CR3,
1913 * will be set as needed prior to VMLAUNCH/VMRESUME.
1914 */
1915 if (vmx->nested.vmcs02_initialized)
1916 return;
1917 vmx->nested.vmcs02_initialized = true;
1918
1919 /*
1920 * We don't care what the EPTP value is; we just need to guarantee
1921 * it's valid so we don't get a false positive when doing early
1922 * consistency checks.
1923 */
1924 if (enable_ept && nested_early_check)
1925 vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
1926
1927 /* All VMFUNCs are currently emulated through L0 vmexits. */
1928 if (cpu_has_vmx_vmfunc())
1929 vmcs_write64(VM_FUNCTION_CONTROL, 0);
1930
1931 if (cpu_has_vmx_posted_intr())
1932 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
1933
1934 if (cpu_has_vmx_msr_bitmap())
1935 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
1936
1937 if (enable_pml)
1938 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
1939
1940 /*
1941 * Set the MSR load/store lists to match L0's settings. Only the
1942 * addresses are constant (for vmcs02), the counts can change based
1943 * on L2's behavior, e.g. switching to/from long mode.
1944 */
1945 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
1946 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
1947 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
1948
1949 vmx_set_constant_host_state(vmx);
1950}
1951
1952static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx,
1953 struct vmcs12 *vmcs12)
1954{
1955 prepare_vmcs02_constant_state(vmx);
1956
1957 vmcs_write64(VMCS_LINK_POINTER, -1ull);
1958
1959 if (enable_vpid) {
1960 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
1961 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
1962 else
1963 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
1964 }
1965}
1966
1967static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
1968{
1969 u32 exec_control, vmcs12_exec_ctrl;
1970 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
1971
1972 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
1973 prepare_vmcs02_early_full(vmx, vmcs12);
1974
1975 /*
1976 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
1977 * entry, but only if the current (host) sp changed from the value
1978 * we wrote last (vmx->host_rsp). This cache is no longer relevant
1979 * if we switch vmcs, and rather than hold a separate cache per vmcs,
1980 * here we just force the write to happen on entry. host_rsp will
1981 * also be written unconditionally by nested_vmx_check_vmentry_hw()
1982 * if we are doing early consistency checks via hardware.
1983 */
1984 vmx->host_rsp = 0;
1985
1986 /*
1987 * PIN CONTROLS
1988 */
1989 exec_control = vmcs12->pin_based_vm_exec_control;
1990
1991 /* Preemption timer setting is computed directly in vmx_vcpu_run. */
1992 exec_control |= vmcs_config.pin_based_exec_ctrl;
1993 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
1994 vmx->loaded_vmcs->hv_timer_armed = false;
1995
1996 /* Posted interrupts setting is only taken from vmcs12. */
1997 if (nested_cpu_has_posted_intr(vmcs12)) {
1998 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
1999 vmx->nested.pi_pending = false;
2000 } else {
2001 exec_control &= ~PIN_BASED_POSTED_INTR;
2002 }
2003 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
2004
2005 /*
2006 * EXEC CONTROLS
2007 */
2008 exec_control = vmx_exec_control(vmx); /* L0's desires */
2009 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2010 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
2011 exec_control &= ~CPU_BASED_TPR_SHADOW;
2012 exec_control |= vmcs12->cpu_based_vm_exec_control;
2013
2014 /*
2015 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
2016 * nested_get_vmcs12_pages can't fix it up, the illegal value
2017 * will result in a VM entry failure.
2018 */
2019 if (exec_control & CPU_BASED_TPR_SHADOW) {
2020 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
2021 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2022 } else {
2023#ifdef CONFIG_X86_64
2024 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2025 CPU_BASED_CR8_STORE_EXITING;
2026#endif
2027 }
2028
2029 /*
2030 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2031 * for I/O port accesses.
2032 */
2033 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2034 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2035 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
2036
2037 /*
2038 * SECONDARY EXEC CONTROLS
2039 */
2040 if (cpu_has_secondary_exec_ctrls()) {
2041 exec_control = vmx->secondary_exec_control;
2042
2043 /* Take the following fields only from vmcs12 */
2044 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2045 SECONDARY_EXEC_ENABLE_INVPCID |
2046 SECONDARY_EXEC_RDTSCP |
2047 SECONDARY_EXEC_XSAVES |
2048 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2049 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2050 SECONDARY_EXEC_ENABLE_VMFUNC);
2051 if (nested_cpu_has(vmcs12,
2052 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
2053 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
2054 ~SECONDARY_EXEC_ENABLE_PML;
2055 exec_control |= vmcs12_exec_ctrl;
2056 }
2057
2058 /* VMCS shadowing for L2 is emulated for now */
2059 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2060
2061 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2062 vmcs_write16(GUEST_INTR_STATUS,
2063 vmcs12->guest_intr_status);
2064
2065 /*
2066 * Write an illegal value to APIC_ACCESS_ADDR. Later,
2067 * nested_get_vmcs12_pages will either fix it up or
2068 * remove the VM execution control.
2069 */
2070 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
2071 vmcs_write64(APIC_ACCESS_ADDR, -1ull);
2072
2073 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
2074 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
2075
2076 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
2077 }
2078
2079 /*
2080 * ENTRY CONTROLS
2081 *
2082 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2083 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2084 * on the related bits (if supported by the CPU) in the hope that
2085 * we can avoid VMWrites during vmx_set_efer().
2086 */
2087 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
2088 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
2089 if (cpu_has_load_ia32_efer()) {
2090 if (guest_efer & EFER_LMA)
2091 exec_control |= VM_ENTRY_IA32E_MODE;
2092 if (guest_efer != host_efer)
2093 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2094 }
2095 vm_entry_controls_init(vmx, exec_control);
2096
2097 /*
2098 * EXIT CONTROLS
2099 *
2100 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2101 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2102 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2103 */
2104 exec_control = vmx_vmexit_ctrl();
2105 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2106 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2107 vm_exit_controls_init(vmx, exec_control);
2108
2109 /*
2110 * Conceptually we want to copy the PML address and index from
2111 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
2112 * since we always flush the log on each vmexit and never change
2113 * the PML address (once set), this happens to be equivalent to
2114 * simply resetting the index in vmcs02.
2115 */
2116 if (enable_pml)
2117 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
2118
2119 /*
2120 * Interrupt/Exception Fields
2121 */
2122 if (vmx->nested.nested_run_pending) {
2123 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2124 vmcs12->vm_entry_intr_info_field);
2125 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2126 vmcs12->vm_entry_exception_error_code);
2127 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2128 vmcs12->vm_entry_instruction_len);
2129 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2130 vmcs12->guest_interruptibility_info);
2131 vmx->loaded_vmcs->nmi_known_unmasked =
2132 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2133 } else {
2134 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2135 }
2136}
2137
2138static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2139{
2140 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2141
2142 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2143 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2144 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2145 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2146 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2147 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2148 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2149 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2150 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2151 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2152 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2153 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2154 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2155 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2156 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2157 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2158 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2159 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2160 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2161 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2162 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2163 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2164 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2165 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2166 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2167 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2168 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2169 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2170 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2171 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2172 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2173 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2174 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2175 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2176 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2177 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2178 }
2179
2180 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2181 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2182 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2183 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2184 vmcs12->guest_pending_dbg_exceptions);
2185 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2186 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2187
2188 /*
2189 * L1 may access L2's PDPTRs, so save them in order to construct
2190 * vmcs12.
2191 */
2192 if (enable_ept) {
2193 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2194 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2195 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2196 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2197 }
2198 }
2199
2200 if (nested_cpu_has_xsaves(vmcs12))
2201 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2202
2203 /*
2204 * Whether page-faults are trapped is determined by a combination of
2205 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
2206 * If enable_ept, L0 doesn't care about page faults and we should
2207 * set all of these to L1's desires. However, if !enable_ept, L0 does
2208 * care about (at least some) page faults, and because it is not easy
2209 * (if at all possible?) to merge L0 and L1's desires, we simply ask
2210 * to exit on each and every L2 page fault. This is done by setting
2211 * MASK=MATCH=0 and (see below) EB.PF=1.
2212 * Note that below we don't need special code to set EB.PF beyond the
2213 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2214 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2215 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2216 */
2217 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
2218 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
2219 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
2220 enable_ept ? vmcs12->page_fault_error_code_match : 0);
2221
2222 if (cpu_has_vmx_apicv()) {
2223 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2224 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2225 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2226 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2227 }
2228
2229 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2230 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2231
2232 set_cr4_guest_host_mask(vmx);
2233
2234 if (kvm_mpx_supported()) {
2235 if (vmx->nested.nested_run_pending &&
2236 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2237 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2238 else
2239 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
2240 }
2241}
2242
2243/*
2244 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2245 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2246 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2247 * guest in a way that will both be appropriate to L1's requests, and our
2248 * needs. In addition to modifying the active vmcs (which is vmcs02), this
2249 * function also has additional necessary side-effects, like setting various
2250 * vcpu->arch fields.
2251 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
2252 * is assigned to entry_failure_code on failure.
2253 */
2254static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2255 u32 *entry_failure_code)
2256{
2257 struct vcpu_vmx *vmx = to_vmx(vcpu);
2258 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2259
2260 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) {
2261 prepare_vmcs02_full(vmx, vmcs12);
2262 vmx->nested.dirty_vmcs12 = false;
2263 }
2264
2265 /*
2266 * First, the fields that are shadowed. This must be kept in sync
2267 * with vmcs_shadow_fields.h.
2268 */
2269 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2270 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2271 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2272 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2273 }
2274
2275 if (vmx->nested.nested_run_pending &&
2276 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2277 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2278 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2279 } else {
2280 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2281 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2282 }
2283 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2284
2285 vmx->nested.preemption_timer_expired = false;
2286 if (nested_cpu_has_preemption_timer(vmcs12))
2287 vmx_start_preemption_timer(vcpu);
2288
2289 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2290 * bitwise-or of what L1 wants to trap for L2, and what we want to
2291 * trap. Note that CR0.TS also needs updating - we do this later.
2292 */
2293 update_exception_bitmap(vcpu);
2294 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2295 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2296
2297 if (vmx->nested.nested_run_pending &&
2298 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2299 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2300 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2301 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2302 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2303 }
2304
2305 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2306
2307 if (kvm_has_tsc_control)
2308 decache_tsc_multiplier(vmx);
2309
2310 if (enable_vpid) {
2311 /*
2312 * There is no direct mapping between vpid02 and vpid12; vpid02
2313 * is per-vCPU for L0 and is reused, while vpid12 is changed with
2314 * a single INVVPID during nested vmentry.
2315 * vpid12 is allocated by L1 for L2, so it does not influence the
2316 * global bitmap (used for vpid01 and vpid02 allocation) even if
2317 * a lot of nested vCPUs are spawned.
2318 */
2319 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
2320 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
2321 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
2322 __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
2323 }
2324 } else {
2325 /*
2326 * If L1 uses EPT, then L0 needs to execute INVEPT on
2327 * EPTP02 instead of EPTP01. Therefore, delay TLB
2328 * flush until vmcs02->eptp is fully updated by
2329 * KVM_REQ_LOAD_CR3. Note that this assumes
2330 * KVM_REQ_TLB_FLUSH is evaluated after
2331 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
2332 */
2333 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2334 }
2335 }
2336
2337 if (nested_cpu_has_ept(vmcs12))
2338 nested_ept_init_mmu_context(vcpu);
2339 else if (nested_cpu_has2(vmcs12,
2340 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2341 vmx_flush_tlb(vcpu, true);
2342
2343 /*
2344 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2345 * bits that we consider mandatorily enabled.
2346 * The CR0_READ_SHADOW is what L2 should have expected to read given
2347 * the specifications by L1; it's not enough to take
2348 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may have
2349 * more bits set than L1 expected.
2350 */
2351 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2352 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2353
2354 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2355 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2356
2357 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2358 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2359 vmx_set_efer(vcpu, vcpu->arch.efer);
2360
2361 /*
2362 * Guest state is invalid and unrestricted guest is disabled,
2363 * which means L1 attempted VMEntry to L2 with invalid state.
2364 * Fail the VMEntry.
2365 */
2366 if (vmx->emulation_required) {
2367 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2368 return 1;
2369 }
2370
2371 /* Load the guest's CR3; L2 paging is handled by either EPT or shadow page tables. */
2372 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2373 entry_failure_code))
2374 return 1;
2375
2376 if (!enable_ept)
2377 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2378
2379 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
2380 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
2381 return 0;
2382}
2383
2384static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2385{
2386 if (!nested_cpu_has_nmi_exiting(vmcs12) &&
2387 nested_cpu_has_virtual_nmis(vmcs12))
2388 return -EINVAL;
2389
2390 if (!nested_cpu_has_virtual_nmis(vmcs12) &&
2391 nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
2392 return -EINVAL;
2393
2394 return 0;
2395}
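
/*
 * For example, a hypothetical vmcs12 that enables "virtual NMIs"
 * without "NMI exiting", or that sets the NMI-window exiting control
 * (CPU_BASED_VIRTUAL_NMI_PENDING) without virtual NMIs, is rejected
 * above, mirroring the control-field consistency checks in the SDM.
 */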
2396
2397static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
2398{
2399 struct vcpu_vmx *vmx = to_vmx(vcpu);
2400 int maxphyaddr = cpuid_maxphyaddr(vcpu);
2401
2402 /* Check for memory type validity */
2403 switch (address & VMX_EPTP_MT_MASK) {
2404 case VMX_EPTP_MT_UC:
2405 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
2406 return false;
2407 break;
2408 case VMX_EPTP_MT_WB:
2409 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
2410 return false;
2411 break;
2412 default:
2413 return false;
2414 }
2415
2416 /* Only a 4-level page-walk length is valid. */
2417 if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
2418 return false;
2419
2420 /* Reserved bits should not be set */
2421 if (address >> maxphyaddr || ((address >> 7) & 0x1f))
2422 return false;
2423
2424 /* AD, if set, should be supported */
2425 if (address & VMX_EPTP_AD_ENABLE_BIT) {
2426 if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
2427 return false;
2428 }
2429
2430 return true;
2431}
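
/*
 * Example of an EPTP value that passes the checks above (hypothetical
 * root address, and assuming the WB and A/D bits are reported in
 * ept_caps): a 4-level, write-back EPTP with accessed/dirty flags
 * enabled and rooted at physical address 0x12345000 is encoded as
 *
 *	0x12345000 | VMX_EPTP_MT_WB | VMX_EPTP_PWL_4 | VMX_EPTP_AD_ENABLE_BIT
 *
 * i.e. bits 2:0 hold the memory type, bits 5:3 the page-walk length
 * minus one, bit 6 the A/D enable, and bits 11:7 must remain zero.
 */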
2432
2433/*
2434 * Checks related to VM-Execution Control Fields
2435 */
2436static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2437 struct vmcs12 *vmcs12)
2438{
2439 struct vcpu_vmx *vmx = to_vmx(vcpu);
2440
2441 if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2442 vmx->nested.msrs.pinbased_ctls_low,
2443 vmx->nested.msrs.pinbased_ctls_high) ||
2444 !vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2445 vmx->nested.msrs.procbased_ctls_low,
2446 vmx->nested.msrs.procbased_ctls_high))
2447 return -EINVAL;
2448
2449 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2450 !vmx_control_verify(vmcs12->secondary_vm_exec_control,
2451 vmx->nested.msrs.secondary_ctls_low,
2452 vmx->nested.msrs.secondary_ctls_high))
2453 return -EINVAL;
2454
2455 if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) ||
2456 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2457 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2458 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2459 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2460 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2461 nested_vmx_check_nmi_controls(vmcs12) ||
2462 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2463 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2464 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2465 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2466 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2467 return -EINVAL;
2468
2469 if (nested_cpu_has_ept(vmcs12) &&
2470 !valid_ept_address(vcpu, vmcs12->ept_pointer))
2471 return -EINVAL;
2472
2473 if (nested_cpu_has_vmfunc(vmcs12)) {
2474 if (vmcs12->vm_function_control &
2475 ~vmx->nested.msrs.vmfunc_controls)
2476 return -EINVAL;
2477
2478 if (nested_cpu_has_eptp_switching(vmcs12)) {
2479 if (!nested_cpu_has_ept(vmcs12) ||
2480 !page_address_valid(vcpu, vmcs12->eptp_list_address))
2481 return -EINVAL;
2482 }
2483 }
2484
2485 return 0;
2486}
2487
2488static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
2489 struct vmcs12 *vmcs12)
2490{
2491 struct vcpu_vmx *vmx = to_vmx(vcpu);
2492 bool ia32e;
2493
2494 if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2495 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
2496 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2497
2498 if (nested_check_vm_execution_controls(vcpu, vmcs12))
2499 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2500
2501 if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
2502 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2503
2504 if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
2505 !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
2506 !nested_cr3_valid(vcpu, vmcs12->host_cr3))
2507 return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
2508
2509 if (!vmx_control_verify(vmcs12->vm_exit_controls,
2510 vmx->nested.msrs.exit_ctls_low,
2511 vmx->nested.msrs.exit_ctls_high) ||
2512 !vmx_control_verify(vmcs12->vm_entry_controls,
2513 vmx->nested.msrs.entry_ctls_low,
2514 vmx->nested.msrs.entry_ctls_high))
2515 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2516
2517 /*
2518 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2519 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2520 * the values of the LMA and LME bits in the field must each be that of
2521 * the host address-space size VM-exit control.
2522 */
2523 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2524 ia32e = (vmcs12->vm_exit_controls &
2525 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
2526 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
2527 ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
2528 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
2529 return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
2530 }
2531
2532 /*
2533 * From the Intel SDM, volume 3:
2534 * Fields relevant to VM-entry event injection must be set properly.
2535 * These fields are the VM-entry interruption-information field, the
2536 * VM-entry exception error code, and the VM-entry instruction length.
2537 */
2538 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2539 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2540 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2541 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2542 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2543 bool should_have_error_code;
2544 bool urg = nested_cpu_has2(vmcs12,
2545 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2546 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2547
2548 /* VM-entry interruption-info field: interruption type */
2549 if (intr_type == INTR_TYPE_RESERVED ||
2550 (intr_type == INTR_TYPE_OTHER_EVENT &&
2551 !nested_cpu_supports_monitor_trap_flag(vcpu)))
2552 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2553
2554 /* VM-entry interruption-info field: vector */
2555 if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2556 (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2557 (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2558 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2559
2560 /* VM-entry interruption-info field: deliver error code */
2561 should_have_error_code =
2562 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2563 x86_exception_has_error_code(vector);
2564 if (has_error_code != should_have_error_code)
2565 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2566
2567 /* VM-entry exception error code */
2568 if (has_error_code &&
2569 vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
2570 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2571
2572 /* VM-entry interruption-info field: reserved bits */
2573 if (intr_info & INTR_INFO_RESVD_BITS_MASK)
2574 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2575
2576 /* VM-entry instruction length */
2577 switch (intr_type) {
2578 case INTR_TYPE_SOFT_EXCEPTION:
2579 case INTR_TYPE_SOFT_INTR:
2580 case INTR_TYPE_PRIV_SW_EXCEPTION:
2581 if ((vmcs12->vm_entry_instruction_len > 15) ||
2582 (vmcs12->vm_entry_instruction_len == 0 &&
2583 !nested_cpu_has_zero_length_injection(vcpu)))
2584 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2585 }
2586 }
2587
2588 return 0;
2589}
2590
2591static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2592 struct vmcs12 *vmcs12)
2593{
2594 int r;
2595 struct page *page;
2596 struct vmcs12 *shadow;
2597
2598 if (vmcs12->vmcs_link_pointer == -1ull)
2599 return 0;
2600
2601 if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
2602 return -EINVAL;
2603
2604 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
2605 if (is_error_page(page))
2606 return -EINVAL;
2607
2608 r = 0;
2609 shadow = kmap(page);
2610 if (shadow->hdr.revision_id != VMCS12_REVISION ||
2611 shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
2612 r = -EINVAL;
2613 kunmap(page);
2614 kvm_release_page_clean(page);
2615 return r;
2616}
2617
2618static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
2619 struct vmcs12 *vmcs12,
2620 u32 *exit_qual)
2621{
2622 bool ia32e;
2623
2624 *exit_qual = ENTRY_FAIL_DEFAULT;
2625
2626 if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
2627 !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
2628 return 1;
2629
2630 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
2631 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
2632 return 1;
2633 }
2634
2635 /*
2636 * If the load IA32_EFER VM-entry control is 1, the following checks
2637 * are performed on the field for the IA32_EFER MSR:
2638 * - Bits reserved in the IA32_EFER MSR must be 0.
2639 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
2640 * the IA-32e mode guest VM-exit control. It must also be identical
2641 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
2642 * CR0.PG) is 1.
2643 */
2644 if (to_vmx(vcpu)->nested.nested_run_pending &&
2645 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
2646 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
2647 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
2648 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
2649 ((vmcs12->guest_cr0 & X86_CR0_PG) &&
2650 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
2651 return 1;
2652 }
2653
2654 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
2655 (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
2656 (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
2657 return 1;
2658
2659 return 0;
2660}
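
/*
 * Example of the IA32_EFER check above, with a hypothetical vmcs12:
 * if VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are both set and
 * guest_cr0 has PG set, guest_ia32_efer must have both EFER_LMA and
 * EFER_LME set; attempting the entry with guest_ia32_efer = 0 fails
 * these postreqs and takes the "VM-entry failure" VMExit path rather
 * than entering L2.
 */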
2661
2662static int __noclone nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
2663{
2664 struct vcpu_vmx *vmx = to_vmx(vcpu);
2665 unsigned long cr3, cr4;
2666
2667 if (!nested_early_check)
2668 return 0;
2669
2670 if (vmx->msr_autoload.host.nr)
2671 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2672 if (vmx->msr_autoload.guest.nr)
2673 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
2674
2675 preempt_disable();
2676
2677 vmx_prepare_switch_to_guest(vcpu);
2678
2679 /*
2680 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
2681 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
2682 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
2683 * there is no need to preserve other bits or save/restore the field.
2684 */
2685 vmcs_writel(GUEST_RFLAGS, 0);
2686
2687 vmcs_writel(HOST_RIP, vmx_early_consistency_check_return);
2688
2689 cr3 = __get_current_cr3_fast();
2690 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
2691 vmcs_writel(HOST_CR3, cr3);
2692 vmx->loaded_vmcs->host_state.cr3 = cr3;
2693 }
2694
2695 cr4 = cr4_read_shadow();
2696 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
2697 vmcs_writel(HOST_CR4, cr4);
2698 vmx->loaded_vmcs->host_state.cr4 = cr4;
2699 }
2700
2701 vmx->__launched = vmx->loaded_vmcs->launched;
2702
2703 asm(
2704 /* Set HOST_RSP */
2705 __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
2706 "mov %%" _ASM_SP ", %c[host_rsp](%0)\n\t"
2707
2708 /* Check if vmlaunch or vmresume is needed */
2709 "cmpl $0, %c[launched](%0)\n\t"
2710 "jne 1f\n\t"
2711 __ex("vmlaunch") "\n\t"
2712 "jmp 2f\n\t"
2713 "1: " __ex("vmresume") "\n\t"
2714 "2: "
2715 /* Set vmx->fail accordingly */
2716 "setbe %c[fail](%0)\n\t"
2717
2718 ".pushsection .rodata\n\t"
2719 ".global vmx_early_consistency_check_return\n\t"
2720 "vmx_early_consistency_check_return: " _ASM_PTR " 2b\n\t"
2721 ".popsection"
2722 :
2723 : "c"(vmx), "d"((unsigned long)HOST_RSP),
2724 [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
2725 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
2726 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp))
2727 : "rax", "cc", "memory"
2728 );
2729
2730 vmcs_writel(HOST_RIP, vmx_return);
2731
2732 preempt_enable();
2733
2734 if (vmx->msr_autoload.host.nr)
2735 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2736 if (vmx->msr_autoload.guest.nr)
2737 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2738
2739 if (vmx->fail) {
2740 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
2741 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
2742 vmx->fail = 0;
2743 return 1;
2744 }
2745
2746 /*
2747 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
2748 */
2749 local_irq_enable();
2750 if (hw_breakpoint_active())
2751 set_debugreg(__this_cpu_read(cpu_dr7), 7);
2752
2753 /*
2754 * A non-failing VMEntry means we somehow entered guest mode with
2755 * an illegal RIP, and that's just the tip of the iceberg. There
2756 * is no telling what memory has been modified or what state has
2757 * been exposed to unknown code. Hitting this all but guarantees
2758 * a (very critical) hardware issue.
2759 */
2760 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
2761 VMX_EXIT_REASONS_FAILED_VMENTRY));
2762
2763 return 0;
2764}
2765STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw);
2766
2767
2768static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
2769 struct vmcs12 *vmcs12);
2770
2771static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
2772{
2773 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2774 struct vcpu_vmx *vmx = to_vmx(vcpu);
2775 struct page *page;
2776 u64 hpa;
2777
2778 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
2779 /*
2780 * Translate L1 physical address to host physical
2781 * address for vmcs02. Keep the page pinned, so this
2782 * physical address remains valid. We keep a reference
2783 * to it so we can release it later.
2784 */
2785 if (vmx->nested.apic_access_page) { /* shouldn't happen */
2786 kvm_release_page_dirty(vmx->nested.apic_access_page);
2787 vmx->nested.apic_access_page = NULL;
2788 }
2789 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
2790 /*
2791 * If translation failed, no matter: This feature asks
2792 * to exit when accessing the given address, and if it
2793 * can never be accessed, this feature won't do
2794 * anything anyway.
2795 */
2796 if (!is_error_page(page)) {
2797 vmx->nested.apic_access_page = page;
2798 hpa = page_to_phys(vmx->nested.apic_access_page);
2799 vmcs_write64(APIC_ACCESS_ADDR, hpa);
2800 } else {
2801 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
2802 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
2803 }
2804 }
2805
2806 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
2807 if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
2808 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
2809 vmx->nested.virtual_apic_page = NULL;
2810 }
2811 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
2812
2813 /*
2814 * If translation failed, VM entry will fail because
2815 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
2816 * Failing the vm entry is _not_ what the processor
2817 * does but it's basically the only possibility we
2818 * have. We could still enter the guest if CR8 load
2819 * exits are enabled, CR8 store exits are enabled, and
2820 * virtualize APIC access is disabled; in this case
2821 * the processor would never use the TPR shadow and we
2822 * could simply clear the bit from the execution
2823 * control. But such a configuration is useless, so
2824 * let's keep the code simple.
2825 */
2826 if (!is_error_page(page)) {
2827 vmx->nested.virtual_apic_page = page;
2828 hpa = page_to_phys(vmx->nested.virtual_apic_page);
2829 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
2830 }
2831 }
2832
2833 if (nested_cpu_has_posted_intr(vmcs12)) {
2834 if (vmx->nested.pi_desc_page) { /* shouldn't happen */
2835 kunmap(vmx->nested.pi_desc_page);
2836 kvm_release_page_dirty(vmx->nested.pi_desc_page);
2837 vmx->nested.pi_desc_page = NULL;
2838 }
2839 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
2840 if (is_error_page(page))
2841 return;
2842 vmx->nested.pi_desc_page = page;
2843 vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
2844 vmx->nested.pi_desc =
2845 (struct pi_desc *)((void *)vmx->nested.pi_desc +
2846 (unsigned long)(vmcs12->posted_intr_desc_addr &
2847 (PAGE_SIZE - 1)));
2848 vmcs_write64(POSTED_INTR_DESC_ADDR,
2849 page_to_phys(vmx->nested.pi_desc_page) +
2850 (unsigned long)(vmcs12->posted_intr_desc_addr &
2851 (PAGE_SIZE - 1)));
2852 }
2853 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
2854 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
2855 CPU_BASED_USE_MSR_BITMAPS);
2856 else
2857 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
2858 CPU_BASED_USE_MSR_BITMAPS);
2859}
2860
2861/*
2862 * Intel's VMX Instruction Reference specifies a common set of prerequisites
2863 * for running VMX instructions (except VMXON, whose prerequisites are
2864 * slightly different). It also specifies what exception to inject otherwise.
2865 * Note that many of these exceptions have priority over VM exits, so they
2866 * don't have to be checked again here.
2867 */
2868static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
2869{
2870 if (!to_vmx(vcpu)->nested.vmxon) {
2871 kvm_queue_exception(vcpu, UD_VECTOR);
2872 return 0;
2873 }
2874
2875 if (vmx_get_cpl(vcpu)) {
2876 kvm_inject_gp(vcpu, 0);
2877 return 0;
2878 }
2879
2880 return 1;
2881}
2882
2883static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
2884{
2885 u8 rvi = vmx_get_rvi();
2886 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
2887
2888 return ((rvi & 0xf0) > (vppr & 0xf0));
2889}
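
/*
 * The comparison above only considers the upper nibbles, i.e. the
 * priority classes. For example, with hypothetical values RVI = 0x51
 * and VPPR = 0x30, 0x50 > 0x30, so a virtual interrupt is pending at a
 * higher priority class than the current processor priority and the
 * helper returns true.
 */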
2890
2891static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
2892 struct vmcs12 *vmcs12);
2893
2894/*
2895 * If from_vmentry is false, this is being called from state restore (either RSM
2896 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
2897 *
2898 * Returns:
2899 * 0 - success, i.e. proceed with actual VMEnter
2900 * 1 - consistency check VMExit
2901 * -1 - consistency check VMFail
2902 */
2903int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
2904{
2905 struct vcpu_vmx *vmx = to_vmx(vcpu);
2906 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2907 bool evaluate_pending_interrupts;
2908 u32 exit_reason = EXIT_REASON_INVALID_STATE;
2909 u32 exit_qual;
2910
2911 evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
2912 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
2913 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
2914 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
2915
2916 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
2917 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
2918 if (kvm_mpx_supported() &&
2919 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2920 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
2921
2922 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
2923
2924 prepare_vmcs02_early(vmx, vmcs12);
2925
2926 if (from_vmentry) {
2927 nested_get_vmcs12_pages(vcpu);
2928
2929 if (nested_vmx_check_vmentry_hw(vcpu)) {
2930 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
2931 return -1;
2932 }
2933
2934 if (nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
2935 goto vmentry_fail_vmexit;
2936 }
2937
2938 enter_guest_mode(vcpu);
2939 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
2940 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
2941
2942 if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
2943 goto vmentry_fail_vmexit_guest_mode;
2944
2945 if (from_vmentry) {
2946 exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
2947 exit_qual = nested_vmx_load_msr(vcpu,
2948 vmcs12->vm_entry_msr_load_addr,
2949 vmcs12->vm_entry_msr_load_count);
2950 if (exit_qual)
2951 goto vmentry_fail_vmexit_guest_mode;
2952 } else {
2953 /*
2954 * The MMU is not initialized to point at the right entities yet and
2955 * "get pages" would need to read data from the guest (i.e. we will
2956 * need to perform gpa to hpa translation). Request a call
2957 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
2958 * have already been set at vmentry time and should not be reset.
2959 */
2960 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
2961 }
2962
2963 /*
2964 * If L1 had a pending IRQ/NMI until it executed
2965 * VMLAUNCH/VMRESUME which wasn't delivered because it was
2966 * disallowed (e.g. interrupts disabled), L0 needs to
2967 * evaluate if this pending event should cause an exit from L2
2968 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
2969 * intercept EXTERNAL_INTERRUPT).
2970 *
2971 * Usually this would be handled by the processor noticing an
2972 * IRQ/NMI window request, or checking RVI during evaluation of
2973 * pending virtual interrupts. However, this setting was done
2974 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
2975 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
2976 */
2977 if (unlikely(evaluate_pending_interrupts))
2978 kvm_make_request(KVM_REQ_EVENT, vcpu);
2979
2980 /*
2981 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
2982 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
2983 * returned as far as L1 is concerned. It will only return (and set
2984 * the success flag) when L2 exits (see nested_vmx_vmexit()).
2985 */
2986 return 0;
2987
2988 /*
2989 * A failed consistency check that leads to a VMExit during L1's
2990 * VMEnter to L2 is a variation of a normal VMexit, as explained in
2991 * 26.7 "VM-entry failures during or after loading guest state".
2992 */
2993vmentry_fail_vmexit_guest_mode:
2994 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
2995 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
2996 leave_guest_mode(vcpu);
2997
2998vmentry_fail_vmexit:
2999 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3000
3001 if (!from_vmentry)
3002 return 1;
3003
3004 load_vmcs12_host_state(vcpu, vmcs12);
3005 vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
3006 vmcs12->exit_qualification = exit_qual;
3007 if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
3008 vmx->nested.need_vmcs12_sync = true;
3009 return 1;
3010}
3011
3012/*
3013 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3014 * for running an L2 nested guest.
3015 */
3016static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3017{
3018 struct vmcs12 *vmcs12;
3019 struct vcpu_vmx *vmx = to_vmx(vcpu);
3020 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
3021 int ret;
3022
3023 if (!nested_vmx_check_permission(vcpu))
3024 return 1;
3025
3026 if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
3027 return 1;
3028
3029 if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
3030 return nested_vmx_failInvalid(vcpu);
3031
3032 vmcs12 = get_vmcs12(vcpu);
3033
3034 /*
3035 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3036 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3037 * rather than RFLAGS.ZF, and no error number is stored to the
3038 * VM-instruction error field.
3039 */
3040 if (vmcs12->hdr.shadow_vmcs)
3041 return nested_vmx_failInvalid(vcpu);
3042
3043 if (vmx->nested.hv_evmcs) {
3044 copy_enlightened_to_vmcs12(vmx);
3045 /* Enlightened VMCS doesn't have launch state */
3046 vmcs12->launch_state = !launch;
3047 } else if (enable_shadow_vmcs) {
3048 copy_shadow_to_vmcs12(vmx);
3049 }
3050
3051 /*
3052 * The nested entry process starts with enforcing various prerequisites
3053 * on vmcs12 as required by the Intel SDM, and acting appropriately when
3054 * they fail: As the SDM explains, some conditions should cause the
3055 * instruction to fail, while others will cause the instruction to seem
3056 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3057 * To speed up the normal (success) code path, we should avoid checking
3058 * for misconfigurations which will anyway be caught by the processor
3059 * when using the merged vmcs02.
3060 */
3061 if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
3062 return nested_vmx_failValid(vcpu,
3063 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3064
3065 if (vmcs12->launch_state == launch)
3066 return nested_vmx_failValid(vcpu,
3067 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3068 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3069
3070 ret = nested_vmx_check_vmentry_prereqs(vcpu, vmcs12);
3071 if (ret)
3072 return nested_vmx_failValid(vcpu, ret);
3073
3074 /*
3075 * We're finally done with prerequisite checking, and can start with
3076 * the nested entry.
3077 */
3078 vmx->nested.nested_run_pending = 1;
3079 ret = nested_vmx_enter_non_root_mode(vcpu, true);
3080 vmx->nested.nested_run_pending = !ret;
3081 if (ret > 0)
3082 return 1;
3083 else if (ret)
3084 return nested_vmx_failValid(vcpu,
3085 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3086
3087 /* Hide L1D cache contents from the nested guest. */
3088 vmx->vcpu.arch.l1tf_flush_l1d = true;
3089
3090 /*
3091 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3092 * also be used as part of restoring nVMX state for
3093 * snapshot restore (migration).
3094 *
3095 * In this flow, it is assumed that the vmcs12 cache was
3096 * transferred as part of the captured nVMX state and should
3097 * therefore not be read from guest memory (which may not
3098 * exist on destination host yet).
3099 */
3100 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3101
3102 /*
3103 * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
3104 * by event injection, halt the vcpu.
3105 */
3106 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
3107 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) {
3108 vmx->nested.nested_run_pending = 0;
3109 return kvm_vcpu_halt(vcpu);
3110 }
3111 return 1;
3112}
3113
3114/*
3115 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3116 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3117 * This function returns the new value we should put in vmcs12.guest_cr0.
3118 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3119 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3120 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3121 * didn't trap the bit, because if L1 did, so would L0).
3122 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3123 * been modified by L2, and L1 knows it. So just leave the old value of
3124 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3125 * isn't relevant, because if L0 traps this bit it can set it to anything.
3126 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3127 * changed these bits, and therefore they need to be updated, but L0
3128 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3129 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3130 */
3131static inline unsigned long
3132vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3133{
3134 return
3135 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3136 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3137 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3138 vcpu->arch.cr0_guest_owned_bits));
3139}
3140
3141static inline unsigned long
3142vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3143{
3144 return
3145 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3146 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3147 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3148 vcpu->arch.cr4_guest_owned_bits));
3149}
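
/*
 * Worked example for the three-way merge above, using a hypothetical
 * CR0 bit X: if X is guest-owned for L0 (set in cr0_guest_owned_bits)
 * and not trapped by L1 (clear in cr0_guest_host_mask), case 1 applies
 * and X is taken from the hardware GUEST_CR0. If L1 traps X, case 2
 * applies and the value from vmcs12->guest_cr0 is kept. If only L0
 * traps X, case 3 applies and X comes from CR0_READ_SHADOW, which holds
 * the value L2 is allowed to observe. vmcs12_guest_cr4() above follows
 * the same pattern for CR4.
 */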
3150
3151static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3152 struct vmcs12 *vmcs12)
3153{
3154 u32 idt_vectoring;
3155 unsigned int nr;
3156
3157 if (vcpu->arch.exception.injected) {
3158 nr = vcpu->arch.exception.nr;
3159 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3160
3161 if (kvm_exception_is_soft(nr)) {
3162 vmcs12->vm_exit_instruction_len =
3163 vcpu->arch.event_exit_inst_len;
3164 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3165 } else
3166 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3167
3168 if (vcpu->arch.exception.has_error_code) {
3169 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3170 vmcs12->idt_vectoring_error_code =
3171 vcpu->arch.exception.error_code;
3172 }
3173
3174 vmcs12->idt_vectoring_info_field = idt_vectoring;
3175 } else if (vcpu->arch.nmi_injected) {
3176 vmcs12->idt_vectoring_info_field =
3177 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3178 } else if (vcpu->arch.interrupt.injected) {
3179 nr = vcpu->arch.interrupt.nr;
3180 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3181
3182 if (vcpu->arch.interrupt.soft) {
3183 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3184 vmcs12->vm_entry_instruction_len =
3185 vcpu->arch.event_exit_inst_len;
3186 } else
3187 idt_vectoring |= INTR_TYPE_EXT_INTR;
3188
3189 vmcs12->idt_vectoring_info_field = idt_vectoring;
3190 }
3191}
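
/*
 * Example encoding (hypothetical pending event): an injected #PF
 * (vector 14) with an error code is recorded above as
 * 14 | VECTORING_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION |
 * VECTORING_INFO_DELIVER_CODE_MASK, with the error code itself stored
 * in vmcs12->idt_vectoring_error_code.
 */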
3192
3193
3194static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3195{
3196 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3197 gfn_t gfn;
3198
3199 /*
3200 * Don't need to mark the APIC access page dirty; it is never
3201 * written to by the CPU during APIC virtualization.
3202 */
3203
3204 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3205 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3206 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3207 }
3208
3209 if (nested_cpu_has_posted_intr(vmcs12)) {
3210 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3211 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3212 }
3213}
3214
3215static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
3216{
3217 struct vcpu_vmx *vmx = to_vmx(vcpu);
3218 int max_irr;
3219 void *vapic_page;
3220 u16 status;
3221
3222 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
3223 return;
3224
3225 vmx->nested.pi_pending = false;
3226 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3227 return;
3228
3229 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3230 if (max_irr != 256) {
3231 vapic_page = kmap(vmx->nested.virtual_apic_page);
3232 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3233 vapic_page, &max_irr);
3234 kunmap(vmx->nested.virtual_apic_page);
3235
3236 status = vmcs_read16(GUEST_INTR_STATUS);
3237 if ((u8)max_irr > ((u8)status & 0xff)) {
3238 status &= ~0xff;
3239 status |= (u8)max_irr;
3240 vmcs_write16(GUEST_INTR_STATUS, status);
3241 }
3242 }
3243
3244 nested_mark_vmcs12_pages_dirty(vcpu);
3245}
3246
3247static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3248 unsigned long exit_qual)
3249{
3250 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3251 unsigned int nr = vcpu->arch.exception.nr;
3252 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3253
3254 if (vcpu->arch.exception.has_error_code) {
3255 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3256 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3257 }
3258
3259 if (kvm_exception_is_soft(nr))
3260 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3261 else
3262 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3263
3264 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3265 vmx_get_nmi_mask(vcpu))
3266 intr_info |= INTR_INFO_UNBLOCK_NMI;
3267
3268 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3269}
3270
3271static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
3272{
3273 struct vcpu_vmx *vmx = to_vmx(vcpu);
3274 unsigned long exit_qual;
3275 bool block_nested_events =
3276 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
3277
3278 if (vcpu->arch.exception.pending &&
3279 nested_vmx_check_exception(vcpu, &exit_qual)) {
3280 if (block_nested_events)
3281 return -EBUSY;
3282 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3283 return 0;
3284 }
3285
3286 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3287 vmx->nested.preemption_timer_expired) {
3288 if (block_nested_events)
3289 return -EBUSY;
3290 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3291 return 0;
3292 }
3293
3294 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
3295 if (block_nested_events)
3296 return -EBUSY;
3297 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3298 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3299 INTR_INFO_VALID_MASK, 0);
3300 /*
3301 * The NMI-triggered VM exit counts as injection:
3302 * clear this one and block further NMIs.
3303 */
3304 vcpu->arch.nmi_pending = 0;
3305 vmx_set_nmi_mask(vcpu, true);
3306 return 0;
3307 }
3308
3309 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
3310 nested_exit_on_intr(vcpu)) {
3311 if (block_nested_events)
3312 return -EBUSY;
3313 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
3314 return 0;
3315 }
3316
3317 vmx_complete_nested_posted_interrupt(vcpu);
3318 return 0;
3319}
3320
3321static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
3322{
3323 ktime_t remaining =
3324 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
3325 u64 value;
3326
3327 if (ktime_to_ns(remaining) <= 0)
3328 return 0;
3329
3330 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
3331 do_div(value, 1000000);
3332 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
3333}
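/*
 * Editor's note, not part of the upstream file: the conversion above turns
 * the hrtimer's remaining nanoseconds into guest TSC ticks
 * (ns * virtual_tsc_khz / 10^6) and then scales them down by the emulated
 * preemption-timer rate shift.  E.g. with a 2 GHz virtual TSC
 * (virtual_tsc_khz == 2000000) and 1 ms left:
 *
 *	ticks = 1000000ULL * 2000000 / 1000000;		// 2000000 TSC ticks
 *	value = ticks >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
 */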
3334
3335/*
3336 * Update the guest state fields of vmcs12 to reflect changes that
3337 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
3338 * VM-entry controls is also updated, since this is really a guest
3339 * state bit.)
3340 */
3341static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3342{
3343 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
3344 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
3345
3346 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3347 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
3348 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
3349
3350 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
3351 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
3352 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
3353 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
3354 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
3355 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
3356 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
3357 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
3358 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
3359 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
3360 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
3361 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
3362 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
3363 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
3364 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
3365 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
3366 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
3367 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
3368 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
3369 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
3370 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
3371 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
3372 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
3373 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
3374 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
3375 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
3376 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
3377 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
3378 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
3379 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
3380 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
3381 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
3382 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
3383 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
3384 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
3385 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
3386
3387 vmcs12->guest_interruptibility_info =
3388 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3389 vmcs12->guest_pending_dbg_exceptions =
3390 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
3391 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
3392 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
3393 else
3394 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
3395
3396 if (nested_cpu_has_preemption_timer(vmcs12)) {
3397 if (vmcs12->vm_exit_controls &
3398 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
3399 vmcs12->vmx_preemption_timer_value =
3400 vmx_get_preemption_timer_value(vcpu);
3401 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
3402 }
3403
3404 /*
3405 * In some cases (usually, nested EPT), L2 is allowed to change its
3406 * own CR3 without exiting. If it has changed it, we must keep it.
3407 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
3408 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
3409 *
3410 * Additionally, restore L2's PDPTR to vmcs12.
3411 */
3412 if (enable_ept) {
3413 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
3414 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
3415 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
3416 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
3417 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
3418 }
3419
3420 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
3421
3422 if (nested_cpu_has_vid(vmcs12))
3423 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
3424
3425 vmcs12->vm_entry_controls =
3426 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
3427 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
3428
3429 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
3430 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
3431 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3432 }
3433
3434 /* TODO: These cannot have changed unless we have MSR bitmaps and
3435 * the relevant bit asks not to trap the change */
3436 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
3437 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
3438 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
3439 vmcs12->guest_ia32_efer = vcpu->arch.efer;
3440 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
3441 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
3442 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
3443 if (kvm_mpx_supported())
3444 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3445}
3446
3447/*
3448 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
3449 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
3450 * and this function updates it to reflect the changes to the guest state while
3451 * L2 was running (and perhaps made some exits which were handled directly by L0
3452 * without going back to L1), and to reflect the exit reason.
3453 * Note that we do not have to copy here all VMCS fields, just those that
3454 * could have changed by the L2 guest or the exit - i.e., the guest-state and
3455 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
3456 * which already writes to vmcs12 directly.
3457 */
3458static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
3459 u32 exit_reason, u32 exit_intr_info,
3460 unsigned long exit_qualification)
3461{
3462 /* update guest state fields: */
3463 sync_vmcs12(vcpu, vmcs12);
3464
3465 /* update exit information fields: */
3466
3467 vmcs12->vm_exit_reason = exit_reason;
3468 vmcs12->exit_qualification = exit_qualification;
3469 vmcs12->vm_exit_intr_info = exit_intr_info;
3470
3471 vmcs12->idt_vectoring_info_field = 0;
3472 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3473 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
3474
3475 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
3476 vmcs12->launch_state = 1;
3477
3478 /* vm_entry_intr_info_field is cleared on exit. Emulate this
3479 * instead of reading the real value. */
3480 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
3481
3482 /*
3483	 * Transfer the event that L0 or L1 may have wanted to inject into
3484 * L2 to IDT_VECTORING_INFO_FIELD.
3485 */
3486 vmcs12_save_pending_event(vcpu, vmcs12);
3487
3488 /*
3489 * According to spec, there's no need to store the guest's
3490 * MSRs if the exit is due to a VM-entry failure that occurs
3491 * during or after loading the guest state. Since this exit
3492 * does not fall in that category, we need to save the MSRs.
3493 */
3494 if (nested_vmx_store_msr(vcpu,
3495 vmcs12->vm_exit_msr_store_addr,
3496 vmcs12->vm_exit_msr_store_count))
3497 nested_vmx_abort(vcpu,
3498 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
3499	}
3500
3501 /*
3502 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
3503 * preserved above and would only end up incorrectly in L1.
3504 */
3505 vcpu->arch.nmi_injected = false;
3506 kvm_clear_exception_queue(vcpu);
3507 kvm_clear_interrupt_queue(vcpu);
3508}
3509
3510/*
3511 * A part of what we need to do when the nested L2 guest exits and we want to
3512 * run its L1 parent, is to reset L1's guest state to the host state specified
3513 * in vmcs12.
3514 * This function is to be called not only on normal nested exit, but also on
3515 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
3516 * Failures During or After Loading Guest State").
3517 * This function should be called when the active VMCS is L1's (vmcs01).
3518 */
3519static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3520 struct vmcs12 *vmcs12)
3521{
3522 struct kvm_segment seg;
3523 u32 entry_failure_code;
3524
3525 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
3526 vcpu->arch.efer = vmcs12->host_ia32_efer;
3527 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3528 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
3529 else
3530 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
3531 vmx_set_efer(vcpu, vcpu->arch.efer);
3532
3533 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
3534 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
3535 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
3536 vmx_set_interrupt_shadow(vcpu, 0);
3537
3538 /*
3539 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
3540 * actually changed, because vmx_set_cr0 refers to efer set above.
3541 *
3542 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
3543 * (KVM doesn't change it);
3544 */
3545 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3546 vmx_set_cr0(vcpu, vmcs12->host_cr0);
3547
3548 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
3549 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3550 vmx_set_cr4(vcpu, vmcs12->host_cr4);
3551
3552 nested_ept_uninit_mmu_context(vcpu);
3553
3554 /*
3555 * Only PDPTE load can fail as the value of cr3 was checked on entry and
3556 * couldn't have changed.
3557 */
3558 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
3559 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
3560
3561 if (!enable_ept)
3562 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
3563
3564 /*
3565 * If vmcs01 doesn't use VPID, CPU flushes TLB on every
3566 * VMEntry/VMExit. Thus, no need to flush TLB.
3567 *
3568 * If vmcs12 doesn't use VPID, L1 expects TLB to be
3569 * flushed on every VMEntry/VMExit.
3570 *
3571 * Otherwise, we can preserve TLB entries as long as we are
3572 * able to tag L1 TLB entries differently than L2 TLB entries.
3573 *
3574 * If vmcs12 uses EPT, we need to execute this flush on EPTP01
3575 * and therefore we request the TLB flush to happen only after VMCS EPTP
3576 * has been set by KVM_REQ_LOAD_CR3.
3577 */
3578 if (enable_vpid &&
3579 (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
3580 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3581 }
3582
3583 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
3584 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
3585 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
3586 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
3587 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
3588 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
3589 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
3590
3591 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
3592 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
3593 vmcs_write64(GUEST_BNDCFGS, 0);
3594
3595 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
3596 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
3597 vcpu->arch.pat = vmcs12->host_ia32_pat;
3598 }
3599 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
3600 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
3601 vmcs12->host_ia32_perf_global_ctrl);
3602
3603 /* Set L1 segment info according to Intel SDM
3604 27.5.2 Loading Host Segment and Descriptor-Table Registers */
3605 seg = (struct kvm_segment) {
3606 .base = 0,
3607 .limit = 0xFFFFFFFF,
3608 .selector = vmcs12->host_cs_selector,
3609 .type = 11,
3610 .present = 1,
3611 .s = 1,
3612 .g = 1
3613 };
3614 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3615 seg.l = 1;
3616 else
3617 seg.db = 1;
3618 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
3619 seg = (struct kvm_segment) {
3620 .base = 0,
3621 .limit = 0xFFFFFFFF,
3622 .type = 3,
3623 .present = 1,
3624 .s = 1,
3625 .db = 1,
3626 .g = 1
3627 };
3628 seg.selector = vmcs12->host_ds_selector;
3629 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
3630 seg.selector = vmcs12->host_es_selector;
3631 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
3632 seg.selector = vmcs12->host_ss_selector;
3633 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
3634 seg.selector = vmcs12->host_fs_selector;
3635 seg.base = vmcs12->host_fs_base;
3636 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
3637 seg.selector = vmcs12->host_gs_selector;
3638 seg.base = vmcs12->host_gs_base;
3639 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
3640 seg = (struct kvm_segment) {
3641 .base = vmcs12->host_tr_base,
3642 .limit = 0x67,
3643 .selector = vmcs12->host_tr_selector,
3644 .type = 11,
3645 .present = 1
3646 };
3647 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
3648
3649 kvm_set_dr(vcpu, 7, 0x400);
3650 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
3651
3652 if (cpu_has_vmx_msr_bitmap())
3653 vmx_update_msr_bitmap(vcpu);
3654
3655 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
3656 vmcs12->vm_exit_msr_load_count))
3657 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3658}
3659
3660static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
3661{
3662 struct shared_msr_entry *efer_msr;
3663 unsigned int i;
3664
3665 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
3666 return vmcs_read64(GUEST_IA32_EFER);
3667
3668 if (cpu_has_load_ia32_efer())
3669 return host_efer;
3670
3671 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
3672 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
3673 return vmx->msr_autoload.guest.val[i].value;
3674 }
3675
3676 efer_msr = find_msr_entry(vmx, MSR_EFER);
3677 if (efer_msr)
3678 return efer_msr->data;
3679
3680 return host_efer;
3681}
3682
3683static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
3684{
3685 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3686 struct vcpu_vmx *vmx = to_vmx(vcpu);
3687 struct vmx_msr_entry g, h;
3688 struct msr_data msr;
3689 gpa_t gpa;
3690 u32 i, j;
3691
3692 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
3693
3694 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
3695 /*
3696 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
3697 * as vmcs01.GUEST_DR7 contains a userspace defined value
3698 * and vcpu->arch.dr7 is not squirreled away before the
3699 * nested VMENTER (not worth adding a variable in nested_vmx).
3700 */
3701 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
3702 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
3703 else
3704 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
3705 }
3706
3707 /*
3708 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
3709 * handle a variety of side effects to KVM's software model.
3710 */
3711 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
3712
3713 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3714 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
3715
3716 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3717 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
3718
3719 nested_ept_uninit_mmu_context(vcpu);
3720 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3721 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3722
3723 /*
3724 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
3725 * from vmcs01 (if necessary). The PDPTRs are not loaded on
3726 * VMFail, like everything else we just need to ensure our
3727 * software model is up-to-date.
3728 */
3729 ept_save_pdptrs(vcpu);
3730
3731 kvm_mmu_reset_context(vcpu);
3732
3733 if (cpu_has_vmx_msr_bitmap())
3734 vmx_update_msr_bitmap(vcpu);
3735
3736 /*
3737 * This nasty bit of open coding is a compromise between blindly
3738 * loading L1's MSRs using the exit load lists (incorrect emulation
3739 * of VMFail), leaving the nested VM's MSRs in the software model
3740 * (incorrect behavior) and snapshotting the modified MSRs (too
3741	 * expensive since the lists are unbounded by hardware). For each
3742 * MSR that was (prematurely) loaded from the nested VMEntry load
3743 * list, reload it from the exit load list if it exists and differs
3744 * from the guest value. The intent is to stuff host state as
3745 * silently as possible, not to fully process the exit load list.
3746 */
3747 msr.host_initiated = false;
3748 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
3749 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
3750 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
3751 pr_debug_ratelimited(
3752 "%s read MSR index failed (%u, 0x%08llx)\n",
3753 __func__, i, gpa);
3754 goto vmabort;
3755 }
3756
3757 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
3758 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
3759 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
3760 pr_debug_ratelimited(
3761 "%s read MSR failed (%u, 0x%08llx)\n",
3762 __func__, j, gpa);
3763 goto vmabort;
3764 }
3765 if (h.index != g.index)
3766 continue;
3767 if (h.value == g.value)
3768 break;
3769
3770 if (nested_vmx_load_msr_check(vcpu, &h)) {
3771 pr_debug_ratelimited(
3772 "%s check failed (%u, 0x%x, 0x%x)\n",
3773 __func__, j, h.index, h.reserved);
3774 goto vmabort;
3775 }
3776
3777 msr.index = h.index;
3778 msr.data = h.value;
3779 if (kvm_set_msr(vcpu, &msr)) {
3780 pr_debug_ratelimited(
3781 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
3782 __func__, j, h.index, h.value);
3783 goto vmabort;
3784 }
3785 }
3786 }
3787
3788 return;
3789
3790vmabort:
3791 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3792}
3793
3794/*
3795 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
3796 * and modify vmcs12 to make it see what it would expect to see there if
3797 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
3798 */
3799void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
3800 u32 exit_intr_info, unsigned long exit_qualification)
3801{
3802 struct vcpu_vmx *vmx = to_vmx(vcpu);
3803 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3804
3805 /* trying to cancel vmlaunch/vmresume is a bug */
3806 WARN_ON_ONCE(vmx->nested.nested_run_pending);
3807
3808 leave_guest_mode(vcpu);
3809
3810 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3811 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3812
3813 if (likely(!vmx->fail)) {
3814 if (exit_reason == -1)
3815 sync_vmcs12(vcpu, vmcs12);
3816 else
3817 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
3818 exit_qualification);
3819
3820 /*
3821 * Must happen outside of sync_vmcs12() as it will
3822 * also be used to capture vmcs12 cache as part of
3823 * capturing nVMX state for snapshot (migration).
3824 *
3825 * Otherwise, this flush will dirty guest memory at a
3826 * point it is already assumed by user-space to be
3827 * immutable.
3828 */
3829 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
3830	} else {
3831 /*
3832 * The only expected VM-instruction error is "VM entry with
3833 * invalid control field(s)." Anything else indicates a
3834 * problem with L0. And we should never get here with a
3835 * VMFail of any type if early consistency checks are enabled.
3836 */
3837 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
3838 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3839 WARN_ON_ONCE(nested_early_check);
3840 }
3841
3842 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3843
3844 /* Update any VMCS fields that might have changed while L2 ran */
3845 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3846 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3847 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
3848
3849 if (kvm_has_tsc_control)
3850 decache_tsc_multiplier(vmx);
3851
3852 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
3853 vmx->nested.change_vmcs01_virtual_apic_mode = false;
3854 vmx_set_virtual_apic_mode(vcpu);
3855 } else if (!nested_cpu_has_ept(vmcs12) &&
3856 nested_cpu_has2(vmcs12,
3857 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3858 vmx_flush_tlb(vcpu, true);
3859 }
3860
3861 /* This is needed for same reason as it was needed in prepare_vmcs02 */
3862 vmx->host_rsp = 0;
3863
3864 /* Unpin physical memory we referred to in vmcs02 */
3865 if (vmx->nested.apic_access_page) {
3866 kvm_release_page_dirty(vmx->nested.apic_access_page);
3867 vmx->nested.apic_access_page = NULL;
3868 }
3869 if (vmx->nested.virtual_apic_page) {
3870 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
3871 vmx->nested.virtual_apic_page = NULL;
3872 }
3873 if (vmx->nested.pi_desc_page) {
3874 kunmap(vmx->nested.pi_desc_page);
3875 kvm_release_page_dirty(vmx->nested.pi_desc_page);
3876 vmx->nested.pi_desc_page = NULL;
3877 vmx->nested.pi_desc = NULL;
3878 }
3879
3880 /*
3881	 * While L2 was running, an mmu_notifier may have reloaded the page's
3882	 * hpa, but only in the L2 vmcs.  Reload it for L1 before entering L1.
3883 */
3884 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
3885
3886 if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
3887 vmx->nested.need_vmcs12_sync = true;
3888
3889 /* in case we halted in L2 */
3890 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3891
3892 if (likely(!vmx->fail)) {
3893 /*
3894 * TODO: SDM says that with acknowledge interrupt on
3895 * exit, bit 31 of the VM-exit interrupt information
3896 * (valid interrupt) is always set to 1 on
3897 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
3898 * need kvm_cpu_has_interrupt(). See the commit
3899 * message for details.
3900 */
3901 if (nested_exit_intr_ack_set(vcpu) &&
3902 exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
3903 kvm_cpu_has_interrupt(vcpu)) {
3904 int irq = kvm_cpu_get_interrupt(vcpu);
3905 WARN_ON(irq < 0);
3906 vmcs12->vm_exit_intr_info = irq |
3907 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
3908 }
3909
3910 if (exit_reason != -1)
3911 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
3912 vmcs12->exit_qualification,
3913 vmcs12->idt_vectoring_info_field,
3914 vmcs12->vm_exit_intr_info,
3915 vmcs12->vm_exit_intr_error_code,
3916 KVM_ISA_VMX);
3917
3918 load_vmcs12_host_state(vcpu, vmcs12);
3919
3920 return;
3921 }
3922
3923 /*
3924 * After an early L2 VM-entry failure, we're now back
3925 * in L1 which thinks it just finished a VMLAUNCH or
3926 * VMRESUME instruction, so we need to set the failure
3927 * flag and the VM-instruction error field of the VMCS
3928 * accordingly, and skip the emulated instruction.
3929 */
3930 (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3931
3932 /*
3933 * Restore L1's host state to KVM's software model. We're here
3934 * because a consistency check was caught by hardware, which
3935 * means some amount of guest state has been propagated to KVM's
3936 * model and needs to be unwound to the host's state.
3937 */
3938 nested_vmx_restore_host_state(vcpu);
3939
3940 vmx->fail = 0;
3941}
3942
3943/*
3944 * Decode the memory-address operand of a vmx instruction, as recorded on an
3945 * exit caused by such an instruction (run by a guest hypervisor).
3946 * On success, returns 0. When the operand is invalid, returns 1 and injects
3947 * #UD, #GP, or #SS.
3948 */
3949int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
3950 u32 vmx_instruction_info, bool wr, gva_t *ret)
3951{
3952 gva_t off;
3953 bool exn;
3954 struct kvm_segment s;
3955
3956 /*
3957 * According to Vol. 3B, "Information for VM Exits Due to Instruction
3958 * Execution", on an exit, vmx_instruction_info holds most of the
3959 * addressing components of the operand. Only the displacement part
3960 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
3961 * For how an actual address is calculated from all these components,
3962 * refer to Vol. 1, "Operand Addressing".
3963 */
3964 int scaling = vmx_instruction_info & 3;
3965 int addr_size = (vmx_instruction_info >> 7) & 7;
3966 bool is_reg = vmx_instruction_info & (1u << 10);
3967 int seg_reg = (vmx_instruction_info >> 15) & 7;
3968 int index_reg = (vmx_instruction_info >> 18) & 0xf;
3969 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
3970 int base_reg = (vmx_instruction_info >> 23) & 0xf;
3971 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
3972
3973 if (is_reg) {
3974 kvm_queue_exception(vcpu, UD_VECTOR);
3975 return 1;
3976 }
3977
3978 /* Addr = segment_base + offset */
3979 /* offset = base + [index * scale] + displacement */
3980 off = exit_qualification; /* holds the displacement */
3981 if (base_is_valid)
3982 off += kvm_register_read(vcpu, base_reg);
3983 if (index_is_valid)
3984 off += kvm_register_read(vcpu, index_reg)<<scaling;
3985 vmx_get_segment(vcpu, &s, seg_reg);
3986 *ret = s.base + off;
3987
3988 if (addr_size == 1) /* 32 bit */
3989 *ret &= 0xffffffff;
3990
3991 /* Checks for #GP/#SS exceptions. */
3992 exn = false;
3993 if (is_long_mode(vcpu)) {
3994 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
3995 * non-canonical form. This is the only check on the memory
3996 * destination for long mode!
3997 */
3998 exn = is_noncanonical_address(*ret, vcpu);
3999 } else if (is_protmode(vcpu)) {
4000 /* Protected mode: apply checks for segment validity in the
4001 * following order:
4002 * - segment type check (#GP(0) may be thrown)
4003 * - usability check (#GP(0)/#SS(0))
4004 * - limit check (#GP(0)/#SS(0))
4005 */
4006 if (wr)
4007 /* #GP(0) if the destination operand is located in a
4008 * read-only data segment or any code segment.
4009 */
4010 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4011 else
4012 /* #GP(0) if the source operand is located in an
4013 * execute-only code segment
4014 */
4015 exn = ((s.type & 0xa) == 8);
4016 if (exn) {
4017 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4018 return 1;
4019 }
4020 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4021 */
4022 exn = (s.unusable != 0);
4023 /* Protected mode: #GP(0)/#SS(0) if the memory
4024 * operand is outside the segment limit.
4025 */
4026 exn = exn || (off + sizeof(u64) > s.limit);
4027 }
4028 if (exn) {
4029 kvm_queue_exception_e(vcpu,
4030 seg_reg == VCPU_SREG_SS ?
4031 SS_VECTOR : GP_VECTOR,
4032 0);
4033 return 1;
4034 }
4035
4036 return 0;
4037}
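/*
 * Editor's note, not part of the upstream file: the bit layout decoded at
 * the top of get_vmx_mem_address() can be exercised on its own.  A
 * hypothetical stand-alone decoder using the same field positions:
 */
#if 0	/* illustrative only */
struct vmx_insn_operand {
	int scaling, addr_size, seg_reg, index_reg, base_reg;
	bool is_reg, index_valid, base_valid;
};

static struct vmx_insn_operand decode_vmx_insn_info(u32 info)
{
	return (struct vmx_insn_operand) {
		.scaling     = info & 3,
		.addr_size   = (info >> 7) & 7,
		.is_reg      = info & (1u << 10),
		.seg_reg     = (info >> 15) & 7,
		.index_reg   = (info >> 18) & 0xf,
		.index_valid = !(info & (1u << 22)),
		.base_reg    = (info >> 23) & 0xf,
		.base_valid  = !(info & (1u << 27)),
	};
}
#endif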
4038
4039static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
4040{
4041 gva_t gva;
4042 struct x86_exception e;
4043
4044 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4045 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
4046 return 1;
4047
4048 if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
4049 kvm_inject_page_fault(vcpu, &e);
4050 return 1;
4051 }
4052
4053 return 0;
4054}
4055
4056/*
4057 * Allocate a shadow VMCS and associate it with the currently loaded
4058 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4059 * VMCS is also VMCLEARed, so that it is ready for use.
4060 */
4061static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4062{
4063 struct vcpu_vmx *vmx = to_vmx(vcpu);
4064 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4065
4066 /*
4067 * We should allocate a shadow vmcs for vmcs01 only when L1
4068 * executes VMXON and free it when L1 executes VMXOFF.
4069 * As it is invalid to execute VMXON twice, we shouldn't reach
4070	 * here when vmcs01 already has an allocated shadow vmcs.
4071 */
4072 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
4073
4074 if (!loaded_vmcs->shadow_vmcs) {
4075 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4076 if (loaded_vmcs->shadow_vmcs)
4077 vmcs_clear(loaded_vmcs->shadow_vmcs);
4078 }
4079 return loaded_vmcs->shadow_vmcs;
4080}
4081
4082static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4083{
4084 struct vcpu_vmx *vmx = to_vmx(vcpu);
4085 int r;
4086
4087 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4088 if (r < 0)
4089 goto out_vmcs02;
4090
4091 vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
4092 if (!vmx->nested.cached_vmcs12)
4093 goto out_cached_vmcs12;
4094
4095 vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
4096 if (!vmx->nested.cached_shadow_vmcs12)
4097 goto out_cached_shadow_vmcs12;
4098
4099 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4100 goto out_shadow_vmcs;
4101
4102 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
4103 HRTIMER_MODE_REL_PINNED);
4104 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4105
4106 vmx->nested.vpid02 = allocate_vpid();
4107
4108 vmx->nested.vmcs02_initialized = false;
4109 vmx->nested.vmxon = true;
4110 return 0;
4111
4112out_shadow_vmcs:
4113 kfree(vmx->nested.cached_shadow_vmcs12);
4114
4115out_cached_shadow_vmcs12:
4116 kfree(vmx->nested.cached_vmcs12);
4117
4118out_cached_vmcs12:
4119 free_loaded_vmcs(&vmx->nested.vmcs02);
4120
4121out_vmcs02:
4122 return -ENOMEM;
4123}
4124
4125/*
4126 * Emulate the VMXON instruction.
4127 * Currently, we just remember that VMX is active, and do not save or even
4128 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
4129 * do not currently need to store anything in that guest-allocated memory
4130 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
4131 * argument is different from the VMXON pointer (which the spec says they do).
4132 */
4133static int handle_vmon(struct kvm_vcpu *vcpu)
4134{
4135 int ret;
4136 gpa_t vmptr;
4137 struct page *page;
4138 struct vcpu_vmx *vmx = to_vmx(vcpu);
4139 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
4140 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
4141
4142 /*
4143 * The Intel VMX Instruction Reference lists a bunch of bits that are
4144 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
4145 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
4146 * Otherwise, we should fail with #UD. But most faulting conditions
4147 * have already been checked by hardware, prior to the VM-exit for
4148 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
4149 * that bit set to 1 in non-root mode.
4150 */
4151 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
4152 kvm_queue_exception(vcpu, UD_VECTOR);
4153 return 1;
4154 }
4155
4156 /* CPL=0 must be checked manually. */
4157 if (vmx_get_cpl(vcpu)) {
4158 kvm_inject_gp(vcpu, 0);
4159 return 1;
4160 }
4161
4162 if (vmx->nested.vmxon)
4163 return nested_vmx_failValid(vcpu,
4164 VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
4165
4166 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
4167 != VMXON_NEEDED_FEATURES) {
4168 kvm_inject_gp(vcpu, 0);
4169 return 1;
4170 }
4171
4172 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4173 return 1;
4174
4175 /*
4176 * SDM 3: 24.11.5
4177	 * The first 4 bytes of the VMXON region contain the supported
4178 * VMCS revision identifier
4179 *
4180 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
4181 * which replaces physical address width with 32
4182 */
4183 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
4184 return nested_vmx_failInvalid(vcpu);
4185
4186 page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
4187 if (is_error_page(page))
4188 return nested_vmx_failInvalid(vcpu);
4189
4190 if (*(u32 *)kmap(page) != VMCS12_REVISION) {
4191 kunmap(page);
4192 kvm_release_page_clean(page);
4193 return nested_vmx_failInvalid(vcpu);
4194 }
4195 kunmap(page);
4196 kvm_release_page_clean(page);
4197
4198 vmx->nested.vmxon_ptr = vmptr;
4199 ret = enter_vmx_operation(vcpu);
4200 if (ret)
4201 return ret;
4202
4203 return nested_vmx_succeed(vcpu);
4204}
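/*
 * Editor's note, not part of the upstream file: the VMXON-pointer checks in
 * handle_vmon() are purely arithmetic.  Assuming a hypothetical
 * cpuid_maxphyaddr() of 39, a pointer of (1ULL << 39) fails because
 * vmptr >> 39 is non-zero, and an unaligned pointer such as 0x1010 fails
 * the PAGE_ALIGNED() test; either way the guest sees VMfailInvalid before
 * the revision-ID check is even attempted.
 */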
4205
4206static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
4207{
4208 struct vcpu_vmx *vmx = to_vmx(vcpu);
4209
4210 if (vmx->nested.current_vmptr == -1ull)
4211 return;
4212
4213 if (enable_shadow_vmcs) {
4214 /* copy to memory all shadowed fields in case
4215 they were modified */
4216 copy_shadow_to_vmcs12(vmx);
4217 vmx->nested.need_vmcs12_sync = false;
4218 vmx_disable_shadow_vmcs(vmx);
4219 }
4220 vmx->nested.posted_intr_nv = -1;
4221
4222 /* Flush VMCS12 to guest memory */
4223 kvm_vcpu_write_guest_page(vcpu,
4224 vmx->nested.current_vmptr >> PAGE_SHIFT,
4225 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
4226
4227 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4228
4229 vmx->nested.current_vmptr = -1ull;
4230}
4231
4232/* Emulate the VMXOFF instruction */
4233static int handle_vmoff(struct kvm_vcpu *vcpu)
4234{
4235 if (!nested_vmx_check_permission(vcpu))
4236 return 1;
4237 free_nested(vcpu);
4238 return nested_vmx_succeed(vcpu);
4239}
4240
4241/* Emulate the VMCLEAR instruction */
4242static int handle_vmclear(struct kvm_vcpu *vcpu)
4243{
4244 struct vcpu_vmx *vmx = to_vmx(vcpu);
4245 u32 zero = 0;
4246 gpa_t vmptr;
4247
4248 if (!nested_vmx_check_permission(vcpu))
4249 return 1;
4250
4251 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4252 return 1;
4253
4254 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
4255 return nested_vmx_failValid(vcpu,
4256 VMXERR_VMCLEAR_INVALID_ADDRESS);
4257
4258 if (vmptr == vmx->nested.vmxon_ptr)
4259 return nested_vmx_failValid(vcpu,
4260 VMXERR_VMCLEAR_VMXON_POINTER);
4261
4262 if (vmx->nested.hv_evmcs_page) {
4263 if (vmptr == vmx->nested.hv_evmcs_vmptr)
4264 nested_release_evmcs(vcpu);
4265 } else {
4266 if (vmptr == vmx->nested.current_vmptr)
4267 nested_release_vmcs12(vcpu);
4268
4269 kvm_vcpu_write_guest(vcpu,
4270 vmptr + offsetof(struct vmcs12,
4271 launch_state),
4272 &zero, sizeof(zero));
4273 }
4274
4275 return nested_vmx_succeed(vcpu);
4276}
4277
4278static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
4279
4280/* Emulate the VMLAUNCH instruction */
4281static int handle_vmlaunch(struct kvm_vcpu *vcpu)
4282{
4283 return nested_vmx_run(vcpu, true);
4284}
4285
4286/* Emulate the VMRESUME instruction */
4287static int handle_vmresume(struct kvm_vcpu *vcpu)
4288{
4289
4290 return nested_vmx_run(vcpu, false);
4291}
4292
4293static int handle_vmread(struct kvm_vcpu *vcpu)
4294{
4295 unsigned long field;
4296 u64 field_value;
4297 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4298 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4299 gva_t gva = 0;
4300 struct vmcs12 *vmcs12;
4301
4302 if (!nested_vmx_check_permission(vcpu))
4303 return 1;
4304
4305 if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
4306 return nested_vmx_failInvalid(vcpu);
4307
4308 if (!is_guest_mode(vcpu))
4309 vmcs12 = get_vmcs12(vcpu);
4310 else {
4311 /*
4312 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
4313 * to shadowed-field sets the ALU flags for VMfailInvalid.
4314 */
4315 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4316 return nested_vmx_failInvalid(vcpu);
4317 vmcs12 = get_shadow_vmcs12(vcpu);
4318 }
4319
4320 /* Decode instruction info and find the field to read */
4321 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4322 /* Read the field, zero-extended to a u64 field_value */
4323 if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
4324 return nested_vmx_failValid(vcpu,
4325 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4326
4327 /*
4328 * Now copy part of this value to register or memory, as requested.
4329 * Note that the number of bits actually copied is 32 or 64 depending
4330 * on the guest's mode (32 or 64 bit), not on the given field's length.
4331 */
4332 if (vmx_instruction_info & (1u << 10)) {
4333 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
4334 field_value);
4335 } else {
4336 if (get_vmx_mem_address(vcpu, exit_qualification,
4337 vmx_instruction_info, true, &gva))
4338 return 1;
4339 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
4340 kvm_write_guest_virt_system(vcpu, gva, &field_value,
4341 (is_long_mode(vcpu) ? 8 : 4), NULL);
4342 }
4343
4344 return nested_vmx_succeed(vcpu);
4345}
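/*
 * Editor's note, not part of the upstream file: for VMREAD the instruction
 * information field names the register holding the VMCS-field encoding in
 * bits 31:28 and, when bit 10 is set, the destination register in bits 6:3;
 * e.g. info with bit 10 set, bits 31:28 == 3 and bits 6:3 == 0 means "read
 * the field whose encoding is in RBX into RAX".  With bit 10 clear the
 * destination is the memory operand resolved by get_vmx_mem_address().
 */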
4346
4347
4348static int handle_vmwrite(struct kvm_vcpu *vcpu)
4349{
4350 unsigned long field;
4351 gva_t gva;
4352 struct vcpu_vmx *vmx = to_vmx(vcpu);
4353 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4354 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4355
4356 /* The value to write might be 32 or 64 bits, depending on L1's long
4357 * mode, and eventually we need to write that into a field of several
4358 * possible lengths. The code below first zero-extends the value to 64
4359 * bit (field_value), and then copies only the appropriate number of
4360 * bits into the vmcs12 field.
4361 */
4362 u64 field_value = 0;
4363 struct x86_exception e;
4364 struct vmcs12 *vmcs12;
4365
4366 if (!nested_vmx_check_permission(vcpu))
4367 return 1;
4368
4369 if (vmx->nested.current_vmptr == -1ull)
4370 return nested_vmx_failInvalid(vcpu);
4371
4372 if (vmx_instruction_info & (1u << 10))
4373 field_value = kvm_register_readl(vcpu,
4374 (((vmx_instruction_info) >> 3) & 0xf));
4375 else {
4376 if (get_vmx_mem_address(vcpu, exit_qualification,
4377 vmx_instruction_info, false, &gva))
4378 return 1;
4379 if (kvm_read_guest_virt(vcpu, gva, &field_value,
4380 (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
4381 kvm_inject_page_fault(vcpu, &e);
4382 return 1;
4383 }
4384 }
4385
4386
4387 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4388 /*
4389 * If the vCPU supports "VMWRITE to any supported field in the
4390 * VMCS," then the "read-only" fields are actually read/write.
4391 */
4392 if (vmcs_field_readonly(field) &&
4393 !nested_cpu_has_vmwrite_any_field(vcpu))
4394 return nested_vmx_failValid(vcpu,
4395 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
4396
4397 if (!is_guest_mode(vcpu))
4398 vmcs12 = get_vmcs12(vcpu);
4399 else {
4400 /*
4401 * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
4402 * to shadowed-field sets the ALU flags for VMfailInvalid.
4403 */
4404 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4405 return nested_vmx_failInvalid(vcpu);
4406 vmcs12 = get_shadow_vmcs12(vcpu);
4407 }
4408
4409 if (vmcs12_write_any(vmcs12, field, field_value) < 0)
4410 return nested_vmx_failValid(vcpu,
4411 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4412
4413 /*
4414 * Do not track vmcs12 dirty-state if in guest-mode
4415 * as we actually dirty shadow vmcs12 instead of vmcs12.
4416 */
4417 if (!is_guest_mode(vcpu)) {
4418 switch (field) {
4419#define SHADOW_FIELD_RW(x) case x:
4420#include "vmcs_shadow_fields.h"
4421 /*
4422 * The fields that can be updated by L1 without a vmexit are
4423 * always updated in the vmcs02, the others go down the slow
4424 * path of prepare_vmcs02.
4425 */
4426 break;
4427 default:
4428 vmx->nested.dirty_vmcs12 = true;
4429 break;
4430 }
4431 }
4432
4433 return nested_vmx_succeed(vcpu);
4434}
4435
4436static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
4437{
4438 vmx->nested.current_vmptr = vmptr;
4439 if (enable_shadow_vmcs) {
4440 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
4441 SECONDARY_EXEC_SHADOW_VMCS);
4442 vmcs_write64(VMCS_LINK_POINTER,
4443 __pa(vmx->vmcs01.shadow_vmcs));
4444 vmx->nested.need_vmcs12_sync = true;
4445 }
4446 vmx->nested.dirty_vmcs12 = true;
4447}
4448
4449/* Emulate the VMPTRLD instruction */
4450static int handle_vmptrld(struct kvm_vcpu *vcpu)
4451{
4452 struct vcpu_vmx *vmx = to_vmx(vcpu);
4453 gpa_t vmptr;
4454
4455 if (!nested_vmx_check_permission(vcpu))
4456 return 1;
4457
4458 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4459 return 1;
4460
4461 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
4462 return nested_vmx_failValid(vcpu,
4463 VMXERR_VMPTRLD_INVALID_ADDRESS);
4464
4465 if (vmptr == vmx->nested.vmxon_ptr)
4466 return nested_vmx_failValid(vcpu,
4467 VMXERR_VMPTRLD_VMXON_POINTER);
4468
4469 /* Forbid normal VMPTRLD if Enlightened version was used */
4470 if (vmx->nested.hv_evmcs)
4471 return 1;
4472
4473 if (vmx->nested.current_vmptr != vmptr) {
4474 struct vmcs12 *new_vmcs12;
4475 struct page *page;
4476
4477 page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
4478 if (is_error_page(page)) {
4479 /*
4480 * Reads from an unbacked page return all 1s,
4481 * which means that the 32 bits located at the
4482 * given physical address won't match the required
4483 * VMCS12_REVISION identifier.
4484 */
4485 nested_vmx_failValid(vcpu,
4486 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4487 return kvm_skip_emulated_instruction(vcpu);
4488 }
4489 new_vmcs12 = kmap(page);
4490 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
4491 (new_vmcs12->hdr.shadow_vmcs &&
4492 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
4493 kunmap(page);
4494 kvm_release_page_clean(page);
4495 return nested_vmx_failValid(vcpu,
4496 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4497 }
4498
4499 nested_release_vmcs12(vcpu);
4500
4501 /*
4502 * Load VMCS12 from guest memory since it is not already
4503 * cached.
4504 */
4505 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
4506 kunmap(page);
4507 kvm_release_page_clean(page);
4508
4509 set_current_vmptr(vmx, vmptr);
4510 }
4511
4512 return nested_vmx_succeed(vcpu);
4513}
4514
4515/* Emulate the VMPTRST instruction */
4516static int handle_vmptrst(struct kvm_vcpu *vcpu)
4517{
4518 unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
4519 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4520 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
4521 struct x86_exception e;
4522 gva_t gva;
4523
4524 if (!nested_vmx_check_permission(vcpu))
4525 return 1;
4526
4527 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
4528 return 1;
4529
4530 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
4531 return 1;
4532 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
4533 if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
4534 sizeof(gpa_t), &e)) {
4535 kvm_inject_page_fault(vcpu, &e);
4536 return 1;
4537 }
4538 return nested_vmx_succeed(vcpu);
4539}
4540
4541/* Emulate the INVEPT instruction */
4542static int handle_invept(struct kvm_vcpu *vcpu)
4543{
4544 struct vcpu_vmx *vmx = to_vmx(vcpu);
4545 u32 vmx_instruction_info, types;
4546 unsigned long type;
4547 gva_t gva;
4548 struct x86_exception e;
4549 struct {
4550 u64 eptp, gpa;
4551 } operand;
4552
4553 if (!(vmx->nested.msrs.secondary_ctls_high &
4554 SECONDARY_EXEC_ENABLE_EPT) ||
4555 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
4556 kvm_queue_exception(vcpu, UD_VECTOR);
4557 return 1;
4558 }
4559
4560 if (!nested_vmx_check_permission(vcpu))
4561 return 1;
4562
4563 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4564 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4565
4566 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
4567
4568 if (type >= 32 || !(types & (1 << type)))
4569 return nested_vmx_failValid(vcpu,
4570 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4571
4572 /* According to the Intel VMX instruction reference, the memory
4573 * operand is read even if it isn't needed (e.g., for type==global)
4574 */
4575 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4576 vmx_instruction_info, false, &gva))
4577 return 1;
4578 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4579 kvm_inject_page_fault(vcpu, &e);
4580 return 1;
4581 }
4582
4583 switch (type) {
4584 case VMX_EPT_EXTENT_GLOBAL:
4585 /*
4586 * TODO: track mappings and invalidate
4587 * single context requests appropriately
4588 */
4589 case VMX_EPT_EXTENT_CONTEXT:
4590 kvm_mmu_sync_roots(vcpu);
4591 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4592 break;
4593 default:
4594 BUG_ON(1);
4595 break;
4596 }
4597
4598 return nested_vmx_succeed(vcpu);
4599}
4600
4601static int handle_invvpid(struct kvm_vcpu *vcpu)
4602{
4603 struct vcpu_vmx *vmx = to_vmx(vcpu);
4604 u32 vmx_instruction_info;
4605 unsigned long type, types;
4606 gva_t gva;
4607 struct x86_exception e;
4608 struct {
4609 u64 vpid;
4610 u64 gla;
4611 } operand;
4612 u16 vpid02;
4613
4614 if (!(vmx->nested.msrs.secondary_ctls_high &
4615 SECONDARY_EXEC_ENABLE_VPID) ||
4616 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
4617 kvm_queue_exception(vcpu, UD_VECTOR);
4618 return 1;
4619 }
4620
4621 if (!nested_vmx_check_permission(vcpu))
4622 return 1;
4623
4624 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4625 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4626
4627 types = (vmx->nested.msrs.vpid_caps &
4628 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
4629
4630 if (type >= 32 || !(types & (1 << type)))
4631 return nested_vmx_failValid(vcpu,
4632 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4633
4634 /* according to the intel vmx instruction reference, the memory
4635 * operand is read even if it isn't needed (e.g., for type==global)
4636 */
4637 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4638 vmx_instruction_info, false, &gva))
4639 return 1;
4640 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4641 kvm_inject_page_fault(vcpu, &e);
4642 return 1;
4643 }
4644 if (operand.vpid >> 16)
4645 return nested_vmx_failValid(vcpu,
4646 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4647
4648 vpid02 = nested_get_vpid02(vcpu);
4649 switch (type) {
4650 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
4651 if (!operand.vpid ||
4652 is_noncanonical_address(operand.gla, vcpu))
4653 return nested_vmx_failValid(vcpu,
4654 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4655 if (cpu_has_vmx_invvpid_individual_addr()) {
4656 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
4657 vpid02, operand.gla);
4658 } else
4659 __vmx_flush_tlb(vcpu, vpid02, false);
4660 break;
4661 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
4662 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
4663 if (!operand.vpid)
4664 return nested_vmx_failValid(vcpu,
4665 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4666 __vmx_flush_tlb(vcpu, vpid02, false);
4667 break;
4668 case VMX_VPID_EXTENT_ALL_CONTEXT:
4669 __vmx_flush_tlb(vcpu, vpid02, false);
4670 break;
4671 default:
4672 WARN_ON_ONCE(1);
4673 return kvm_skip_emulated_instruction(vcpu);
4674 }
4675
4676 return nested_vmx_succeed(vcpu);
4677}
4678
4679static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
4680 struct vmcs12 *vmcs12)
4681{
4682 u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
4683 u64 address;
4684 bool accessed_dirty;
4685 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
4686
4687 if (!nested_cpu_has_eptp_switching(vmcs12) ||
4688 !nested_cpu_has_ept(vmcs12))
4689 return 1;
4690
4691 if (index >= VMFUNC_EPTP_ENTRIES)
4692 return 1;
4693
4694
4695 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
4696 &address, index * 8, 8))
4697 return 1;
4698
4699 accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
4700
4701 /*
4702 * If the (L2) guest does a vmfunc to the currently
4703 * active ept pointer, we don't have to do anything else
4704 */
4705 if (vmcs12->ept_pointer != address) {
4706 if (!valid_ept_address(vcpu, address))
4707 return 1;
4708
4709 kvm_mmu_unload(vcpu);
4710 mmu->ept_ad = accessed_dirty;
4711 mmu->mmu_role.base.ad_disabled = !accessed_dirty;
4712 vmcs12->ept_pointer = address;
4713 /*
4714 * TODO: Check what's the correct approach in case
4715 * mmu reload fails. Currently, we just let the next
4716 * reload potentially fail
4717 */
4718 kvm_mmu_reload(vcpu);
4719 }
4720
4721 return 0;
4722}
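/*
 * Editor's note, not part of the upstream file: the EPTP list referenced by
 * vmcs12->eptp_list_address is a guest page holding up to
 * VMFUNC_EPTP_ENTRIES 64-bit EPT pointers, so the entry selected by ECX is
 * read from offset index * 8; e.g. ECX == 3 reads 8 bytes at
 * eptp_list_address + 24.
 */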
4723
4724static int handle_vmfunc(struct kvm_vcpu *vcpu)
4725{
4726 struct vcpu_vmx *vmx = to_vmx(vcpu);
4727 struct vmcs12 *vmcs12;
4728 u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
4729
4730 /*
4731 * VMFUNC is only supported for nested guests, but we always enable the
4732 * secondary control for simplicity; for non-nested mode, fake that we
4733 * didn't by injecting #UD.
4734 */
4735 if (!is_guest_mode(vcpu)) {
4736 kvm_queue_exception(vcpu, UD_VECTOR);
4737 return 1;
4738 }
4739
4740 vmcs12 = get_vmcs12(vcpu);
4741 if ((vmcs12->vm_function_control & (1 << function)) == 0)
4742 goto fail;
4743
4744 switch (function) {
4745 case 0:
4746 if (nested_vmx_eptp_switching(vcpu, vmcs12))
4747 goto fail;
4748 break;
4749 default:
4750 goto fail;
4751 }
4752 return kvm_skip_emulated_instruction(vcpu);
4753
4754fail:
4755 nested_vmx_vmexit(vcpu, vmx->exit_reason,
4756 vmcs_read32(VM_EXIT_INTR_INFO),
4757 vmcs_readl(EXIT_QUALIFICATION));
4758 return 1;
4759}
4760
4761
4762static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
4763 struct vmcs12 *vmcs12)
4764{
4765 unsigned long exit_qualification;
4766 gpa_t bitmap, last_bitmap;
4767 unsigned int port;
4768 int size;
4769 u8 b;
4770
4771 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
4772 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
4773
4774 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4775
4776 port = exit_qualification >> 16;
4777 size = (exit_qualification & 7) + 1;
4778
4779 last_bitmap = (gpa_t)-1;
4780 b = -1;
4781
4782 while (size > 0) {
4783 if (port < 0x8000)
4784 bitmap = vmcs12->io_bitmap_a;
4785 else if (port < 0x10000)
4786 bitmap = vmcs12->io_bitmap_b;
4787 else
4788 return true;
4789 bitmap += (port & 0x7fff) / 8;
4790
4791 if (last_bitmap != bitmap)
4792 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
4793 return true;
4794 if (b & (1 << (port & 7)))
4795 return true;
4796
4797 port++;
4798 size--;
4799 last_bitmap = bitmap;
4800 }
4801
4802 return false;
4803}
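/*
 * Editor's note, not part of the upstream file: worked example for the
 * bitmap walk above -- a one-byte access to port 0x3f8 falls in
 * io_bitmap_a (port < 0x8000), at byte offset 0x3f8 / 8 = 127, bit
 * 0x3f8 & 7 = 0; if L1 set that bit, the exit is reflected to L1.
 */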
4804
4805/*
4806 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
4807 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
4808 * disinterest in the current event (read or write a specific MSR) by using an
4809 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
4810 */
4811static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
4812 struct vmcs12 *vmcs12, u32 exit_reason)
4813{
4814 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
4815 gpa_t bitmap;
4816
4817 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
4818 return true;
4819
4820 /*
4821 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
4822 * for the four combinations of read/write and low/high MSR numbers.
4823 * First we need to figure out which of the four to use:
4824 */
4825 bitmap = vmcs12->msr_bitmap;
4826 if (exit_reason == EXIT_REASON_MSR_WRITE)
4827 bitmap += 2048;
4828 if (msr_index >= 0xc0000000) {
4829 msr_index -= 0xc0000000;
4830 bitmap += 1024;
4831 }
4832
4833 /* Then read the msr_index'th bit from this bitmap: */
4834 if (msr_index < 1024*8) {
4835 unsigned char b;
4836 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
4837 return true;
4838 return 1 & (b >> (msr_index & 7));
4839 } else
4840 return true; /* let L1 handle the wrong parameter */
4841}
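/*
 * Editor's note, not part of the upstream file: worked example for the
 * lookup above -- a WRMSR to 0xc0000080 (EFER) selects the "write, high
 * MSRs" quarter (bitmap + 2048 + 1024) and then tests byte
 * (0xc0000080 - 0xc0000000) / 8 = 16, bit 0; a set bit means L1 wants
 * the exit.
 */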
4842
4843/*
4844 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
4845 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
4846 * intercept (via guest_host_mask etc.) the current event.
4847 */
4848static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
4849 struct vmcs12 *vmcs12)
4850{
4851 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4852 int cr = exit_qualification & 15;
4853 int reg;
4854 unsigned long val;
4855
4856 switch ((exit_qualification >> 4) & 3) {
4857 case 0: /* mov to cr */
4858 reg = (exit_qualification >> 8) & 15;
4859 val = kvm_register_readl(vcpu, reg);
4860 switch (cr) {
4861 case 0:
4862 if (vmcs12->cr0_guest_host_mask &
4863 (val ^ vmcs12->cr0_read_shadow))
4864 return true;
4865 break;
4866 case 3:
4867 if ((vmcs12->cr3_target_count >= 1 &&
4868 vmcs12->cr3_target_value0 == val) ||
4869 (vmcs12->cr3_target_count >= 2 &&
4870 vmcs12->cr3_target_value1 == val) ||
4871 (vmcs12->cr3_target_count >= 3 &&
4872 vmcs12->cr3_target_value2 == val) ||
4873 (vmcs12->cr3_target_count >= 4 &&
4874 vmcs12->cr3_target_value3 == val))
4875 return false;
4876 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
4877 return true;
4878 break;
4879 case 4:
4880 if (vmcs12->cr4_guest_host_mask &
4881 (vmcs12->cr4_read_shadow ^ val))
4882 return true;
4883 break;
4884 case 8:
4885 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
4886 return true;
4887 break;
4888 }
4889 break;
4890 case 2: /* clts */
4891 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
4892 (vmcs12->cr0_read_shadow & X86_CR0_TS))
4893 return true;
4894 break;
4895 case 1: /* mov from cr */
4896 switch (cr) {
4897 case 3:
4898 if (vmcs12->cpu_based_vm_exec_control &
4899 CPU_BASED_CR3_STORE_EXITING)
4900 return true;
4901 break;
4902 case 8:
4903 if (vmcs12->cpu_based_vm_exec_control &
4904 CPU_BASED_CR8_STORE_EXITING)
4905 return true;
4906 break;
4907 }
4908 break;
4909 case 3: /* lmsw */
4910 /*
4911 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
4912 * cr0. Other attempted changes are ignored, with no exit.
4913 */
4914 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
4915 if (vmcs12->cr0_guest_host_mask & 0xe &
4916 (val ^ vmcs12->cr0_read_shadow))
4917 return true;
4918 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
4919 !(vmcs12->cr0_read_shadow & 0x1) &&
4920 (val & 0x1))
4921 return true;
4922 break;
4923 }
4924 return false;
4925}
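/*
 * Editor's note, not part of the upstream file: the exit qualification
 * decoded above packs the CR number in bits 3:0, the access type
 * (mov-to/mov-from/clts/lmsw) in bits 5:4 and, for mov accesses, the GPR
 * in bits 11:8.  A "mov %rax, %cr4" therefore decodes as cr == 4,
 * type == 0, reg == 0, and is reflected to L1 only if the new value
 * differs from cr4_read_shadow in a bit covered by cr4_guest_host_mask.
 */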
4926
4927static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
4928 struct vmcs12 *vmcs12, gpa_t bitmap)
4929{
4930 u32 vmx_instruction_info;
4931 unsigned long field;
4932 u8 b;
4933
4934 if (!nested_cpu_has_shadow_vmcs(vmcs12))
4935 return true;
4936
4937 /* Decode instruction info and find the field to access */
4938 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4939 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4940
4941 /* Out-of-range fields always cause a VM exit from L2 to L1 */
4942 if (field >> 15)
4943 return true;
4944
4945 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
4946 return true;
4947
4948 return 1 & (b >> (field & 7));
4949}
4950
4951/*
4952 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
4953 * should handle it ourselves in L0 (and then continue L2). Only call this
4954 * when in is_guest_mode (L2).
4955 */
4956bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
4957{
4958 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
4959 struct vcpu_vmx *vmx = to_vmx(vcpu);
4960 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4961
4962 if (vmx->nested.nested_run_pending)
4963 return false;
4964
4965 if (unlikely(vmx->fail)) {
4966 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
4967 vmcs_read32(VM_INSTRUCTION_ERROR));
4968 return true;
4969 }
4970
4971 /*
4972 * The host physical addresses of some pages of guest memory
4973 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
4974 * Page). The CPU may write to these pages via their host
4975 * physical address while L2 is running, bypassing any
4976 * address-translation-based dirty tracking (e.g. EPT write
4977 * protection).
4978 *
4979 * Mark them dirty on every exit from L2 to prevent them from
4980 * getting out of sync with dirty tracking.
4981 */
4982 nested_mark_vmcs12_pages_dirty(vcpu);
4983
4984 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
4985 vmcs_readl(EXIT_QUALIFICATION),
4986 vmx->idt_vectoring_info,
4987 intr_info,
4988 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
4989 KVM_ISA_VMX);
4990
4991 switch (exit_reason) {
4992 case EXIT_REASON_EXCEPTION_NMI:
4993 if (is_nmi(intr_info))
4994 return false;
4995 else if (is_page_fault(intr_info))
4996 return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
4997 else if (is_debug(intr_info) &&
4998 vcpu->guest_debug &
4999 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5000 return false;
5001 else if (is_breakpoint(intr_info) &&
5002 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5003 return false;
5004 return vmcs12->exception_bitmap &
5005 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
5006 case EXIT_REASON_EXTERNAL_INTERRUPT:
5007 return false;
5008 case EXIT_REASON_TRIPLE_FAULT:
5009 return true;
5010 case EXIT_REASON_PENDING_INTERRUPT:
5011 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
5012 case EXIT_REASON_NMI_WINDOW:
5013 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
5014 case EXIT_REASON_TASK_SWITCH:
5015 return true;
5016 case EXIT_REASON_CPUID:
5017 return true;
5018 case EXIT_REASON_HLT:
5019 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
5020 case EXIT_REASON_INVD:
5021 return true;
5022 case EXIT_REASON_INVLPG:
5023 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5024 case EXIT_REASON_RDPMC:
5025 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
5026 case EXIT_REASON_RDRAND:
5027 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
5028 case EXIT_REASON_RDSEED:
5029 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
5030 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
5031 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
5032 case EXIT_REASON_VMREAD:
5033 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5034 vmcs12->vmread_bitmap);
5035 case EXIT_REASON_VMWRITE:
5036 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5037 vmcs12->vmwrite_bitmap);
5038 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
5039 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
5040 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
5041 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
5042 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
5043 /*
5044 * VMX instructions trap unconditionally. This allows L1 to
5045 * emulate them for its L2 guest, i.e., allows 3-level nesting!
5046 */
5047 return true;
5048 case EXIT_REASON_CR_ACCESS:
5049 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
5050 case EXIT_REASON_DR_ACCESS:
5051 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
5052 case EXIT_REASON_IO_INSTRUCTION:
5053 return nested_vmx_exit_handled_io(vcpu, vmcs12);
5054 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
5055 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
5056 case EXIT_REASON_MSR_READ:
5057 case EXIT_REASON_MSR_WRITE:
5058 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
5059 case EXIT_REASON_INVALID_STATE:
5060 return true;
5061 case EXIT_REASON_MWAIT_INSTRUCTION:
5062 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
5063 case EXIT_REASON_MONITOR_TRAP_FLAG:
5064 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
5065 case EXIT_REASON_MONITOR_INSTRUCTION:
5066 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
5067 case EXIT_REASON_PAUSE_INSTRUCTION:
5068 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
5069 nested_cpu_has2(vmcs12,
5070 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
5071 case EXIT_REASON_MCE_DURING_VMENTRY:
5072 return false;
5073 case EXIT_REASON_TPR_BELOW_THRESHOLD:
5074 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
5075 case EXIT_REASON_APIC_ACCESS:
5076 case EXIT_REASON_APIC_WRITE:
5077 case EXIT_REASON_EOI_INDUCED:
5078 /*
5079 * The controls for "virtualize APIC accesses," "APIC-
5080 * register virtualization," and "virtual-interrupt
5081 * delivery" only come from vmcs12.
5082 */
5083 return true;
5084 case EXIT_REASON_EPT_VIOLATION:
5085 /*
5086 * L0 always deals with the EPT violation. If nested EPT is
5087 * used, and the nested mmu code discovers that the address is
5088 * missing in the guest EPT table (EPT12), the EPT violation
5089 * will be injected with nested_ept_inject_page_fault()
5090 */
5091 return false;
5092 case EXIT_REASON_EPT_MISCONFIG:
5093 /*
5094 * L2 never uses L1's EPT directly, but rather L0's own EPT
5095 * table (shadow on EPT) or a merged EPT table that L0 built
5096 * (EPT on EPT). So any problems with the structure of the
5097 * table are L0's fault.
5098 */
5099 return false;
5100 case EXIT_REASON_INVPCID:
5101 return
5102 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
5103 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5104 case EXIT_REASON_WBINVD:
5105 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
5106 case EXIT_REASON_XSETBV:
5107 return true;
5108 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
5109 /*
5110 * This should never happen, since it is not possible to
5111 * set XSS to a non-zero value, either in L1 or in L2.
5112 * If it were possible, XSS would have to be checked against
5113 * the XSS exit bitmap in vmcs12.
5114 */
5115 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
5116 case EXIT_REASON_PREEMPTION_TIMER:
5117 return false;
5118 case EXIT_REASON_PML_FULL:
5119 /* We emulate PML support for L1. */
5120 return false;
5121 case EXIT_REASON_VMFUNC:
5122 /* VM functions are emulated through L2->L0 vmexits. */
5123 return false;
5124 case EXIT_REASON_ENCLS:
5125 /* SGX is never exposed to L1 */
5126 return false;
5127 default:
5128 return true;
5129 }
5130}
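/*
 * Editorial sketch of the intended call pattern (the real caller is the
 * VMX exit handler outside this file; the locals shown are illustrative):
 *
 *	if (is_guest_mode(vcpu) &&
 *	    nested_vmx_exit_reflected(vcpu, exit_reason)) {
 *		(reflect: synthesize an L2->L1 vmexit)
 *		nested_vmx_vmexit(vcpu, exit_reason,
 *				  vmcs_read32(VM_EXIT_INTR_INFO),
 *				  vmcs_readl(EXIT_QUALIFICATION));
 *		return 1;
 *	}
 *	(otherwise L0 handles the exit itself and resumes L2)
 */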
5131
5132
5133static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5134 struct kvm_nested_state __user *user_kvm_nested_state,
5135 u32 user_data_size)
5136{
5137 struct vcpu_vmx *vmx;
5138 struct vmcs12 *vmcs12;
5139 struct kvm_nested_state kvm_state = {
5140 .flags = 0,
5141 .format = 0,
5142 .size = sizeof(kvm_state),
5143 .vmx.vmxon_pa = -1ull,
5144 .vmx.vmcs_pa = -1ull,
5145 };
5146
5147 if (!vcpu)
5148 return kvm_state.size + 2 * VMCS12_SIZE;
5149
5150 vmx = to_vmx(vcpu);
5151 vmcs12 = get_vmcs12(vcpu);
5152
5153 if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
5154 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
5155
5156 if (nested_vmx_allowed(vcpu) &&
5157 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
5158 kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
5159 kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
5160
5161 if (vmx_has_valid_vmcs12(vcpu)) {
5162 kvm_state.size += VMCS12_SIZE;
5163
5164 if (is_guest_mode(vcpu) &&
5165 nested_cpu_has_shadow_vmcs(vmcs12) &&
5166 vmcs12->vmcs_link_pointer != -1ull)
5167 kvm_state.size += VMCS12_SIZE;
5168 }
5169
5170 if (vmx->nested.smm.vmxon)
5171 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
5172
5173 if (vmx->nested.smm.guest_mode)
5174 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
5175
5176 if (is_guest_mode(vcpu)) {
5177 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
5178
5179 if (vmx->nested.nested_run_pending)
5180 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
5181 }
5182 }
5183
5184 if (user_data_size < kvm_state.size)
5185 goto out;
5186
5187 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
5188 return -EFAULT;
5189
5190 if (!vmx_has_valid_vmcs12(vcpu))
5191 goto out;
5192
5193 /*
5194 * When running L2, the authoritative vmcs12 state is in the
5195 * vmcs02. When running L1, the authoritative vmcs12 state is
5196 * in the shadow or enlightened vmcs linked to vmcs01, unless
5197 * need_vmcs12_sync is set, in which case, the authoritative
5198 * vmcs12 state is in the vmcs12 already.
5199 */
5200 if (is_guest_mode(vcpu)) {
5201 sync_vmcs12(vcpu, vmcs12);
5202 } else if (!vmx->nested.need_vmcs12_sync) {
5203 if (vmx->nested.hv_evmcs)
5204 copy_enlightened_to_vmcs12(vmx);
5205 else if (enable_shadow_vmcs)
5206 copy_shadow_to_vmcs12(vmx);
5207 }
5208
5209 if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
5210 return -EFAULT;
5211
5212 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5213 vmcs12->vmcs_link_pointer != -1ull) {
5214 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
5215 get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
5216 return -EFAULT;
5217 }
5218
5219out:
5220 return kvm_state.size;
5221}
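/*
 * Editorial note on the blob produced above (offsets are VMCS12_SIZE,
 * the software-defined vmcs12 allocation, not a hardware VMCS size):
 *
 *	offset 0		struct kvm_nested_state header
 *	data + 0		vmcs12, when a valid vmcs12 exists
 *	data + VMCS12_SIZE	shadow vmcs12, only while L2 runs with a
 *				valid vmcs_link_pointer
 *
 * The "if (!vcpu)" path reports the worst-case size so that callers can
 * size the buffer before requesting the real state.
 */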
5222
5223/*
5224 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
5225 */
5226void vmx_leave_nested(struct kvm_vcpu *vcpu)
5227{
5228 if (is_guest_mode(vcpu)) {
5229 to_vmx(vcpu)->nested.nested_run_pending = 0;
5230 nested_vmx_vmexit(vcpu, -1, 0, 0);
5231 }
5232 free_nested(vcpu);
5233}
5234
5235static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
5236 struct kvm_nested_state __user *user_kvm_nested_state,
5237 struct kvm_nested_state *kvm_state)
5238{
5239 struct vcpu_vmx *vmx = to_vmx(vcpu);
5240 struct vmcs12 *vmcs12;
5241 u32 exit_qual;
5242 int ret;
5243
5244 if (kvm_state->format != 0)
5245 return -EINVAL;
5246
5247 if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
5248 nested_enable_evmcs(vcpu, NULL);
5249
5250 if (!nested_vmx_allowed(vcpu))
5251 return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
5252
5253 if (kvm_state->vmx.vmxon_pa == -1ull) {
5254 if (kvm_state->vmx.smm.flags)
5255 return -EINVAL;
5256
5257 if (kvm_state->vmx.vmcs_pa != -1ull)
5258 return -EINVAL;
5259
5260 vmx_leave_nested(vcpu);
5261 return 0;
5262 }
5263
5264 if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
5265 return -EINVAL;
5266
5267 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5268 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5269 return -EINVAL;
5270
5271 if (kvm_state->vmx.smm.flags &
5272 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
5273 return -EINVAL;
5274
5275 /*
5276 * SMM temporarily disables VMX, so we cannot be in guest mode,
5277 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
5278 * must be zero.
5279 */
5280 if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
5281 return -EINVAL;
5282
5283 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5284 !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
5285 return -EINVAL;
5286
5287 vmx_leave_nested(vcpu);
5288 if (kvm_state->vmx.vmxon_pa == -1ull)
5289 return 0;
5290
5291 vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
5292 ret = enter_vmx_operation(vcpu);
5293 if (ret)
5294 return ret;
5295
5296 /* Empty 'VMXON' state is permitted */
5297 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
5298 return 0;
5299
5300 if (kvm_state->vmx.vmcs_pa != -1ull) {
5301 if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
5302 !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
5303 return -EINVAL;
5304
5305 set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
5306 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
5307 /*
5308 * Sync eVMCS upon entry as we may not have
5309 * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
5310 */
5311 vmx->nested.need_vmcs12_sync = true;
5312 } else {
5313 return -EINVAL;
5314 }
5315
5316 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
5317 vmx->nested.smm.vmxon = true;
5318 vmx->nested.vmxon = false;
5319
5320 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
5321 vmx->nested.smm.guest_mode = true;
5322 }
5323
5324 vmcs12 = get_vmcs12(vcpu);
5325 if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
5326 return -EFAULT;
5327
5328 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
5329 return -EINVAL;
5330
5331 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5332 return 0;
5333
5334 vmx->nested.nested_run_pending =
5335 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
5336
5337 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5338 vmcs12->vmcs_link_pointer != -1ull) {
5339 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
5340
5341 if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
5342 return -EINVAL;
5343
5344 if (copy_from_user(shadow_vmcs12,
5345 user_kvm_nested_state->data + VMCS12_SIZE,
5346 sizeof(*vmcs12)))
5347 return -EFAULT;
5348
5349 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
5350 !shadow_vmcs12->hdr.shadow_vmcs)
5351 return -EINVAL;
5352 }
5353
5354 if (nested_vmx_check_vmentry_prereqs(vcpu, vmcs12) ||
5355 nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
5356 return -EINVAL;
5357
5358 vmx->nested.dirty_vmcs12 = true;
5359 ret = nested_vmx_enter_non_root_mode(vcpu, false);
5360 if (ret)
5361 return -EINVAL;
5362
5363 return 0;
5364}
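/*
 * Editorial summary of the restore order implemented above: validate the
 * header, flags and SMM combinations, re-enter VMX operation via
 * enter_vmx_operation(), select the current VMCS (set_current_vmptr() or
 * a deferred eVMCS sync), copy in vmcs12 (plus the shadow vmcs12 when the
 * link pointer is valid), and only then re-enter non-root mode through
 * nested_vmx_enter_non_root_mode() if the state was saved while L2 was
 * running.
 */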
5365
5366void nested_vmx_vcpu_setup(void)
5367{
5368 if (enable_shadow_vmcs) {
5369 /*
5370 * At vCPU creation, "VMWRITE to any supported field
5371 * in the VMCS" is supported, so use the more
5372 * permissive vmx_vmread_bitmap to specify both read
5373 * and write permissions for the shadow VMCS.
5374 */
5375 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
5376 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap));
5377 }
5378}
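/*
 * Editorial note: in the VMREAD/VMWRITE bitmaps a set bit forces the
 * corresponding VMREAD/VMWRITE to exit, while a clear bit lets the access
 * be satisfied from the shadow VMCS.  Pointing both bitmap fields at
 * vmx_vmread_bitmap therefore grants read and write access to the same
 * set of shadowed fields, matching the "VMWRITE to any supported field"
 * behavior described in the comment above.
 */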
5379
5380/*
5381 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
5382 * returned for the various VMX controls MSRs when nested VMX is enabled.
5383 * The same values should also be used to verify that vmcs12 control fields are
5384 * valid during nested entry from L1 to L2.
5385 * Each of these control msrs has a low and high 32-bit half: A low bit is on
5386 * if the corresponding bit in the (32-bit) control field *must* be on, and a
5387 * bit in the high half is on if the corresponding bit in the control field
5388 * may be on. See also vmx_control_verify().
5389 */
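/*
 * Editorial sketch of how a low/high pair is consumed when a vmcs12
 * control field is validated; this restates the check performed by
 * vmx_control_verify() rather than copying it:
 *
 *	ok = ((control & high) == control) &&	(no disallowed 1 bits)
 *	     ((control & low) == low);		(all required 1 bits set)
 */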
5390void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
5391 bool apicv)
5392{
5393 /*
5394 * Note that as a general rule, the high half of the MSRs (bits in
5395 * the control fields which may be 1) should be initialized by the
5396 * intersection of the underlying hardware's MSR (i.e., features which
5397 * can be supported) and the list of features we want to expose -
5398 * because they are known to be properly supported in our code.
5399 * Also, usually, the low half of the MSRs (bits which must be 1) can
5400 * be set to 0, meaning that L1 may turn off any of these bits. The
5401 * reason is that if one of these bits is needed by L0, it is already
5402 * set in vmcs01, and prepare_vmcs02(), which bitwise-ORs the control
5403 * fields of vmcs01 and vmcs12, keeps it set in vmcs02 - and
5404 * nested_vmx_exit_reflected() will not pass related exits to L1.
5405 * These rules have exceptions below.
5406 */
5407
5408 /* pin-based controls */
5409 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
5410 msrs->pinbased_ctls_low,
5411 msrs->pinbased_ctls_high);
5412 msrs->pinbased_ctls_low |=
5413 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5414 msrs->pinbased_ctls_high &=
5415 PIN_BASED_EXT_INTR_MASK |
5416 PIN_BASED_NMI_EXITING |
5417 PIN_BASED_VIRTUAL_NMIS |
5418 (apicv ? PIN_BASED_POSTED_INTR : 0);
5419 msrs->pinbased_ctls_high |=
5420 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5421 PIN_BASED_VMX_PREEMPTION_TIMER;
5422
5423 /* exit controls */
5424 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
5425 msrs->exit_ctls_low,
5426 msrs->exit_ctls_high);
5427 msrs->exit_ctls_low =
5428 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
5429
5430 msrs->exit_ctls_high &=
5431#ifdef CONFIG_X86_64
5432 VM_EXIT_HOST_ADDR_SPACE_SIZE |
5433#endif
5434 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
5435 msrs->exit_ctls_high |=
5436 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
5437 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
5438 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
5439
5440 /* We support free control of debug control saving. */
5441 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
5442
5443 /* entry controls */
5444 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
5445 msrs->entry_ctls_low,
5446 msrs->entry_ctls_high);
5447 msrs->entry_ctls_low =
5448 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
5449 msrs->entry_ctls_high &=
5450#ifdef CONFIG_X86_64
5451 VM_ENTRY_IA32E_MODE |
5452#endif
5453 VM_ENTRY_LOAD_IA32_PAT;
5454 msrs->entry_ctls_high |=
5455 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
5456
5457 /* We support free control of debug control loading. */
5458 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
5459
5460 /* cpu-based controls */
5461 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
5462 msrs->procbased_ctls_low,
5463 msrs->procbased_ctls_high);
5464 msrs->procbased_ctls_low =
5465 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5466 msrs->procbased_ctls_high &=
5467 CPU_BASED_VIRTUAL_INTR_PENDING |
5468 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
5469 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
5470 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
5471 CPU_BASED_CR3_STORE_EXITING |
5472#ifdef CONFIG_X86_64
5473 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
5474#endif
5475 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
5476 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
5477 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
5478 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
5479 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
5480 /*
5481 * We can allow some features even when not supported by the
5482 * hardware. For example, L1 can specify an MSR bitmap - and we
5483 * can use it to avoid exits to L1 - even when L0 runs L2
5484 * without MSR bitmaps.
5485 */
5486 msrs->procbased_ctls_high |=
5487 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5488 CPU_BASED_USE_MSR_BITMAPS;
5489
5490 /* We support free control of CR3 access interception. */
5491 msrs->procbased_ctls_low &=
5492 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
5493
5494 /*
5495 * secondary cpu-based controls. Do not include those that
5496 * depend on CPUID bits; they are added later by vmx_cpuid_update.
5497 */
5498 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
5499 msrs->secondary_ctls_low,
5500 msrs->secondary_ctls_high);
5501 msrs->secondary_ctls_low = 0;
5502 msrs->secondary_ctls_high &=
5503 SECONDARY_EXEC_DESC |
5504 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
5505 SECONDARY_EXEC_APIC_REGISTER_VIRT |
5506 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
5507 SECONDARY_EXEC_WBINVD_EXITING;
5508
5509 /*
5510 * We can emulate "VMCS shadowing," even if the hardware
5511 * doesn't support it.
5512 */
5513 msrs->secondary_ctls_high |=
5514 SECONDARY_EXEC_SHADOW_VMCS;
5515
5516 if (enable_ept) {
5517 /* nested EPT: emulate EPT also for L1 */
5518 msrs->secondary_ctls_high |=
5519 SECONDARY_EXEC_ENABLE_EPT;
5520 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
5521 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
5522 if (cpu_has_vmx_ept_execute_only())
5523 msrs->ept_caps |=
5524 VMX_EPT_EXECUTE_ONLY_BIT;
5525 msrs->ept_caps &= ept_caps;
5526 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
5527 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
5528 VMX_EPT_1GB_PAGE_BIT;
5529 if (enable_ept_ad_bits) {
5530 msrs->secondary_ctls_high |=
5531 SECONDARY_EXEC_ENABLE_PML;
5532 msrs->ept_caps |= VMX_EPT_AD_BIT;
5533 }
5534 }
5535
5536 if (cpu_has_vmx_vmfunc()) {
5537 msrs->secondary_ctls_high |=
5538 SECONDARY_EXEC_ENABLE_VMFUNC;
5539 /*
5540 * Advertise EPTP switching unconditionally
5541 * since we emulate it
5542 */
5543 if (enable_ept)
5544 msrs->vmfunc_controls =
5545 VMX_VMFUNC_EPTP_SWITCHING;
5546 }
5547
5548 /*
5549 * Old versions of KVM use the single-context version without
5550 * checking for support, so declare that it is supported even
5551 * though it is treated as global context. The alternative, accepting
5552 * single-context invvpid without declaring support for it, is worse.
5553 */
5554 if (enable_vpid) {
5555 msrs->secondary_ctls_high |=
5556 SECONDARY_EXEC_ENABLE_VPID;
5557 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
5558 VMX_VPID_EXTENT_SUPPORTED_MASK;
5559 }
5560
5561 if (enable_unrestricted_guest)
5562 msrs->secondary_ctls_high |=
5563 SECONDARY_EXEC_UNRESTRICTED_GUEST;
5564
5565 if (flexpriority_enabled)
5566 msrs->secondary_ctls_high |=
5567 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
5568
5569 /* miscellaneous data */
5570 rdmsr(MSR_IA32_VMX_MISC,
5571 msrs->misc_low,
5572 msrs->misc_high);
5573 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
5574 msrs->misc_low |=
5575 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
5576 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
5577 VMX_MISC_ACTIVITY_HLT;
5578 msrs->misc_high = 0;
5579
5580 /*
5581 * This MSR reports some information about VMX support. We
5582 * should return information about the VMX we emulate for the
5583 * guest, and the VMCS structure we give it - not about the
5584 * VMX support of the underlying hardware.
5585 */
5586 msrs->basic =
5587 VMCS12_REVISION |
5588 VMX_BASIC_TRUE_CTLS |
5589 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
5590 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
5591
5592 if (cpu_has_vmx_basic_inout())
5593 msrs->basic |= VMX_BASIC_INOUT;
5594
5595 /*
5596 * These MSRs specify bits which the guest must keep fixed on
5597 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
5598 * We picked the standard core2 setting.
5599 */
5600#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
5601#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
5602 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
5603 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
5604
5605 /* These MSRs specify bits which the guest must keep fixed off (bits clear here may never be set). */
5606 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
5607 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
5608
5609 /* highest index: VMX_PREEMPTION_TIMER_VALUE */
5610 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
5611}
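/*
 * Editorial sketch of how the fixed CR0/CR4 values above are meant to be
 * applied; this restates the usual fixed-bits rule rather than copying a
 * specific helper:
 *
 *	ok = ((cr & fixed0) == fixed0) &&	(required bits are set)
 *	     ((cr & ~fixed1) == 0);		(no forbidden bits are set)
 *
 * For example, VMXON_CR4_ALWAYSON makes CR4.VMXE a required bit for as
 * long as L1 stays in VMX operation.
 */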
5612
5613void nested_vmx_hardware_unsetup(void)
5614{
5615 int i;
5616
5617 if (enable_shadow_vmcs) {
5618 for (i = 0; i < VMX_BITMAP_NR; i++)
5619 free_page((unsigned long)vmx_bitmap[i]);
5620 }
5621}
5622
5623__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
5624{
5625 int i;
5626
5627 if (!cpu_has_vmx_shadow_vmcs())
5628 enable_shadow_vmcs = 0;
5629 if (enable_shadow_vmcs) {
5630 for (i = 0; i < VMX_BITMAP_NR; i++) {
5631 vmx_bitmap[i] = (unsigned long *)
5632 __get_free_page(GFP_KERNEL);
5633 if (!vmx_bitmap[i]) {
5634 nested_vmx_hardware_unsetup();
5635 return -ENOMEM;
5636 }
5637 }
5638
5639 init_vmcs_shadow_fields();
5640 }
5641
5642 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
5643 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
5644 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
5645 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
5646 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
5647 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
5648 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
5649 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
5650 exit_handlers[EXIT_REASON_VMON] = handle_vmon;
5651 exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
5652 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
5653 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
5654
5655 kvm_x86_ops->check_nested_events = vmx_check_nested_events;
5656 kvm_x86_ops->get_nested_state = vmx_get_nested_state;
5657 kvm_x86_ops->set_nested_state = vmx_set_nested_state;
5658 kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
5659 kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
5660 kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
5661
5662 return 0;
5663}