// SPDX-License-Identifier: GPL-2.0

#include <linux/frame.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "trace.h"
#include "x86.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap	(vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap	(vmx_bitmap[VMX_VMWRITE_BITMAP])

struct shadow_vmcs_field {
	u16	encoding;
	u16	offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_only_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_only_fields[j++] = entry;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_write_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
			  field <= GUEST_TR_AR_BYTES,
			  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
	}
	max_shadow_read_write_fields = j;
}

139/*
140 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
141 * set the success or error code of an emulated VMX instruction (as specified
142 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
143 * instruction.
144 */
145static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
146{
147 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
148 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
149 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
150 return kvm_skip_emulated_instruction(vcpu);
151}
152
153static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
154{
155 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
156 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
157 X86_EFLAGS_SF | X86_EFLAGS_OF))
158 | X86_EFLAGS_CF);
159 return kvm_skip_emulated_instruction(vcpu);
160}
161
162static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
163 u32 vm_instruction_error)
164{
165 struct vcpu_vmx *vmx = to_vmx(vcpu);
166
167 /*
168 * failValid writes the error number to the current VMCS, which
169 * can't be done if there isn't a current VMCS.
170 */
171 if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
172 return nested_vmx_failInvalid(vcpu);
173
174 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
175 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
176 X86_EFLAGS_SF | X86_EFLAGS_OF))
177 | X86_EFLAGS_ZF);
178 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
179 /*
180 * We don't need to force a shadow sync because
181 * VM_INSTRUCTION_ERROR is not shadowed
182 */
183 return kvm_skip_emulated_instruction(vcpu);
184}
185
186static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
187{
188 /* TODO: not to reset guest simply here. */
189 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
190 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
191}
192
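/*
 * Stop using the shadow VMCS: clear the SECONDARY_EXEC_SHADOW_VMCS control
 * and reset the VMCS link pointer to the invalid value.
 */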
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
}
198
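/*
 * Unmap and release the enlightened VMCS (Hyper-V eVMCS), if one is mapped,
 * and invalidate the cached eVMCS pointer.
 */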
static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.hv_evmcs)
		return;

	kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
	vmx->nested.hv_evmcs_vmptr = -1ull;
	vmx->nested.hv_evmcs = NULL;
}
210
211/*
212 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
213 * just stops using VMX.
214 */
215static void free_nested(struct kvm_vcpu *vcpu)
216{
217 struct vcpu_vmx *vmx = to_vmx(vcpu);
218
219 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
220 return;
221
222 vmx->nested.vmxon = false;
223 vmx->nested.smm.vmxon = false;
224 free_vpid(vmx->nested.vpid02);
225 vmx->nested.posted_intr_nv = -1;
226 vmx->nested.current_vmptr = -1ull;
227 if (enable_shadow_vmcs) {
228 vmx_disable_shadow_vmcs(vmx);
229 vmcs_clear(vmx->vmcs01.shadow_vmcs);
230 free_vmcs(vmx->vmcs01.shadow_vmcs);
231 vmx->vmcs01.shadow_vmcs = NULL;
232 }
233 kfree(vmx->nested.cached_vmcs12);
234 kfree(vmx->nested.cached_shadow_vmcs12);
235 /* Unpin physical memory we referred to in the vmcs02 */
236 if (vmx->nested.apic_access_page) {
237 kvm_release_page_dirty(vmx->nested.apic_access_page);
238 vmx->nested.apic_access_page = NULL;
239 }
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

244 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
245
246 nested_release_evmcs(vcpu);
247
248 free_loaded_vmcs(&vmx->nested.vmcs02);
249}
250
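/*
 * If host segment state is currently loaded for the guest, copy the cached
 * FS/GS/LDT (and DS/ES on 64-bit) selectors and bases from the previously
 * loaded VMCS to the VMCS being switched to, so that the new VMCS's host
 * state matches what is actually loaded in hardware.
 */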
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
				     struct loaded_vmcs *prev)
{
254 struct vmcs_host_state *dest, *src;
255
256 if (unlikely(!vmx->guest_state_loaded))
257 return;
258
259 src = &prev->host_state;
260 dest = &vmx->loaded_vmcs->host_state;
261
262 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
263 dest->ldt_sel = src->ldt_sel;
264#ifdef CONFIG_X86_64
265 dest->ds_sel = src->ds_sel;
266 dest->es_sel = src->es_sel;
267#endif
268}
269
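/*
 * Make @vmcs the vCPU's current VMCS: load it on this CPU, carry over the
 * cached host state, and reset the control shadows and segment cache that
 * tracked the previously loaded VMCS.
 */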
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *prev;
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	prev = vmx->loaded_vmcs;
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load_vmcs(vcpu, cpu);
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

	vm_entry_controls_reset_shadow(vmx);
	vm_exit_controls_reset_shadow(vmx);
	pin_controls_reset_shadow(vmx);
	exec_controls_reset_shadow(vmx);
	secondary_exec_controls_reset_shadow(vmx);
	vmx_segment_cache_clear(vmx);
}
293
294/*
295 * Ensure that the current vmcs of the logical processor is the
296 * vmcs01 of the vcpu before calling free_nested().
297 */
298void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
299{
300 vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
303 free_nested(vcpu);
304 vcpu_put(vcpu);
305}
306
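/*
 * Reflect a fault encountered while running L2 under nested EPT back into L1
 * as the appropriate VM-exit: PML full, EPT misconfiguration, or EPT
 * violation.
 */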
307static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
308 struct x86_exception *fault)
309{
310 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
311 struct vcpu_vmx *vmx = to_vmx(vcpu);
312 u32 exit_reason;
313 unsigned long exit_qualification = vcpu->arch.exit_qualification;
314
315 if (vmx->nested.pml_full) {
316 exit_reason = EXIT_REASON_PML_FULL;
317 vmx->nested.pml_full = false;
318 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
319 } else if (fault->error_code & PFERR_RSVD_MASK)
320 exit_reason = EXIT_REASON_EPT_MISCONFIG;
321 else
322 exit_reason = EXIT_REASON_EPT_VIOLATION;
323
324 nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
325 vmcs12->guest_physical_address = fault->address;
326}
327
328static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
329{
330 WARN_ON(mmu_is_nested(vcpu));
331
332 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
333 kvm_init_shadow_ept_mmu(vcpu,
334 to_vmx(vcpu)->nested.msrs.ept_caps &
335 VMX_EPT_EXECUTE_ONLY_BIT,
336 nested_ept_ad_enabled(vcpu),
337 nested_ept_get_cr3(vcpu));
338 vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
339 vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
340 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
341 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
342
343 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
344}
345
346static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
347{
348 vcpu->arch.mmu = &vcpu->arch.root_mmu;
349 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
350}
351
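/*
 * Returns true if a #PF with @error_code should be reflected to L1, based on
 * vmcs12's exception bitmap and the page-fault error-code mask/match fields.
 */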
352static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
353 u16 error_code)
354{
355 bool inequality, bit;
356
357 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
358 inequality =
359 (error_code & vmcs12->page_fault_error_code_mask) !=
360 vmcs12->page_fault_error_code_match;
361 return inequality ^ bit;
362}
363
364
365/*
366 * KVM wants to inject page-faults which it got to the guest. This function
367 * checks whether in a nested guest, we need to inject them to L1 or L2.
368 */
369static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
370{
371 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
372 unsigned int nr = vcpu->arch.exception.nr;
373 bool has_payload = vcpu->arch.exception.has_payload;
374 unsigned long payload = vcpu->arch.exception.payload;
375
376 if (nr == PF_VECTOR) {
377 if (vcpu->arch.exception.nested_apf) {
378 *exit_qual = vcpu->arch.apf.nested_apf_token;
379 return 1;
380 }
381 if (nested_vmx_is_page_fault_vmexit(vmcs12,
382 vcpu->arch.exception.error_code)) {
383 *exit_qual = has_payload ? payload : vcpu->arch.cr2;
384 return 1;
385 }
386 } else if (vmcs12->exception_bitmap & (1u << nr)) {
387 if (nr == DB_VECTOR) {
388 if (!has_payload) {
389 payload = vcpu->arch.dr6;
390 payload &= ~(DR6_FIXED_1 | DR6_BT);
391 payload ^= DR6_RTM;
392 }
393 *exit_qual = payload;
394 } else
395 *exit_qual = 0;
396 return 1;
397 }
398
399 return 0;
400}
401
402
403static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
404 struct x86_exception *fault)
405{
406 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
407
408 WARN_ON(!is_guest_mode(vcpu));
409
410 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
411 !to_vmx(vcpu)->nested.nested_run_pending) {
412 vmcs12->vm_exit_intr_error_code = fault->error_code;
413 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
414 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
415 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
416 fault->address);
417 } else {
418 kvm_inject_page_fault(vcpu, fault);
419 }
420}
421
422static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
423{
424 return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
425}
426
427static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
428 struct vmcs12 *vmcs12)
429{
430 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
431 return 0;
432
433 if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
434 !page_address_valid(vcpu, vmcs12->io_bitmap_b))
435 return -EINVAL;
436
437 return 0;
438}
439
440static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
441 struct vmcs12 *vmcs12)
442{
443 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
444 return 0;
445
446 if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
447 return -EINVAL;
448
449 return 0;
450}
451
452static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
453 struct vmcs12 *vmcs12)
454{
455 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
456 return 0;
457
458 if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
459 return -EINVAL;
460
461 return 0;
462}
463
464/*
465 * Check if MSR is intercepted for L01 MSR bitmap.
466 */
467static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
468{
469 unsigned long *msr_bitmap;
470 int f = sizeof(unsigned long);
471
472 if (!cpu_has_vmx_msr_bitmap())
473 return true;
474
475 msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
476
477 if (msr <= 0x1fff) {
478 return !!test_bit(msr, msr_bitmap + 0x800 / f);
479 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
480 msr &= 0x1fff;
481 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
482 }
483
484 return true;
485}
486
487/*
488 * If a msr is allowed by L0, we should check whether it is allowed by L1.
489 * The corresponding bit will be cleared unless both of L0 and L1 allow it.
490 */
491static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
492 unsigned long *msr_bitmap_nested,
493 u32 msr, int type)
494{
495 int f = sizeof(unsigned long);
496
497 /*
498 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
499 * have the write-low and read-high bitmap offsets the wrong way round.
500 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
501 */
502 if (msr <= 0x1fff) {
503 if (type & MSR_TYPE_R &&
504 !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
505 /* read-low */
506 __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
507
508 if (type & MSR_TYPE_W &&
509 !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
510 /* write-low */
511 __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
512
513 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
514 msr &= 0x1fff;
515 if (type & MSR_TYPE_R &&
516 !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
517 /* read-high */
518 __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
519
520 if (type & MSR_TYPE_W &&
521 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
522 /* write-high */
523 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
524
525 }
526}
527
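/*
 * Intercept both reads and writes of the entire x2APIC MSR range
 * (0x800 - 0x8ff) in the given MSR bitmap.
 */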
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
529 int msr;
530
531 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
532 unsigned word = msr / BITS_PER_LONG;
533
534 msr_bitmap[word] = ~0;
535 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
536 }
537}
538
/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map->hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively disable them below.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);

	/*
	 * Checking the L0->L1 bitmap is trying to verify two things:
	 *
	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessarily merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_SPEC_CTRL,
					MSR_TYPE_R | MSR_TYPE_W);

	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_PRED_CMD,
					MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);

	return true;
}
639
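/*
 * Cache the shadow VMCS referenced by vmcs12's VMCS link pointer into KVM's
 * internal shadow vmcs12 copy.
 */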
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct kvm_host_map map;
	struct vmcs12 *shadow;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
		return;

	memcpy(shadow, map.hva, VMCS12_SIZE);
	kvm_vcpu_unmap(vcpu, &map, false);
}
658
659static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
660 struct vmcs12 *vmcs12)
661{
662 struct vcpu_vmx *vmx = to_vmx(vcpu);
663
664 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
665 vmcs12->vmcs_link_pointer == -1ull)
666 return;
667
668 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
669 get_shadow_vmcs12(vcpu), VMCS12_SIZE);
670}
671
672/*
673 * In nested virtualization, check if L1 has set
674 * VM_EXIT_ACK_INTR_ON_EXIT
675 */
676static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
677{
678 return get_vmcs12(vcpu)->vm_exit_controls &
679 VM_EXIT_ACK_INTR_ON_EXIT;
680}
681
682static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
683{
684 return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
685}
686
687static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
688 struct vmcs12 *vmcs12)
689{
690 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
691 !page_address_valid(vcpu, vmcs12->apic_access_addr))
692 return -EINVAL;
693 else
694 return 0;
695}
696
697static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
698 struct vmcs12 *vmcs12)
699{
700 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
701 !nested_cpu_has_apic_reg_virt(vmcs12) &&
702 !nested_cpu_has_vid(vmcs12) &&
703 !nested_cpu_has_posted_intr(vmcs12))
704 return 0;
705
706 /*
707 * If virtualize x2apic mode is enabled,
708 * virtualize apic access must be disabled.
709 */
710 if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
711 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
712 return -EINVAL;
713
714 /*
715 * If virtual interrupt delivery is enabled,
716 * we must exit on external interrupts.
717 */
718 if (nested_cpu_has_vid(vmcs12) &&
719 !nested_exit_on_intr(vcpu))
720 return -EINVAL;
721
722 /*
723 * bits 15:8 should be zero in posted_intr_nv,
724 * the descriptor address has been already checked
725 * in nested_get_vmcs12_pages.
726 *
727 * bits 5:0 of posted_intr_desc_addr should be zero.
728 */
729 if (nested_cpu_has_posted_intr(vmcs12) &&
730 (!nested_cpu_has_vid(vmcs12) ||
731 !nested_exit_intr_ack_set(vcpu) ||
732 (vmcs12->posted_intr_nv & 0xff00) ||
733 (vmcs12->posted_intr_desc_addr & 0x3f) ||
734 (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
735 return -EINVAL;
736
737 /* tpr shadow is needed by all apicv features. */
738 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
739 return -EINVAL;
740
741 return 0;
742}
743
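/*
 * Validate a VM-entry/VM-exit MSR switch area: the base address must be
 * 16-byte aligned and the entire array of vmx_msr_entry structures must fit
 * below the guest's maximum physical address.
 */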
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	int maxphyaddr;

	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
					vmcs12->vm_exit_msr_load_addr) ||
	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
					vmcs12->vm_exit_msr_store_addr))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						      struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
					vmcs12->vm_entry_msr_load_addr))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
784 if (!nested_cpu_has_pml(vmcs12))
785 return 0;
786
787 if (!nested_cpu_has_ept(vmcs12) ||
788 !page_address_valid(vcpu, vmcs12->pml_address))
789 return -EINVAL;
790
791 return 0;
792}
793
794static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
795 struct vmcs12 *vmcs12)
796{
797 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
798 !nested_cpu_has_ept(vmcs12))
799 return -EINVAL;
800 return 0;
801}
802
803static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
804 struct vmcs12 *vmcs12)
805{
806 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
807 !nested_cpu_has_ept(vmcs12))
808 return -EINVAL;
809 return 0;
810}
811
812static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
813 struct vmcs12 *vmcs12)
814{
815 if (!nested_cpu_has_shadow_vmcs(vmcs12))
816 return 0;
817
818 if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
819 !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
820 return -EINVAL;
821
822 return 0;
823}
824
825static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
826 struct vmx_msr_entry *e)
827{
828 /* x2APIC MSR accesses are not allowed */
829 if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
830 return -EINVAL;
831 if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
832 e->index == MSR_IA32_UCODE_REV)
833 return -EINVAL;
834 if (e->reserved != 0)
835 return -EINVAL;
836 return 0;
837}
838
839static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
840 struct vmx_msr_entry *e)
841{
842 if (e->index == MSR_FS_BASE ||
843 e->index == MSR_GS_BASE ||
844 e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
845 nested_vmx_msr_check_common(vcpu, e))
846 return -EINVAL;
847 return 0;
848}
849
850static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
851 struct vmx_msr_entry *e)
852{
853 if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
854 nested_vmx_msr_check_common(vcpu, e))
855 return -EINVAL;
856 return 0;
857}
858
859/*
860 * Load guest's/host's msr at nested entry/exit.
861 * return 0 for success, entry index for failure.
862 */
863static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
864{
865 u32 i;
866 struct vmx_msr_entry e;
867 struct msr_data msr;
868
869 msr.host_initiated = false;
870 for (i = 0; i < count; i++) {
871 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
872 &e, sizeof(e))) {
873 pr_debug_ratelimited(
874 "%s cannot read MSR entry (%u, 0x%08llx)\n",
875 __func__, i, gpa + i * sizeof(e));
876 goto fail;
877 }
878 if (nested_vmx_load_msr_check(vcpu, &e)) {
879 pr_debug_ratelimited(
880 "%s check failed (%u, 0x%x, 0x%x)\n",
881 __func__, i, e.index, e.reserved);
882 goto fail;
883 }
884 msr.index = e.index;
885 msr.data = e.value;
886 if (kvm_set_msr(vcpu, &msr)) {
887 pr_debug_ratelimited(
888 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
889 __func__, i, e.index, e.value);
890 goto fail;
891 }
892 }
893 return 0;
894fail:
895 return i + 1;
896}
897
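/*
 * Write the current values of the MSRs listed in the VM-exit MSR-store area
 * back into guest memory.  Returns -EINVAL if any entry is invalid or a
 * guest memory access fails.
 */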
898static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
899{
900 u32 i;
901 struct vmx_msr_entry e;
902
903 for (i = 0; i < count; i++) {
904 struct msr_data msr_info;
905 if (kvm_vcpu_read_guest(vcpu,
906 gpa + i * sizeof(e),
907 &e, 2 * sizeof(u32))) {
908 pr_debug_ratelimited(
909 "%s cannot read MSR entry (%u, 0x%08llx)\n",
910 __func__, i, gpa + i * sizeof(e));
911 return -EINVAL;
912 }
913 if (nested_vmx_store_msr_check(vcpu, &e)) {
914 pr_debug_ratelimited(
915 "%s check failed (%u, 0x%x, 0x%x)\n",
916 __func__, i, e.index, e.reserved);
917 return -EINVAL;
918 }
919 msr_info.host_initiated = false;
920 msr_info.index = e.index;
921 if (kvm_get_msr(vcpu, &msr_info)) {
922 pr_debug_ratelimited(
923 "%s cannot read MSR (%u, 0x%x)\n",
924 __func__, i, e.index);
925 return -EINVAL;
926 }
927 if (kvm_vcpu_write_guest(vcpu,
928 gpa + i * sizeof(e) +
929 offsetof(struct vmx_msr_entry, value),
930 &msr_info.data, sizeof(msr_info.data))) {
931 pr_debug_ratelimited(
932 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
933 __func__, i, e.index, msr_info.data);
934 return -EINVAL;
935 }
936 }
937 return 0;
938}
939
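/*
 * A CR3 value is architecturally valid for the vCPU if it sets no bits above
 * the guest's MAXPHYADDR.
 */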
940static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
941{
942 unsigned long invalid_mask;
943
944 invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
945 return (val & invalid_mask) == 0;
946}
947
948/*
949 * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
950 * emulating VM entry into a guest with EPT enabled.
951 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
952 * is assigned to entry_failure_code on failure.
953 */
954static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
955 u32 *entry_failure_code)
956{
957 if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
958 if (!nested_cr3_valid(vcpu, cr3)) {
959 *entry_failure_code = ENTRY_FAIL_DEFAULT;
			return -EINVAL;
		}

		/*
		 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
		 * must not be dereferenced.
		 */
		if (is_pae_paging(vcpu) && !nested_ept) {
			if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
				*entry_failure_code = ENTRY_FAIL_PDPTE;
				return -EINVAL;
			}
972 }
973 }
974
975 if (!nested_ept)
976 kvm_mmu_new_cr3(vcpu, cr3, false);
977
978 vcpu->arch.cr3 = cr3;
979 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
980
981 kvm_init_mmu(vcpu, false);
982
983 return 0;
984}
985
986/*
987 * Returns if KVM is able to config CPU to tag TLB entries
988 * populated by L2 differently than TLB entries populated
989 * by L1.
990 *
991 * If L1 uses EPT, then TLB entries are tagged with different EPTP.
992 *
993 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
994 * with different VPID (L1 entries are tagged with vmx->vpid
995 * while L2 entries are tagged with vmx->nested.vpid02).
996 */
997static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
998{
999 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1000
1001 return nested_cpu_has_ept(vmcs12) ||
1002 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1003}
1004
1005static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
1006{
1007 struct vcpu_vmx *vmx = to_vmx(vcpu);
1008
1009 return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
1010}
1011
1012
1013static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
1014{
1015 return fixed_bits_valid(control, low, high);
1016}
1017
1018static inline u64 vmx_control_msr(u32 low, u32 high)
1019{
1020 return low | ((u64)high << 32);
1021}
1022
1023static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
1024{
1025 superset &= mask;
1026 subset &= mask;
1027
1028 return (superset | subset) == superset;
1029}
1030
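/*
 * Userspace restore of IA32_VMX_BASIC: feature bits may only be cleared, the
 * VMCS revision ID must not change, bit 48 (32-bit physical address limit)
 * must remain clear, and the advertised VMCS size may not shrink.
 */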
1031static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
1032{
1033 const u64 feature_and_reserved =
1034 /* feature (except bit 48; see below) */
1035 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
1036 /* reserved */
1037 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
1038 u64 vmx_basic = vmx->nested.msrs.basic;
1039
1040 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
1041 return -EINVAL;
1042
1043 /*
1044 * KVM does not emulate a version of VMX that constrains physical
1045 * addresses of VMX structures (e.g. VMCS) to 32-bits.
1046 */
1047 if (data & BIT_ULL(48))
1048 return -EINVAL;
1049
1050 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1051 vmx_basic_vmcs_revision_id(data))
1052 return -EINVAL;
1053
1054 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1055 return -EINVAL;
1056
1057 vmx->nested.msrs.basic = data;
1058 return 0;
1059}
1060
1061static int
1062vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1063{
1064 u64 supported;
1065 u32 *lowp, *highp;
1066
1067 switch (msr_index) {
1068 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1069 lowp = &vmx->nested.msrs.pinbased_ctls_low;
1070 highp = &vmx->nested.msrs.pinbased_ctls_high;
1071 break;
1072 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1073 lowp = &vmx->nested.msrs.procbased_ctls_low;
1074 highp = &vmx->nested.msrs.procbased_ctls_high;
1075 break;
1076 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1077 lowp = &vmx->nested.msrs.exit_ctls_low;
1078 highp = &vmx->nested.msrs.exit_ctls_high;
1079 break;
1080 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1081 lowp = &vmx->nested.msrs.entry_ctls_low;
1082 highp = &vmx->nested.msrs.entry_ctls_high;
1083 break;
1084 case MSR_IA32_VMX_PROCBASED_CTLS2:
1085 lowp = &vmx->nested.msrs.secondary_ctls_low;
1086 highp = &vmx->nested.msrs.secondary_ctls_high;
1087 break;
1088 default:
1089 BUG();
1090 }
1091
1092 supported = vmx_control_msr(*lowp, *highp);
1093
1094 /* Check must-be-1 bits are still 1. */
1095 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
1096 return -EINVAL;
1097
1098 /* Check must-be-0 bits are still 0. */
1099 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
1100 return -EINVAL;
1101
1102 *lowp = data;
1103 *highp = data >> 32;
1104 return 0;
1105}
1106
1107static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
1108{
1109 const u64 feature_and_reserved_bits =
1110 /* feature */
1111 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1112 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1113 /* reserved */
1114 GENMASK_ULL(13, 9) | BIT_ULL(31);
1115 u64 vmx_misc;
1116
1117 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
1118 vmx->nested.msrs.misc_high);
1119
1120 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
1121 return -EINVAL;
1122
1123 if ((vmx->nested.msrs.pinbased_ctls_high &
1124 PIN_BASED_VMX_PREEMPTION_TIMER) &&
1125 vmx_misc_preemption_timer_rate(data) !=
1126 vmx_misc_preemption_timer_rate(vmx_misc))
1127 return -EINVAL;
1128
1129 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1130 return -EINVAL;
1131
1132 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1133 return -EINVAL;
1134
1135 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1136 return -EINVAL;
1137
1138 vmx->nested.msrs.misc_low = data;
1139 vmx->nested.msrs.misc_high = data >> 32;
1140
	return 0;
1142}
1143
1144static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
1145{
1146 u64 vmx_ept_vpid_cap;
1147
1148 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
1149 vmx->nested.msrs.vpid_caps);
1150
1151 /* Every bit is either reserved or a feature bit. */
1152 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
1153 return -EINVAL;
1154
1155 vmx->nested.msrs.ept_caps = data;
1156 vmx->nested.msrs.vpid_caps = data >> 32;
1157 return 0;
1158}
1159
1160static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1161{
1162 u64 *msr;
1163
1164 switch (msr_index) {
1165 case MSR_IA32_VMX_CR0_FIXED0:
1166 msr = &vmx->nested.msrs.cr0_fixed0;
1167 break;
1168 case MSR_IA32_VMX_CR4_FIXED0:
1169 msr = &vmx->nested.msrs.cr4_fixed0;
1170 break;
1171 default:
1172 BUG();
1173 }
1174
1175 /*
1176 * 1 bits (which indicates bits which "must-be-1" during VMX operation)
1177 * must be 1 in the restored value.
1178 */
1179 if (!is_bitwise_subset(data, *msr, -1ULL))
1180 return -EINVAL;
1181
1182 *msr = data;
1183 return 0;
1184}
1185
1186/*
1187 * Called when userspace is restoring VMX MSRs.
1188 *
1189 * Returns 0 on success, non-0 otherwise.
1190 */
1191int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1192{
1193 struct vcpu_vmx *vmx = to_vmx(vcpu);
1194
1195 /*
1196 * Don't allow changes to the VMX capability MSRs while the vCPU
1197 * is in VMX operation.
1198 */
1199 if (vmx->nested.vmxon)
1200 return -EBUSY;
1201
1202 switch (msr_index) {
1203 case MSR_IA32_VMX_BASIC:
1204 return vmx_restore_vmx_basic(vmx, data);
1205 case MSR_IA32_VMX_PINBASED_CTLS:
1206 case MSR_IA32_VMX_PROCBASED_CTLS:
1207 case MSR_IA32_VMX_EXIT_CTLS:
1208 case MSR_IA32_VMX_ENTRY_CTLS:
1209 /*
1210 * The "non-true" VMX capability MSRs are generated from the
1211 * "true" MSRs, so we do not support restoring them directly.
1212 *
1213 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1214 * should restore the "true" MSRs with the must-be-1 bits
1215 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1216 * DEFAULT SETTINGS".
1217 */
1218 return -EINVAL;
1219 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1220 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1221 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1222 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1223 case MSR_IA32_VMX_PROCBASED_CTLS2:
1224 return vmx_restore_control_msr(vmx, msr_index, data);
1225 case MSR_IA32_VMX_MISC:
1226 return vmx_restore_vmx_misc(vmx, data);
1227 case MSR_IA32_VMX_CR0_FIXED0:
1228 case MSR_IA32_VMX_CR4_FIXED0:
1229 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1230 case MSR_IA32_VMX_CR0_FIXED1:
1231 case MSR_IA32_VMX_CR4_FIXED1:
1232 /*
1233 * These MSRs are generated based on the vCPU's CPUID, so we
1234 * do not support restoring them directly.
1235 */
1236 return -EINVAL;
1237 case MSR_IA32_VMX_EPT_VPID_CAP:
1238 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1239 case MSR_IA32_VMX_VMCS_ENUM:
1240 vmx->nested.msrs.vmcs_enum = data;
1241 return 0;
1242 default:
1243 /*
1244 * The rest of the VMX capability MSRs do not support restore.
1245 */
1246 return -EINVAL;
1247 }
1248}
1249
1250/* Returns 0 on success, non-0 otherwise. */
1251int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
1252{
1253 switch (msr_index) {
1254 case MSR_IA32_VMX_BASIC:
1255 *pdata = msrs->basic;
1256 break;
1257 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1258 case MSR_IA32_VMX_PINBASED_CTLS:
1259 *pdata = vmx_control_msr(
1260 msrs->pinbased_ctls_low,
1261 msrs->pinbased_ctls_high);
1262 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1263 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1264 break;
1265 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1266 case MSR_IA32_VMX_PROCBASED_CTLS:
1267 *pdata = vmx_control_msr(
1268 msrs->procbased_ctls_low,
1269 msrs->procbased_ctls_high);
1270 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1271 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1272 break;
1273 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1274 case MSR_IA32_VMX_EXIT_CTLS:
1275 *pdata = vmx_control_msr(
1276 msrs->exit_ctls_low,
1277 msrs->exit_ctls_high);
1278 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1279 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1280 break;
1281 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1282 case MSR_IA32_VMX_ENTRY_CTLS:
1283 *pdata = vmx_control_msr(
1284 msrs->entry_ctls_low,
1285 msrs->entry_ctls_high);
1286 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1287 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1288 break;
1289 case MSR_IA32_VMX_MISC:
1290 *pdata = vmx_control_msr(
1291 msrs->misc_low,
1292 msrs->misc_high);
1293 break;
1294 case MSR_IA32_VMX_CR0_FIXED0:
1295 *pdata = msrs->cr0_fixed0;
1296 break;
1297 case MSR_IA32_VMX_CR0_FIXED1:
1298 *pdata = msrs->cr0_fixed1;
1299 break;
1300 case MSR_IA32_VMX_CR4_FIXED0:
1301 *pdata = msrs->cr4_fixed0;
1302 break;
1303 case MSR_IA32_VMX_CR4_FIXED1:
1304 *pdata = msrs->cr4_fixed1;
1305 break;
1306 case MSR_IA32_VMX_VMCS_ENUM:
1307 *pdata = msrs->vmcs_enum;
1308 break;
1309 case MSR_IA32_VMX_PROCBASED_CTLS2:
1310 *pdata = vmx_control_msr(
1311 msrs->secondary_ctls_low,
1312 msrs->secondary_ctls_high);
1313 break;
1314 case MSR_IA32_VMX_EPT_VPID_CAP:
1315 *pdata = msrs->ept_caps |
1316 ((u64)msrs->vpid_caps << 32);
1317 break;
1318 case MSR_IA32_VMX_VMFUNC:
1319 *pdata = msrs->vmfunc_controls;
1320 break;
1321 default:
1322 return 1;
1323 }
1324
1325 return 0;
1326}
1327
/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
 * been modified by the L1 guest.  Note, "writable" in this context means
 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
 * VM-exit information fields (which are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS").
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (i = 0; i < max_shadow_read_write_fields; i++) {
		field = shadow_read_write_fields[i];
		val = __vmcs_readl(field.encoding);
		vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}
1359
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
	const struct shadow_vmcs_field *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i, q;

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			val = vmcs12_read_any(vmcs12, field.encoding,
					      field.offset);
			__vmcs_writel(field.encoding, val);
		}
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);
}
1390
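/*
 * Pull guest and control state from the Hyper-V enlightened VMCS into the
 * cached vmcs12, skipping field groups that the clean-fields mask marks as
 * unchanged.
 */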
1391static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
1392{
1393 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1394 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1395
1396 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1397 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1398 vmcs12->guest_rip = evmcs->guest_rip;
1399
1400 if (unlikely(!(evmcs->hv_clean_fields &
1401 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1402 vmcs12->guest_rsp = evmcs->guest_rsp;
1403 vmcs12->guest_rflags = evmcs->guest_rflags;
1404 vmcs12->guest_interruptibility_info =
1405 evmcs->guest_interruptibility_info;
1406 }
1407
1408 if (unlikely(!(evmcs->hv_clean_fields &
1409 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1410 vmcs12->cpu_based_vm_exec_control =
1411 evmcs->cpu_based_vm_exec_control;
1412 }
1413
1414 if (unlikely(!(evmcs->hv_clean_fields &
1415 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1416 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1417 }
1418
1419 if (unlikely(!(evmcs->hv_clean_fields &
1420 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1421 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1422 }
1423
1424 if (unlikely(!(evmcs->hv_clean_fields &
1425 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1426 vmcs12->vm_entry_intr_info_field =
1427 evmcs->vm_entry_intr_info_field;
1428 vmcs12->vm_entry_exception_error_code =
1429 evmcs->vm_entry_exception_error_code;
1430 vmcs12->vm_entry_instruction_len =
1431 evmcs->vm_entry_instruction_len;
1432 }
1433
1434 if (unlikely(!(evmcs->hv_clean_fields &
1435 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1436 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1437 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1438 vmcs12->host_cr0 = evmcs->host_cr0;
1439 vmcs12->host_cr3 = evmcs->host_cr3;
1440 vmcs12->host_cr4 = evmcs->host_cr4;
1441 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1442 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1443 vmcs12->host_rip = evmcs->host_rip;
1444 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1445 vmcs12->host_es_selector = evmcs->host_es_selector;
1446 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1447 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1448 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1449 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1450 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1451 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1452 }
1453
1454 if (unlikely(!(evmcs->hv_clean_fields &
1455 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1456 vmcs12->pin_based_vm_exec_control =
1457 evmcs->pin_based_vm_exec_control;
1458 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1459 vmcs12->secondary_vm_exec_control =
1460 evmcs->secondary_vm_exec_control;
1461 }
1462
1463 if (unlikely(!(evmcs->hv_clean_fields &
1464 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1465 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1466 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1467 }
1468
1469 if (unlikely(!(evmcs->hv_clean_fields &
1470 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1471 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1472 }
1473
1474 if (unlikely(!(evmcs->hv_clean_fields &
1475 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1476 vmcs12->guest_es_base = evmcs->guest_es_base;
1477 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1478 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1479 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1480 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1481 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1482 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1483 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1484 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1485 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1486 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1487 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1488 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1489 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1490 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1491 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1492 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1493 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1494 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1495 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1496 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1497 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1498 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1499 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1500 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1501 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1502 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1503 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1504 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1505 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1506 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1507 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1508 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1509 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1510 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1511 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1512 }
1513
1514 if (unlikely(!(evmcs->hv_clean_fields &
1515 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1516 vmcs12->tsc_offset = evmcs->tsc_offset;
1517 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1518 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1519 }
1520
1521 if (unlikely(!(evmcs->hv_clean_fields &
1522 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1523 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1524 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1525 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1526 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1527 vmcs12->guest_cr0 = evmcs->guest_cr0;
1528 vmcs12->guest_cr3 = evmcs->guest_cr3;
1529 vmcs12->guest_cr4 = evmcs->guest_cr4;
1530 vmcs12->guest_dr7 = evmcs->guest_dr7;
1531 }
1532
1533 if (unlikely(!(evmcs->hv_clean_fields &
1534 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1535 vmcs12->host_fs_base = evmcs->host_fs_base;
1536 vmcs12->host_gs_base = evmcs->host_gs_base;
1537 vmcs12->host_tr_base = evmcs->host_tr_base;
1538 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1539 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1540 vmcs12->host_rsp = evmcs->host_rsp;
1541 }
1542
1543 if (unlikely(!(evmcs->hv_clean_fields &
1544 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1545 vmcs12->ept_pointer = evmcs->ept_pointer;
1546 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1547 }
1548
1549 if (unlikely(!(evmcs->hv_clean_fields &
1550 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1551 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1552 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1553 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1554 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1555 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1556 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1557 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1558 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1559 vmcs12->guest_pending_dbg_exceptions =
1560 evmcs->guest_pending_dbg_exceptions;
1561 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1562 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1563 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1564 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1565 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1566 }
1567
1568 /*
1569 * Not used?
1570 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1571 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1572 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1573 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
1574 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
1575 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
1576 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
1577 * vmcs12->page_fault_error_code_mask =
1578 * evmcs->page_fault_error_code_mask;
1579 * vmcs12->page_fault_error_code_match =
1580 * evmcs->page_fault_error_code_match;
1581 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1582 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1583 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1584 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1585 */
1586
1587 /*
1588 * Read only fields:
1589 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1590 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1591 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1592 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1593 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1594 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1595 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1596 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1597 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1598 * vmcs12->exit_qualification = evmcs->exit_qualification;
1599 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1600 *
1601 * Not present in struct vmcs12:
1602 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1603 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1604 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1605 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1606 */
1607
1608 return 0;
1609}
1610
1611static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
1612{
1613 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1614 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1615
1616 /*
1617 * Should not be changed by KVM:
1618 *
1619 * evmcs->host_es_selector = vmcs12->host_es_selector;
1620 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1621 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1622 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1623 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1624 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1625 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1626 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1627 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1628 * evmcs->host_cr0 = vmcs12->host_cr0;
1629 * evmcs->host_cr3 = vmcs12->host_cr3;
1630 * evmcs->host_cr4 = vmcs12->host_cr4;
1631 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1632 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1633 * evmcs->host_rip = vmcs12->host_rip;
1634 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1635 * evmcs->host_fs_base = vmcs12->host_fs_base;
1636 * evmcs->host_gs_base = vmcs12->host_gs_base;
1637 * evmcs->host_tr_base = vmcs12->host_tr_base;
1638 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1639 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1640 * evmcs->host_rsp = vmcs12->host_rsp;
Sean Christopherson3731905ef2019-05-07 08:36:27 -07001641 * sync_vmcs02_to_vmcs12() doesn't read these:
Sean Christopherson55d23752018-12-03 13:53:18 -08001642 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1643 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1644 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1645 * evmcs->ept_pointer = vmcs12->ept_pointer;
1646 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1647 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1648 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1649 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1650 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
1651 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
1652 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
1653 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
1654 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1655 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1656 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1657 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1658 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1659 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1660 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1661 * evmcs->page_fault_error_code_mask =
1662 * vmcs12->page_fault_error_code_mask;
1663 * evmcs->page_fault_error_code_match =
1664 * vmcs12->page_fault_error_code_match;
1665 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1666 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1667 * evmcs->tsc_offset = vmcs12->tsc_offset;
1668 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1669 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1670 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1671 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1672 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1673 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1674 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1675 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1676 *
1677 * Not present in struct vmcs12:
1678 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1679 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1680 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1681 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1682 */
1683
1684 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1685 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1686 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1687 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1688 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1689 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1690 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1691 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1692
1693 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1694 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1695 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1696 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1697 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1698 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1699 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1700 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1701 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1702 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1703
1704 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1705 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1706 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1707 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1708 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1709 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1710 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1711 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1712
1713 evmcs->guest_es_base = vmcs12->guest_es_base;
1714 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1715 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1716 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1717 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1718 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1719 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1720 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1721 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1722 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1723
1724 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1725 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1726
1727 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1728 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1729 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1730 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1731
1732 evmcs->guest_pending_dbg_exceptions =
1733 vmcs12->guest_pending_dbg_exceptions;
1734 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1735 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1736
1737 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1738 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1739
1740 evmcs->guest_cr0 = vmcs12->guest_cr0;
1741 evmcs->guest_cr3 = vmcs12->guest_cr3;
1742 evmcs->guest_cr4 = vmcs12->guest_cr4;
1743 evmcs->guest_dr7 = vmcs12->guest_dr7;
1744
1745 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1746
1747 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1748 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1749 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1750 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1751 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1752 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1753 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1754 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1755
1756 evmcs->exit_qualification = vmcs12->exit_qualification;
1757
1758 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1759 evmcs->guest_rsp = vmcs12->guest_rsp;
1760 evmcs->guest_rflags = vmcs12->guest_rflags;
1761
1762 evmcs->guest_interruptibility_info =
1763 vmcs12->guest_interruptibility_info;
1764 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1765 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1766 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1767 evmcs->vm_entry_exception_error_code =
1768 vmcs12->vm_entry_exception_error_code;
1769 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1770
1771 evmcs->guest_rip = vmcs12->guest_rip;
1772
1773 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1774
1775 return 0;
1776}
1777
1778/*
1779 * This is the equivalent of the nested hypervisor executing the vmptrld
1780 * instruction.
1781 */
1782static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
1783 bool from_launch)
1784{
1785 struct vcpu_vmx *vmx = to_vmx(vcpu);
1786 struct hv_vp_assist_page assist_page;
1787
1788 if (likely(!vmx->nested.enlightened_vmcs_enabled))
1789 return 1;
1790
1791 if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
1792 return 1;
1793
1794 if (unlikely(!assist_page.enlighten_vmentry))
1795 return 1;
1796
1797 if (unlikely(assist_page.current_nested_vmcs !=
1798 vmx->nested.hv_evmcs_vmptr)) {
1799
1800 if (!vmx->nested.hv_evmcs)
1801 vmx->nested.current_vmptr = -1ull;
1802
1803 nested_release_evmcs(vcpu);
1804
KarimAllah Ahmeddee9c042019-01-31 21:24:42 +01001805 if (kvm_vcpu_map(vcpu, gpa_to_gfn(assist_page.current_nested_vmcs),
1806 &vmx->nested.hv_evmcs_map))
Sean Christopherson55d23752018-12-03 13:53:18 -08001807 return 0;
1808
KarimAllah Ahmeddee9c042019-01-31 21:24:42 +01001809 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
Sean Christopherson55d23752018-12-03 13:53:18 -08001810
1811 /*
1812	 * Currently, KVM only supports eVMCS version 1
1813	 * (== KVM_EVMCS_VERSION) and thus expects the guest to set the
1814	 * first u32 field of the eVMCS, which specifies the eVMCS
1815	 * VersionNumber, to that value.
1816	 *
1817	 * The guest learns the eVMCS versions supported by the host by
1818	 * examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM is
1819	 * expected to set this CPUID leaf according to the value
1820	 * returned in vmcs_version from nested_enable_evmcs().
1821	 *
1822	 * However, it turns out that Microsoft Hyper-V fails to comply
1823	 * with its own invented interface: when Hyper-V uses eVMCS, it
1824	 * sets the first u32 field of the eVMCS to the revision_id
1825	 * specified in MSR_IA32_VMX_BASIC, instead of an eVMCS version
1826	 * number, which should be one of the supported versions
1827	 * specified in CPUID.0x4000000A.EAX[0:15].
1828	 *
1829	 * To work around this Hyper-V bug, accept here either a supported
1830	 * eVMCS version or the VMCS12 revision_id as valid values for
1831	 * the first u32 field of the eVMCS.
1832 */
1833 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
1834 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
1835 nested_release_evmcs(vcpu);
1836 return 0;
1837 }
1838
1839 vmx->nested.dirty_vmcs12 = true;
1840 /*
1841	 * As we keep L2 state for only one guest at a time, the
1842	 * 'hv_clean_fields' mask can't be used when we switch between
1843	 * guests. Reset it here for simplicity.
1844 */
1845 vmx->nested.hv_evmcs->hv_clean_fields &=
1846 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
1847 vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
1848
1849 /*
1850	 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
1851	 * reloaded from the guest's memory (read-only fields, fields not
1852	 * present in struct hv_enlightened_vmcs, ...). Make sure there
1853 * are no leftovers.
1854 */
1855 if (from_launch) {
1856 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1857 memset(vmcs12, 0, sizeof(*vmcs12));
1858 vmcs12->hdr.revision_id = VMCS12_REVISION;
1859 }
1860
1861 }
1862 return 1;
1863}
1864
Sean Christopherson3731905ef2019-05-07 08:36:27 -07001865void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
Sean Christopherson55d23752018-12-03 13:53:18 -08001866{
1867 struct vcpu_vmx *vmx = to_vmx(vcpu);
1868
1869 /*
1870	 * hv_evmcs may end up not being mapped after migration (when
1871	 * L2 was running); map it here to make sure vmcs12 changes are
1872 * properly reflected.
1873 */
1874 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
1875 nested_vmx_handle_enlightened_vmptrld(vcpu, false);
1876
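	/*
	 * Propagate the cached vmcs12 back to whichever structure L1
	 * actually reads: the enlightened VMCS when one is active,
	 * otherwise the shadow VMCS.
	 */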
1877 if (vmx->nested.hv_evmcs) {
1878 copy_vmcs12_to_enlightened(vmx);
1879 /* All fields are clean */
1880 vmx->nested.hv_evmcs->hv_clean_fields |=
1881 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
1882 } else {
1883 copy_vmcs12_to_shadow(vmx);
1884 }
1885
Sean Christopherson3731905ef2019-05-07 08:36:27 -07001886 vmx->nested.need_vmcs12_to_shadow_sync = false;
Sean Christopherson55d23752018-12-03 13:53:18 -08001887}
1888
1889static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
1890{
1891 struct vcpu_vmx *vmx =
1892 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
1893
1894 vmx->nested.preemption_timer_expired = true;
1895 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
1896 kvm_vcpu_kick(&vmx->vcpu);
1897
1898 return HRTIMER_NORESTART;
1899}
1900
1901static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
1902{
1903 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
1904 struct vcpu_vmx *vmx = to_vmx(vcpu);
1905
1906 /*
1907 * A timer value of zero is architecturally guaranteed to cause
1908 * a VMExit prior to executing any instructions in the guest.
1909 */
1910 if (preemption_timeout == 0) {
1911 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
1912 return;
1913 }
1914
1915 if (vcpu->arch.virtual_tsc_khz == 0)
1916 return;
1917
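	/*
	 * Convert the vmcs12 value to nanoseconds: the emulated preemption
	 * timer ticks once every 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE
	 * TSC cycles.  For example, with a rate of 5, a timer value of 1000
	 * and a 1 GHz (1000000 kHz) guest TSC, this yields
	 * (1000 << 5) * 1000000 / 1000000 = 32000 ns.
	 */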
1918 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
1919 preemption_timeout *= 1000000;
1920 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
1921 hrtimer_start(&vmx->nested.preemption_timer,
1922 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
1923}
1924
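/*
 * Compute the EFER value L2 will run with: take vmcs12's guest_ia32_efer
 * when this is a real VM-Enter that loads IA32_EFER, otherwise derive
 * LMA/LME from the "IA-32e mode guest" entry control on top of the vCPU's
 * current EFER.
 */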
1925static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
1926{
1927 if (vmx->nested.nested_run_pending &&
1928 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
1929 return vmcs12->guest_ia32_efer;
1930 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
1931 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
1932 else
1933 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
1934}
1935
1936static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
1937{
1938 /*
1939 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
1940 * according to L0's settings (vmcs12 is irrelevant here). Host
1941 * fields that come from L0 and are not constant, e.g. HOST_CR3,
1942 * will be set as needed prior to VMLAUNCH/VMRESUME.
1943 */
1944 if (vmx->nested.vmcs02_initialized)
1945 return;
1946 vmx->nested.vmcs02_initialized = true;
1947
1948 /*
1949	 * We don't care what the EPTP value is; we just need to guarantee
1950 * it's valid so we don't get a false positive when doing early
1951 * consistency checks.
1952 */
1953 if (enable_ept && nested_early_check)
1954 vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
1955
1956 /* All VMFUNCs are currently emulated through L0 vmexits. */
1957 if (cpu_has_vmx_vmfunc())
1958 vmcs_write64(VM_FUNCTION_CONTROL, 0);
1959
1960 if (cpu_has_vmx_posted_intr())
1961 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
1962
1963 if (cpu_has_vmx_msr_bitmap())
1964 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
1965
Sean Christopherson4d6c9892019-05-07 09:06:30 -07001966 /*
1967 * The PML address never changes, so it is constant in vmcs02.
1968 * Conceptually we want to copy the PML index from vmcs01 here,
1969 * and then back to vmcs01 on nested vmexit. But since we flush
1970 * the log and reset GUEST_PML_INDEX on each vmexit, the PML
1971 * index is also effectively constant in vmcs02.
1972 */
1973 if (enable_pml) {
Sean Christopherson55d23752018-12-03 13:53:18 -08001974 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
Sean Christopherson4d6c9892019-05-07 09:06:30 -07001975 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
1976 }
Sean Christopherson55d23752018-12-03 13:53:18 -08001977
Sean Christophersonc538d572019-05-07 09:06:29 -07001978 if (cpu_has_vmx_encls_vmexit())
1979 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
1980
Sean Christopherson55d23752018-12-03 13:53:18 -08001981 /*
1982 * Set the MSR load/store lists to match L0's settings. Only the
1983	 * addresses are constant (for vmcs02); the counts can change based
1984 * on L2's behavior, e.g. switching to/from long mode.
1985 */
1986 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
1987 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
1988 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
1989
1990 vmx_set_constant_host_state(vmx);
1991}
1992
Paolo Bonzinib1346ab2019-06-06 17:24:00 +02001993static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
Sean Christopherson55d23752018-12-03 13:53:18 -08001994 struct vmcs12 *vmcs12)
1995{
1996 prepare_vmcs02_constant_state(vmx);
1997
1998 vmcs_write64(VMCS_LINK_POINTER, -1ull);
1999
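	/*
	 * Tag L2's TLB entries with vpid02 when L1 gives L2 a VPID of its
	 * own; otherwise fall back to L1's VPID.
	 */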
2000 if (enable_vpid) {
2001 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2002 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2003 else
2004 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2005 }
2006}
2007
2008static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2009{
2010 u32 exec_control, vmcs12_exec_ctrl;
2011 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2012
2013 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
Paolo Bonzinib1346ab2019-06-06 17:24:00 +02002014 prepare_vmcs02_early_rare(vmx, vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08002015
2016 /*
Sean Christopherson55d23752018-12-03 13:53:18 -08002017 * PIN CONTROLS
2018 */
Sean Christophersonc075c3e2019-05-07 12:17:53 -07002019 exec_control = vmx_pin_based_exec_ctrl(vmx);
2020 exec_control |= vmcs12->pin_based_vm_exec_control;
Sean Christopherson55d23752018-12-03 13:53:18 -08002021 /* Preemption timer setting is computed directly in vmx_vcpu_run. */
Sean Christopherson55d23752018-12-03 13:53:18 -08002022 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
2023 vmx->loaded_vmcs->hv_timer_armed = false;
2024
2025 /* Posted interrupts setting is only taken from vmcs12. */
2026 if (nested_cpu_has_posted_intr(vmcs12)) {
2027 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2028 vmx->nested.pi_pending = false;
2029 } else {
2030 exec_control &= ~PIN_BASED_POSTED_INTR;
2031 }
Sean Christophersonc5f2c762019-05-07 12:17:55 -07002032 pin_controls_init(vmx, exec_control);
Sean Christopherson55d23752018-12-03 13:53:18 -08002033
2034 /*
2035 * EXEC CONTROLS
2036 */
2037 exec_control = vmx_exec_control(vmx); /* L0's desires */
2038 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2039 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
2040 exec_control &= ~CPU_BASED_TPR_SHADOW;
2041 exec_control |= vmcs12->cpu_based_vm_exec_control;
2042
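	/*
	 * Write the TPR threshold from vmcs12 when the TPR shadow is in
	 * use; otherwise intercept CR8 loads and stores so that L2's TPR
	 * accesses still trap and can be emulated.
	 */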
Sean Christophersonca2f5462019-05-07 09:06:33 -07002043 if (exec_control & CPU_BASED_TPR_SHADOW)
Sean Christopherson55d23752018-12-03 13:53:18 -08002044 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
Sean Christopherson55d23752018-12-03 13:53:18 -08002045#ifdef CONFIG_X86_64
Sean Christophersonca2f5462019-05-07 09:06:33 -07002046 else
Sean Christopherson55d23752018-12-03 13:53:18 -08002047 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2048 CPU_BASED_CR8_STORE_EXITING;
2049#endif
Sean Christopherson55d23752018-12-03 13:53:18 -08002050
2051 /*
2052 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2053 * for I/O port accesses.
2054 */
2055 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2056 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
Sean Christopherson2183f562019-05-07 12:17:56 -07002057 exec_controls_init(vmx, exec_control);
Sean Christopherson55d23752018-12-03 13:53:18 -08002058
2059 /*
2060 * SECONDARY EXEC CONTROLS
2061 */
2062 if (cpu_has_secondary_exec_ctrls()) {
2063 exec_control = vmx->secondary_exec_control;
2064
2065 /* Take the following fields only from vmcs12 */
2066 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2067 SECONDARY_EXEC_ENABLE_INVPCID |
2068 SECONDARY_EXEC_RDTSCP |
2069 SECONDARY_EXEC_XSAVES |
2070 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2071 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2072 SECONDARY_EXEC_ENABLE_VMFUNC);
2073 if (nested_cpu_has(vmcs12,
2074 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
2075 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
2076 ~SECONDARY_EXEC_ENABLE_PML;
2077 exec_control |= vmcs12_exec_ctrl;
2078 }
2079
2080 /* VMCS shadowing for L2 is emulated for now */
2081 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2082
2083 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2084 vmcs_write16(GUEST_INTR_STATUS,
2085 vmcs12->guest_intr_status);
2086
Sean Christophersonfe7f895d2019-05-07 12:17:57 -07002087 secondary_exec_controls_init(vmx, exec_control);
Sean Christopherson55d23752018-12-03 13:53:18 -08002088 }
2089
2090 /*
2091 * ENTRY CONTROLS
2092 *
2093 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2094 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2095 * on the related bits (if supported by the CPU) in the hope that
2096 * we can avoid VMWrites during vmx_set_efer().
2097 */
2098 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
2099 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
2100 if (cpu_has_load_ia32_efer()) {
2101 if (guest_efer & EFER_LMA)
2102 exec_control |= VM_ENTRY_IA32E_MODE;
2103 if (guest_efer != host_efer)
2104 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2105 }
2106 vm_entry_controls_init(vmx, exec_control);
2107
2108 /*
2109 * EXIT CONTROLS
2110 *
2111 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2112 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2113 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2114 */
2115 exec_control = vmx_vmexit_ctrl();
2116 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2117 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2118 vm_exit_controls_init(vmx, exec_control);
2119
2120 /*
Sean Christopherson55d23752018-12-03 13:53:18 -08002121 * Interrupt/Exception Fields
2122 */
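	/*
	 * Only program the event-injection fields for an actual nested
	 * VM-Enter (nested_run_pending); when vmcs02 is merely being
	 * rebuilt, e.g. after migration, clear VM_ENTRY_INTR_INFO_FIELD
	 * instead.
	 */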
2123 if (vmx->nested.nested_run_pending) {
2124 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2125 vmcs12->vm_entry_intr_info_field);
2126 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2127 vmcs12->vm_entry_exception_error_code);
2128 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2129 vmcs12->vm_entry_instruction_len);
2130 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2131 vmcs12->guest_interruptibility_info);
2132 vmx->loaded_vmcs->nmi_known_unmasked =
2133 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2134 } else {
2135 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2136 }
2137}
2138
Paolo Bonzinib1346ab2019-06-06 17:24:00 +02002139static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
Sean Christopherson55d23752018-12-03 13:53:18 -08002140{
2141 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2142
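	/*
	 * With an enlightened VMCS, groups of guest fields that the guest
	 * marked clean have not changed since they were last written to
	 * vmcs02, so the corresponding VMWRITEs can be skipped.
	 */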
2143 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2144 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2145 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2146 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2147 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2148 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2149 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2150 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2151 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2152 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2153 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2154 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2155 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2156 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2157 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2158 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2159 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2160 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2161 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2162 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07002163 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2164 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
Sean Christopherson55d23752018-12-03 13:53:18 -08002165 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2166 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2167 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2168 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2169 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2170 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2171 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2172 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2173 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2174 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2175 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2176 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2177 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2178 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2179 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2180 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2181 }
2182
2183 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2184 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2185 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2186 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2187 vmcs12->guest_pending_dbg_exceptions);
2188 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2189 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2190
2191 /*
2192		 * L1 may access L2's PDPTRs, so save them in order to
2193		 * construct vmcs12.
2194 */
2195 if (enable_ept) {
2196 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2197 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2198 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2199 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2200 }
Sean Christophersonc27e5b02019-05-07 09:06:39 -07002201
2202 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2203 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2204 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
Sean Christopherson55d23752018-12-03 13:53:18 -08002205 }
2206
2207 if (nested_cpu_has_xsaves(vmcs12))
2208 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2209
2210 /*
2211 * Whether page-faults are trapped is determined by a combination of
2212 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
2213 * If enable_ept, L0 doesn't care about page faults and we should
2214 * set all of these to L1's desires. However, if !enable_ept, L0 does
2215 * care about (at least some) page faults, and because it is not easy
2216 * (if at all possible?) to merge L0 and L1's desires, we simply ask
2217 * to exit on each and every L2 page fault. This is done by setting
2218 * MASK=MATCH=0 and (see below) EB.PF=1.
2219 * Note that below we don't need special code to set EB.PF beyond the
2220 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2221 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2222 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2223 */
2224 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
2225 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
2226 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
2227 enable_ept ? vmcs12->page_fault_error_code_match : 0);
2228
2229 if (cpu_has_vmx_apicv()) {
2230 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2231 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2232 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2233 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2234 }
2235
2236 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2237 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2238
2239 set_cr4_guest_host_mask(vmx);
Sean Christopherson55d23752018-12-03 13:53:18 -08002240}
2241
2242/*
2243 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2244 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2245 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2246 * guest in a way that is appropriate both to L1's requests and to our own
2247 * needs. In addition to modifying the active vmcs (which is vmcs02), this
2248 * function also has necessary side effects, such as setting various
2249 * vcpu->arch fields.
2250 * Returns 0 on success, -EINVAL on failure. The invalid-state exit
2251 * qualification code is assigned to *entry_failure_code on failure.
2252 */
2253static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2254 u32 *entry_failure_code)
2255{
2256 struct vcpu_vmx *vmx = to_vmx(vcpu);
Sean Christophersonc7554efc2019-05-07 09:06:40 -07002257 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2258 bool load_guest_pdptrs_vmcs12 = false;
Sean Christopherson55d23752018-12-03 13:53:18 -08002259
Sean Christophersonc7554efc2019-05-07 09:06:40 -07002260 if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
Paolo Bonzinib1346ab2019-06-06 17:24:00 +02002261 prepare_vmcs02_rare(vmx, vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08002262 vmx->nested.dirty_vmcs12 = false;
Sean Christophersonc7554efc2019-05-07 09:06:40 -07002263
2264 load_guest_pdptrs_vmcs12 = !hv_evmcs ||
2265 !(hv_evmcs->hv_clean_fields &
2266 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
Sean Christopherson55d23752018-12-03 13:53:18 -08002267 }
2268
Sean Christopherson55d23752018-12-03 13:53:18 -08002269 if (vmx->nested.nested_run_pending &&
2270 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2271 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2272 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2273 } else {
2274 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2275 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2276 }
Sean Christopherson3b013a22019-05-07 09:06:28 -07002277 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2278 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2279 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
Sean Christopherson55d23752018-12-03 13:53:18 -08002280 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2281
Sean Christopherson55d23752018-12-03 13:53:18 -08002282 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2283 * bitwise-or of what L1 wants to trap for L2, and what we want to
2284 * trap. Note that CR0.TS also needs updating - we do this later.
2285 */
2286 update_exception_bitmap(vcpu);
2287 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2288 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2289
2290 if (vmx->nested.nested_run_pending &&
2291 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2292 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2293 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2294 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2295 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2296 }
2297
2298 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2299
2300 if (kvm_has_tsc_control)
2301 decache_tsc_multiplier(vmx);
2302
2303 if (enable_vpid) {
2304 /*
2305		 * There is no direct mapping between vpid02 and vpid12:
2306		 * vpid02 is per-vCPU for L0 and is reused, while the value of
2307		 * vpid12 is changed with one INVVPID during nested vmentry.
2308		 * vpid12 is allocated by L1 for L2, so it will not influence
2309		 * the global bitmap (for vpid01 and vpid02 allocation) even
2310		 * if L1 spawns a lot of nested vCPUs.
2311 */
2312 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
2313 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
2314 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
2315 __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
2316 }
2317 } else {
2318 /*
2319			 * If L1 uses EPT, then L0 needs to execute INVEPT on
2320			 * EPTP02 instead of EPTP01. Therefore, delay the TLB
2321 * flush until vmcs02->eptp is fully updated by
2322 * KVM_REQ_LOAD_CR3. Note that this assumes
2323 * KVM_REQ_TLB_FLUSH is evaluated after
2324 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
2325 */
2326 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2327 }
2328 }
2329
2330 if (nested_cpu_has_ept(vmcs12))
2331 nested_ept_init_mmu_context(vcpu);
2332 else if (nested_cpu_has2(vmcs12,
2333 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2334 vmx_flush_tlb(vcpu, true);
2335
2336 /*
2337	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2338	 * bits which we consider mandatorily enabled.
2339	 * CR0_READ_SHADOW is what L2 should have expected to read given
2340	 * the specifications by L1; it's not enough to take
2341	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
2342	 * have more bits set than L1 expected.
2343 */
2344 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2345 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2346
2347 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2348 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2349
2350 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2351 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2352 vmx_set_efer(vcpu, vcpu->arch.efer);
2353
2354 /*
2355 * Guest state is invalid and unrestricted guest is disabled,
2356 * which means L1 attempted VMEntry to L2 with invalid state.
2357 * Fail the VMEntry.
2358 */
2359 if (vmx->emulation_required) {
2360 *entry_failure_code = ENTRY_FAIL_DEFAULT;
Sean Christophersonc80add02019-04-11 12:18:09 -07002361 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002362 }
2363
2364 /* Shadow page tables on either EPT or shadow page tables. */
2365 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2366 entry_failure_code))
Sean Christophersonc80add02019-04-11 12:18:09 -07002367 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002368
Sean Christophersonc7554efc2019-05-07 09:06:40 -07002369 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
2370 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2371 is_pae_paging(vcpu)) {
2372 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2373 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2374 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2375 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2376 }
2377
Sean Christopherson55d23752018-12-03 13:53:18 -08002378 if (!enable_ept)
2379 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2380
Paolo Bonzinie9c16c72019-04-30 22:07:26 +02002381 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2382 kvm_rip_write(vcpu, vmcs12->guest_rip);
Sean Christopherson55d23752018-12-03 13:53:18 -08002383 return 0;
2384}
2385
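/*
 * Virtual NMIs require NMI exiting, and NMI-window exiting ("virtual NMI
 * pending") in turn requires virtual NMIs; reject control combinations
 * that violate these dependencies.
 */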
2386static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2387{
2388 if (!nested_cpu_has_nmi_exiting(vmcs12) &&
2389 nested_cpu_has_virtual_nmis(vmcs12))
2390 return -EINVAL;
2391
2392 if (!nested_cpu_has_virtual_nmis(vmcs12) &&
2393 nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
2394 return -EINVAL;
2395
2396 return 0;
2397}
2398
2399static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
2400{
2401 struct vcpu_vmx *vmx = to_vmx(vcpu);
2402 int maxphyaddr = cpuid_maxphyaddr(vcpu);
2403
2404 /* Check for memory type validity */
2405 switch (address & VMX_EPTP_MT_MASK) {
2406 case VMX_EPTP_MT_UC:
2407 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
2408 return false;
2409 break;
2410 case VMX_EPTP_MT_WB:
2411 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
2412 return false;
2413 break;
2414 default:
2415 return false;
2416 }
2417
2418 /* only 4 levels page-walk length are valid */
2419 if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
2420 return false;
2421
2422 /* Reserved bits should not be set */
2423 if (address >> maxphyaddr || ((address >> 7) & 0x1f))
2424 return false;
2425
2426 /* AD, if set, should be supported */
2427 if (address & VMX_EPTP_AD_ENABLE_BIT) {
2428 if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
2429 return false;
2430 }
2431
2432 return true;
2433}
2434
Krish Sadhukhan461b4ba2018-12-12 13:30:07 -05002435/*
2436 * Checks related to VM-Execution Control Fields
2437 */
2438static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2439 struct vmcs12 *vmcs12)
2440{
2441 struct vcpu_vmx *vmx = to_vmx(vcpu);
2442
2443 if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2444 vmx->nested.msrs.pinbased_ctls_low,
2445 vmx->nested.msrs.pinbased_ctls_high) ||
2446 !vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2447 vmx->nested.msrs.procbased_ctls_low,
2448 vmx->nested.msrs.procbased_ctls_high))
2449 return -EINVAL;
2450
2451 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2452 !vmx_control_verify(vmcs12->secondary_vm_exec_control,
2453 vmx->nested.msrs.secondary_ctls_low,
2454 vmx->nested.msrs.secondary_ctls_high))
2455 return -EINVAL;
2456
2457 if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) ||
2458 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2459 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2460 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2461 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2462 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2463 nested_vmx_check_nmi_controls(vmcs12) ||
2464 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2465 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2466 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2467 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2468 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2469 return -EINVAL;
2470
Sean Christophersonbc441212019-02-12 16:42:23 -08002471 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2472 nested_cpu_has_save_preemption_timer(vmcs12))
2473 return -EINVAL;
2474
Krish Sadhukhan461b4ba2018-12-12 13:30:07 -05002475 if (nested_cpu_has_ept(vmcs12) &&
2476 !valid_ept_address(vcpu, vmcs12->ept_pointer))
2477 return -EINVAL;
2478
2479 if (nested_cpu_has_vmfunc(vmcs12)) {
2480 if (vmcs12->vm_function_control &
2481 ~vmx->nested.msrs.vmfunc_controls)
2482 return -EINVAL;
2483
2484 if (nested_cpu_has_eptp_switching(vmcs12)) {
2485 if (!nested_cpu_has_ept(vmcs12) ||
2486 !page_address_valid(vcpu, vmcs12->eptp_list_address))
2487 return -EINVAL;
2488 }
2489 }
2490
2491 return 0;
2492}
2493
Krish Sadhukhan61446ba2018-12-12 13:30:09 -05002494/*
2495 * Checks related to VM-Exit Control Fields
2496 */
2497static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2498 struct vmcs12 *vmcs12)
2499{
2500 struct vcpu_vmx *vmx = to_vmx(vcpu);
2501
2502 if (!vmx_control_verify(vmcs12->vm_exit_controls,
2503 vmx->nested.msrs.exit_ctls_low,
2504 vmx->nested.msrs.exit_ctls_high) ||
2505 nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
2506 return -EINVAL;
2507
2508 return 0;
2509}
2510
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002511/*
2512 * Checks related to VM-Entry Control Fields
2513 */
2514static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2515 struct vmcs12 *vmcs12)
Sean Christopherson55d23752018-12-03 13:53:18 -08002516{
2517 struct vcpu_vmx *vmx = to_vmx(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08002518
Krish Sadhukhan61446ba2018-12-12 13:30:09 -05002519 if (!vmx_control_verify(vmcs12->vm_entry_controls,
Sean Christopherson55d23752018-12-03 13:53:18 -08002520 vmx->nested.msrs.entry_ctls_low,
2521 vmx->nested.msrs.entry_ctls_high))
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002522 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002523
2524 /*
2525 * From the Intel SDM, volume 3:
2526 * Fields relevant to VM-entry event injection must be set properly.
2527 * These fields are the VM-entry interruption-information field, the
2528 * VM-entry exception error code, and the VM-entry instruction length.
2529 */
2530 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2531 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2532 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2533 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2534 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2535 bool should_have_error_code;
2536 bool urg = nested_cpu_has2(vmcs12,
2537 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2538 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2539
2540 /* VM-entry interruption-info field: interruption type */
2541 if (intr_type == INTR_TYPE_RESERVED ||
2542 (intr_type == INTR_TYPE_OTHER_EVENT &&
2543 !nested_cpu_supports_monitor_trap_flag(vcpu)))
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002544 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002545
2546 /* VM-entry interruption-info field: vector */
2547 if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2548 (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2549 (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002550 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002551
2552 /* VM-entry interruption-info field: deliver error code */
2553 should_have_error_code =
2554 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2555 x86_exception_has_error_code(vector);
2556 if (has_error_code != should_have_error_code)
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002557 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002558
2559 /* VM-entry exception error code */
2560 if (has_error_code &&
2561 vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002562 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002563
2564 /* VM-entry interruption-info field: reserved bits */
2565 if (intr_info & INTR_INFO_RESVD_BITS_MASK)
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002566 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002567
2568 /* VM-entry instruction length */
2569 switch (intr_type) {
2570 case INTR_TYPE_SOFT_EXCEPTION:
2571 case INTR_TYPE_SOFT_INTR:
2572 case INTR_TYPE_PRIV_SW_EXCEPTION:
2573 if ((vmcs12->vm_entry_instruction_len > 15) ||
2574 (vmcs12->vm_entry_instruction_len == 0 &&
2575 !nested_cpu_has_zero_length_injection(vcpu)))
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002576 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002577 }
2578 }
2579
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002580 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2581 return -EINVAL;
2582
2583 return 0;
2584}
2585
Sean Christopherson5478ba32019-04-11 12:18:06 -07002586static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
2587 struct vmcs12 *vmcs12)
2588{
2589 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2590 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2591 nested_check_vm_entry_controls(vcpu, vmcs12))
Paolo Bonzini98d9e852019-04-12 10:19:57 +02002592 return -EINVAL;
Sean Christopherson5478ba32019-04-11 12:18:06 -07002593
2594 return 0;
2595}
2596
Paolo Bonzini98d9e852019-04-12 10:19:57 +02002597static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
2598 struct vmcs12 *vmcs12)
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002599{
2600 bool ia32e;
2601
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002602 if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
2603 !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
2604 !nested_cr3_valid(vcpu, vmcs12->host_cr3))
Krish Sadhukhan254b2f32018-12-12 13:30:11 -05002605 return -EINVAL;
Krish Sadhukhan711eff32019-02-07 14:05:30 -05002606
2607 if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) ||
2608 is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
2609 return -EINVAL;
2610
Krish Sadhukhanf6b0db1f2019-04-08 17:35:11 -04002611 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2612 !kvm_pat_valid(vmcs12->host_ia32_pat))
2613 return -EINVAL;
2614
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002615 /*
2616 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2617 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2618 * the values of the LMA and LME bits in the field must each be that of
2619 * the host address-space size VM-exit control.
2620 */
2621 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2622 ia32e = (vmcs12->vm_exit_controls &
2623 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
2624 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
2625 ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
2626 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
Krish Sadhukhan254b2f32018-12-12 13:30:11 -05002627 return -EINVAL;
Krish Sadhukhan5fbf9632018-12-12 13:30:10 -05002628 }
2629
Sean Christopherson55d23752018-12-03 13:53:18 -08002630 return 0;
2631}
2632
2633static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2634 struct vmcs12 *vmcs12)
2635{
KarimAllah Ahmed88925302019-01-31 21:24:41 +01002636 int r = 0;
Sean Christopherson55d23752018-12-03 13:53:18 -08002637 struct vmcs12 *shadow;
KarimAllah Ahmed88925302019-01-31 21:24:41 +01002638 struct kvm_host_map map;
Sean Christopherson55d23752018-12-03 13:53:18 -08002639
2640 if (vmcs12->vmcs_link_pointer == -1ull)
2641 return 0;
2642
2643 if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
2644 return -EINVAL;
2645
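	/*
	 * Map the referenced VMCS and sanity check its header: the revision
	 * id must be VMCS12_REVISION and the shadow-VMCS indicator must be
	 * consistent with whether L1 enabled VMCS shadowing.
	 */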
KarimAllah Ahmed88925302019-01-31 21:24:41 +01002646 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
Sean Christopherson55d23752018-12-03 13:53:18 -08002647 return -EINVAL;
2648
KarimAllah Ahmed88925302019-01-31 21:24:41 +01002649 shadow = map.hva;
2650
Sean Christopherson55d23752018-12-03 13:53:18 -08002651 if (shadow->hdr.revision_id != VMCS12_REVISION ||
2652 shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
2653 r = -EINVAL;
KarimAllah Ahmed88925302019-01-31 21:24:41 +01002654
2655 kvm_vcpu_unmap(vcpu, &map, false);
Sean Christopherson55d23752018-12-03 13:53:18 -08002656 return r;
2657}
2658
Sean Christopherson55d23752018-12-03 13:53:18 -08002659/*
2660 * Checks related to Guest Non-register State
2661 */
2662static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2663{
2664 if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2665 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
2666 return -EINVAL;
2667
2668 return 0;
2669}
2670
Sean Christopherson5478ba32019-04-11 12:18:06 -07002671static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
2672 struct vmcs12 *vmcs12,
2673 u32 *exit_qual)
Sean Christopherson55d23752018-12-03 13:53:18 -08002674{
2675 bool ia32e;
2676
2677 *exit_qual = ENTRY_FAIL_DEFAULT;
2678
2679 if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
2680 !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
Sean Christophersonc80add02019-04-11 12:18:09 -07002681 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002682
Krish Sadhukhande2bc2b2019-04-08 17:35:12 -04002683 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
2684 !kvm_pat_valid(vmcs12->guest_ia32_pat))
Sean Christophersonc80add02019-04-11 12:18:09 -07002685 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002686
2687 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
2688 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
Sean Christophersonc80add02019-04-11 12:18:09 -07002689 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002690 }
2691
2692 /*
2693 * If the load IA32_EFER VM-entry control is 1, the following checks
2694 * are performed on the field for the IA32_EFER MSR:
2695 * - Bits reserved in the IA32_EFER MSR must be 0.
2696 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
2697 * the IA-32e mode guest VM-exit control. It must also be identical
2698 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
2699 * CR0.PG) is 1.
2700 */
2701 if (to_vmx(vcpu)->nested.nested_run_pending &&
2702 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
2703 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
2704 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
2705 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
2706 ((vmcs12->guest_cr0 & X86_CR0_PG) &&
2707 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
Sean Christophersonc80add02019-04-11 12:18:09 -07002708 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002709 }
2710
2711 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
Sean Christophersonc80add02019-04-11 12:18:09 -07002712 (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
2713 (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
2714 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002715
Sean Christopherson9c3e9222019-04-11 12:18:05 -07002716 if (nested_check_guest_non_reg_state(vmcs12))
Sean Christophersonc80add02019-04-11 12:18:09 -07002717 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002718
2719 return 0;
2720}
2721
Sean Christopherson453eafb2018-12-20 12:25:17 -08002722static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
Sean Christopherson55d23752018-12-03 13:53:18 -08002723{
2724 struct vcpu_vmx *vmx = to_vmx(vcpu);
2725 unsigned long cr3, cr4;
Sean Christophersonf1727b42019-01-25 07:40:58 -08002726 bool vm_fail;
Sean Christopherson55d23752018-12-03 13:53:18 -08002727
2728 if (!nested_early_check)
2729 return 0;
2730
2731 if (vmx->msr_autoload.host.nr)
2732 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2733 if (vmx->msr_autoload.guest.nr)
2734 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
2735
2736 preempt_disable();
2737
2738 vmx_prepare_switch_to_guest(vcpu);
2739
2740 /*
2741 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
2742 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
2743	 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
2744 * there is no need to preserve other bits or save/restore the field.
2745 */
2746 vmcs_writel(GUEST_RFLAGS, 0);
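	/*
	 * Refresh HOST_CR3/HOST_CR4 if the current values differ from what
	 * was last written for this VMCS, e.g. due to a context switch
	 * since the last VM-Enter.
	 */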
2747
Sean Christopherson55d23752018-12-03 13:53:18 -08002748 cr3 = __get_current_cr3_fast();
2749 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
2750 vmcs_writel(HOST_CR3, cr3);
2751 vmx->loaded_vmcs->host_state.cr3 = cr3;
2752 }
2753
2754 cr4 = cr4_read_shadow();
2755 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
2756 vmcs_writel(HOST_CR4, cr4);
2757 vmx->loaded_vmcs->host_state.cr4 = cr4;
2758 }
2759
Sean Christopherson55d23752018-12-03 13:53:18 -08002760 asm(
Sean Christopherson453eafb2018-12-20 12:25:17 -08002761 "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
Sean Christopherson5a878162019-01-25 07:41:02 -08002762 "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
2763 "je 1f \n\t"
Sean Christophersonfbda0fd2019-01-25 07:41:01 -08002764 __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
Sean Christopherson5a878162019-01-25 07:41:02 -08002765 "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
2766 "1: \n\t"
Sean Christopherson453eafb2018-12-20 12:25:17 -08002767 "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
Sean Christopherson55d23752018-12-03 13:53:18 -08002768
2769 /* Check if vmlaunch or vmresume is needed */
Sean Christopherson74dfa272019-01-25 07:41:00 -08002770 "cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
Sean Christopherson453eafb2018-12-20 12:25:17 -08002771
Sean Christophersonf1727b42019-01-25 07:40:58 -08002772 /*
2773 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
2774 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
2775 * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
Sean Christophersonbbc0b822019-01-25 07:40:59 -08002776	 * result of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
Sean Christophersonf1727b42019-01-25 07:40:58 -08002777 */
Sean Christopherson453eafb2018-12-20 12:25:17 -08002778 "call vmx_vmenter\n\t"
2779
Sean Christophersonbbc0b822019-01-25 07:40:59 -08002780 CC_SET(be)
2781 : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
Sean Christopherson5a878162019-01-25 07:41:02 -08002782 : [HOST_RSP]"r"((unsigned long)HOST_RSP),
Sean Christopherson74dfa272019-01-25 07:41:00 -08002783 [loaded_vmcs]"r"(vmx->loaded_vmcs),
2784 [launched]"i"(offsetof(struct loaded_vmcs, launched)),
Sean Christopherson5a878162019-01-25 07:41:02 -08002785 [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
Sean Christopherson453eafb2018-12-20 12:25:17 -08002786 [wordsize]"i"(sizeof(ulong))
Jan Beulich5a253552019-05-27 02:45:44 -06002787 : "memory"
Sean Christopherson55d23752018-12-03 13:53:18 -08002788 );
2789
Sean Christopherson55d23752018-12-03 13:53:18 -08002790 if (vmx->msr_autoload.host.nr)
2791 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2792 if (vmx->msr_autoload.guest.nr)
2793 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2794
Sean Christophersonf1727b42019-01-25 07:40:58 -08002795 if (vm_fail) {
Wanpeng Li541e8862019-05-17 16:49:50 +08002796 preempt_enable();
Sean Christopherson55d23752018-12-03 13:53:18 -08002797 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
2798 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
Sean Christopherson55d23752018-12-03 13:53:18 -08002799 return 1;
2800 }
2801
2802 /*
2803 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
2804 */
2805 local_irq_enable();
2806 if (hw_breakpoint_active())
2807 set_debugreg(__this_cpu_read(cpu_dr7), 7);
Wanpeng Li541e8862019-05-17 16:49:50 +08002808 preempt_enable();
Sean Christopherson55d23752018-12-03 13:53:18 -08002809
2810 /*
2811 * A non-failing VMEntry means we somehow entered guest mode with
2812 * an illegal RIP, and that's just the tip of the iceberg. There
2813 * is no telling what memory has been modified or what state has
2814 * been exposed to unknown code. Hitting this all but guarantees
2815 * a (very critical) hardware issue.
2816 */
2817 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
2818 VMX_EXIT_REASONS_FAILED_VMENTRY));
2819
2820 return 0;
2821}
Sean Christopherson55d23752018-12-03 13:53:18 -08002822
2823static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
2824 struct vmcs12 *vmcs12);
2825
2826static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
2827{
2828 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2829 struct vcpu_vmx *vmx = to_vmx(vcpu);
KarimAllah Ahmed96c66e82019-01-31 21:24:37 +01002830 struct kvm_host_map *map;
Sean Christopherson55d23752018-12-03 13:53:18 -08002831 struct page *page;
2832 u64 hpa;
2833
2834 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
2835 /*
2836 * Translate L1 physical address to host physical
2837 * address for vmcs02. Keep the page pinned, so this
2838 * physical address remains valid. We keep a reference
2839 * to it so we can release it later.
2840 */
2841 if (vmx->nested.apic_access_page) { /* shouldn't happen */
2842 kvm_release_page_dirty(vmx->nested.apic_access_page);
2843 vmx->nested.apic_access_page = NULL;
2844 }
2845 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
2846 /*
2847		 * If the translation failed, it doesn't matter: this feature asks
2848 * to exit when accessing the given address, and if it
2849 * can never be accessed, this feature won't do
2850 * anything anyway.
2851 */
2852 if (!is_error_page(page)) {
2853 vmx->nested.apic_access_page = page;
2854 hpa = page_to_phys(vmx->nested.apic_access_page);
2855 vmcs_write64(APIC_ACCESS_ADDR, hpa);
2856 } else {
Sean Christophersonfe7f895d2019-05-07 12:17:57 -07002857 secondary_exec_controls_clearbit(vmx,
2858 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
Sean Christopherson55d23752018-12-03 13:53:18 -08002859 }
2860 }
2861
2862 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
KarimAllah Ahmed96c66e82019-01-31 21:24:37 +01002863 map = &vmx->nested.virtual_apic_map;
Sean Christopherson55d23752018-12-03 13:53:18 -08002864
KarimAllah Ahmed96c66e82019-01-31 21:24:37 +01002865 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
2866 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
Paolo Bonzini69090812019-04-15 15:16:17 +02002867 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
2868 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
2869 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
2870 /*
2871			 * The processor will never use the TPR shadow, so simply
2872			 * clear the bit from the execution control. Such a
2873 * configuration is useless, but it happens in tests.
2874 * For any other configuration, failing the vm entry is
2875 * _not_ what the processor does but it's basically the
2876 * only possibility we have.
2877 */
Sean Christopherson2183f562019-05-07 12:17:56 -07002878 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
Sean Christophersonca2f5462019-05-07 09:06:33 -07002879 } else {
2880 /*
2881 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
2882 * force VM-Entry to fail.
2883 */
2884 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
Sean Christopherson55d23752018-12-03 13:53:18 -08002885 }
2886 }
2887
2888 if (nested_cpu_has_posted_intr(vmcs12)) {
KarimAllah Ahmed3278e042019-01-31 21:24:38 +01002889 map = &vmx->nested.pi_desc_map;
2890
2891 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
2892 vmx->nested.pi_desc =
2893 (struct pi_desc *)(((void *)map->hva) +
2894 offset_in_page(vmcs12->posted_intr_desc_addr));
2895 vmcs_write64(POSTED_INTR_DESC_ADDR,
2896 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
Sean Christopherson55d23752018-12-03 13:53:18 -08002897 }
Sean Christopherson55d23752018-12-03 13:53:18 -08002898 }
2899 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
Sean Christopherson2183f562019-05-07 12:17:56 -07002900 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
Sean Christopherson55d23752018-12-03 13:53:18 -08002901 else
Sean Christopherson2183f562019-05-07 12:17:56 -07002902 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
Sean Christopherson55d23752018-12-03 13:53:18 -08002903}
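
/*
 * Illustrative sketch, not part of the original file: the mapping pattern used
 * above for the virtual-APIC page and the posted-interrupt descriptor.  A
 * vmcs12 field supplies an L1 guest-physical address; kvm_vcpu_map() pins and
 * maps the backing page, and the offset within the page is re-applied because
 * the object need not be page aligned.  The helper name is hypothetical and
 * exists only for this example.
 */
static void *nested_map_l1_object(struct kvm_vcpu *vcpu, gpa_t gpa,
				  struct kvm_host_map *map)
{
	/* kvm_vcpu_map() returns 0 on success, as in the code above. */
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), map))
		return NULL;	/* translation failed; caller must degrade gracefully */

	/* map->hva is page aligned; add back the low bits of the gpa. */
	return (void *)map->hva + offset_in_page(gpa);
}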
2904
2905/*
2906 * Intel's VMX Instruction Reference specifies a common set of prerequisites
2907 * for running VMX instructions (except VMXON, whose prerequisites are
2908 * slightly different). It also specifies what exception to inject otherwise.
2909 * Note that many of these exceptions have priority over VM exits, so they
2910 * don't have to be checked again here.
2911 */
2912static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
2913{
2914 if (!to_vmx(vcpu)->nested.vmxon) {
2915 kvm_queue_exception(vcpu, UD_VECTOR);
2916 return 0;
2917 }
2918
2919 if (vmx_get_cpl(vcpu)) {
2920 kvm_inject_gp(vcpu, 0);
2921 return 0;
2922 }
2923
2924 return 1;
2925}
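
/*
 * Usage sketch (illustration only): the VMX instruction handlers further down
 * in this file start by bailing out when these common prerequisites fail, e.g.:
 *
 *	if (!nested_vmx_check_permission(vcpu))
 *		return 1;
 */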
2926
2927static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
2928{
2929 u8 rvi = vmx_get_rvi();
2930 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
2931
2932 return ((rvi & 0xf0) > (vppr & 0xf0));
2933}
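
/*
 * Worked example for the priority-class comparison above (numbers are
 * illustrative only): RVI and VPPR are compared by their upper nibble, i.e.
 * the 16-level priority class.  With rvi = 0x41 (highest pending vector 0x41,
 * class 4) and vppr = 0x30 (processor priority class 3), 0x40 > 0x30, so an
 * APICv interrupt is deliverable and pending-event evaluation must be forced.
 */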
2934
2935static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
2936 struct vmcs12 *vmcs12);
2937
2938/*
2939 * If from_vmentry is false, this is being called from state restore (either RSM
2940 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
2941 *
2942 * Returns:
2943 *	0 - success, i.e. proceed with actual VMEnter
2944 *	1 - consistency check VMExit
2945 *	-1 - consistency check VMFail
2946 */
2947int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
2948{
2949 struct vcpu_vmx *vmx = to_vmx(vcpu);
2950 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2951 bool evaluate_pending_interrupts;
2952 u32 exit_reason = EXIT_REASON_INVALID_STATE;
2953 u32 exit_qual;
2954
Sean Christopherson2183f562019-05-07 12:17:56 -07002955 evaluate_pending_interrupts = exec_controls_get(vmx) &
Sean Christopherson55d23752018-12-03 13:53:18 -08002956 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
2957 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
2958 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
2959
2960 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
2961 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
2962 if (kvm_mpx_supported() &&
2963 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2964 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
2965
2966 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
2967
2968 prepare_vmcs02_early(vmx, vmcs12);
2969
2970 if (from_vmentry) {
2971 nested_get_vmcs12_pages(vcpu);
2972
2973 if (nested_vmx_check_vmentry_hw(vcpu)) {
2974 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
2975 return -1;
2976 }
2977
Sean Christopherson5478ba32019-04-11 12:18:06 -07002978 if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
Sean Christopherson55d23752018-12-03 13:53:18 -08002979 goto vmentry_fail_vmexit;
2980 }
2981
2982 enter_guest_mode(vcpu);
2983 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
2984 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
2985
2986 if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
2987 goto vmentry_fail_vmexit_guest_mode;
2988
2989 if (from_vmentry) {
2990 exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
2991 exit_qual = nested_vmx_load_msr(vcpu,
2992 vmcs12->vm_entry_msr_load_addr,
2993 vmcs12->vm_entry_msr_load_count);
2994 if (exit_qual)
2995 goto vmentry_fail_vmexit_guest_mode;
2996 } else {
2997 /*
2998 * The MMU is not initialized to point at the right entities yet and
2999 * "get pages" would need to read data from the guest (i.e. we will
3000 * need to perform gpa to hpa translation). Request a call
3001 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3002 * have already been set at vmentry time and should not be reset.
3003 */
3004 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
3005 }
3006
3007 /*
3008 * If L1 had a pending IRQ/NMI until it executed
3009 * VMLAUNCH/VMRESUME which wasn't delivered because it was
3010 * disallowed (e.g. interrupts disabled), L0 needs to
3011 * evaluate if this pending event should cause an exit from L2
3012	 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
3013	 * intercept EXTERNAL_INTERRUPT).
3014 *
3015 * Usually this would be handled by the processor noticing an
3016 * IRQ/NMI window request, or checking RVI during evaluation of
3017 * pending virtual interrupts. However, this setting was done
3018 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3019 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3020 */
3021 if (unlikely(evaluate_pending_interrupts))
3022 kvm_make_request(KVM_REQ_EVENT, vcpu);
3023
3024 /*
Paolo Bonzini359a6c32019-01-29 19:14:46 +01003025 * Do not start the preemption timer hrtimer until after we know
3026 * we are successful, so that only nested_vmx_vmexit needs to cancel
3027 * the timer.
3028 */
3029 vmx->nested.preemption_timer_expired = false;
3030 if (nested_cpu_has_preemption_timer(vmcs12))
3031 vmx_start_preemption_timer(vcpu);
3032
3033 /*
Sean Christopherson55d23752018-12-03 13:53:18 -08003034 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3035 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3036 * returned as far as L1 is concerned. It will only return (and set
3037 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3038 */
3039 return 0;
3040
3041 /*
3042 * A failed consistency check that leads to a VMExit during L1's
3043 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3044 * 26.7 "VM-entry failures during or after loading guest state".
3045 */
3046vmentry_fail_vmexit_guest_mode:
3047 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3048 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3049 leave_guest_mode(vcpu);
3050
3051vmentry_fail_vmexit:
3052 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3053
3054 if (!from_vmentry)
3055 return 1;
3056
3057 load_vmcs12_host_state(vcpu, vmcs12);
3058 vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
3059 vmcs12->exit_qualification = exit_qual;
3060 if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
Sean Christopherson3731905ef2019-05-07 08:36:27 -07003061 vmx->nested.need_vmcs12_to_shadow_sync = true;
Sean Christopherson55d23752018-12-03 13:53:18 -08003062 return 1;
3063}
3064
3065/*
3066 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3067 * for running an L2 nested guest.
3068 */
3069static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3070{
3071 struct vmcs12 *vmcs12;
3072 struct vcpu_vmx *vmx = to_vmx(vcpu);
3073 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
3074 int ret;
3075
3076 if (!nested_vmx_check_permission(vcpu))
3077 return 1;
3078
3079 if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
3080 return 1;
3081
3082 if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
3083 return nested_vmx_failInvalid(vcpu);
3084
3085 vmcs12 = get_vmcs12(vcpu);
3086
3087 /*
3088 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3089 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3090 * rather than RFLAGS.ZF, and no error number is stored to the
3091 * VM-instruction error field.
3092 */
3093 if (vmcs12->hdr.shadow_vmcs)
3094 return nested_vmx_failInvalid(vcpu);
3095
3096 if (vmx->nested.hv_evmcs) {
3097 copy_enlightened_to_vmcs12(vmx);
3098 /* Enlightened VMCS doesn't have launch state */
3099 vmcs12->launch_state = !launch;
3100 } else if (enable_shadow_vmcs) {
3101 copy_shadow_to_vmcs12(vmx);
3102 }
3103
3104 /*
3105 * The nested entry process starts with enforcing various prerequisites
3106	 * on vmcs12 as required by the Intel SDM, and acting appropriately when
3107 * they fail: As the SDM explains, some conditions should cause the
3108 * instruction to fail, while others will cause the instruction to seem
3109 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3110 * To speed up the normal (success) code path, we should avoid checking
3111 * for misconfigurations which will anyway be caught by the processor
3112 * when using the merged vmcs02.
3113 */
3114 if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
3115 return nested_vmx_failValid(vcpu,
3116 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3117
3118 if (vmcs12->launch_state == launch)
3119 return nested_vmx_failValid(vcpu,
3120 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3121 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3122
Paolo Bonzini98d9e852019-04-12 10:19:57 +02003123 if (nested_vmx_check_controls(vcpu, vmcs12))
3124 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
Sean Christopherson5478ba32019-04-11 12:18:06 -07003125
Paolo Bonzini98d9e852019-04-12 10:19:57 +02003126 if (nested_vmx_check_host_state(vcpu, vmcs12))
3127 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
Sean Christopherson55d23752018-12-03 13:53:18 -08003128
3129 /*
3130 * We're finally done with prerequisite checking, and can start with
3131 * the nested entry.
3132 */
3133 vmx->nested.nested_run_pending = 1;
3134 ret = nested_vmx_enter_non_root_mode(vcpu, true);
3135 vmx->nested.nested_run_pending = !ret;
3136 if (ret > 0)
3137 return 1;
3138 else if (ret)
3139 return nested_vmx_failValid(vcpu,
3140 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3141
3142 /* Hide L1D cache contents from the nested guest. */
3143 vmx->vcpu.arch.l1tf_flush_l1d = true;
3144
3145 /*
3146 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3147 * also be used as part of restoring nVMX state for
3148 * snapshot restore (migration).
3149 *
3150 * In this flow, it is assumed that vmcs12 cache was
3151	 * transferred as part of captured nVMX state and should
3152 * therefore not be read from guest memory (which may not
3153 * exist on destination host yet).
3154 */
3155 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3156
3157 /*
Jim Mattson9ebdfe52018-11-26 11:22:32 -08003158 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3159 * awakened by event injection or by an NMI-window VM-exit or
3160 * by an interrupt-window VM-exit, halt the vcpu.
Sean Christopherson55d23752018-12-03 13:53:18 -08003161 */
3162 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
Jim Mattson9ebdfe52018-11-26 11:22:32 -08003163 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3164 !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
3165 !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
3166 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
Sean Christopherson55d23752018-12-03 13:53:18 -08003167 vmx->nested.nested_run_pending = 0;
3168 return kvm_vcpu_halt(vcpu);
3169 }
3170 return 1;
3171}
3172
3173/*
3174 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3175 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3176 * This function returns the new value we should put in vmcs12.guest_cr0.
3177 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3178 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3179 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3180 * didn't trap the bit, because if L1 did, so would L0).
3181 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3182 * been modified by L2, and L1 knows it. So just leave the old value of
3183 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3184 * isn't relevant, because if L0 traps this bit it can set it to anything.
3185 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3186 * changed these bits, and therefore they need to be updated, but L0
3187 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3188 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3189 */
3190static inline unsigned long
3191vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3192{
3193 return
3194 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3195 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3196 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3197 vcpu->arch.cr0_guest_owned_bits));
3198}
3199
3200static inline unsigned long
3201vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3202{
3203 return
3204 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3205 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3206 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3207 vcpu->arch.cr4_guest_owned_bits));
3208}
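
/*
 * A minimal sketch, not part of the original file, restating the three-way
 * merge documented above with explicit parameter names.  All names are
 * hypothetical; the real code reads the inputs from vmcs02 and vmcs12.
 */
static unsigned long example_merge_guest_cr0(unsigned long vmcs02_guest_cr0,
					     unsigned long vmcs02_cr0_read_shadow,
					     unsigned long old_vmcs12_guest_cr0,
					     unsigned long l0_guest_owned_bits,
					     unsigned long l1_guest_host_mask)
{
	return	/* bits neither L0 nor L1 trap: take L2's live value */
		(vmcs02_guest_cr0 & l0_guest_owned_bits) |
		/* bits L1 traps: keep the old vmcs12 value */
		(old_vmcs12_guest_cr0 & l1_guest_host_mask) |
		/* bits only L0 traps: take the vmcs02 read shadow */
		(vmcs02_cr0_read_shadow & ~(l1_guest_host_mask |
					    l0_guest_owned_bits));
}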
3209
3210static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3211 struct vmcs12 *vmcs12)
3212{
3213 u32 idt_vectoring;
3214 unsigned int nr;
3215
3216 if (vcpu->arch.exception.injected) {
3217 nr = vcpu->arch.exception.nr;
3218 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3219
3220 if (kvm_exception_is_soft(nr)) {
3221 vmcs12->vm_exit_instruction_len =
3222 vcpu->arch.event_exit_inst_len;
3223 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3224 } else
3225 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3226
3227 if (vcpu->arch.exception.has_error_code) {
3228 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3229 vmcs12->idt_vectoring_error_code =
3230 vcpu->arch.exception.error_code;
3231 }
3232
3233 vmcs12->idt_vectoring_info_field = idt_vectoring;
3234 } else if (vcpu->arch.nmi_injected) {
3235 vmcs12->idt_vectoring_info_field =
3236 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3237 } else if (vcpu->arch.interrupt.injected) {
3238 nr = vcpu->arch.interrupt.nr;
3239 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3240
3241 if (vcpu->arch.interrupt.soft) {
3242 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3243 vmcs12->vm_entry_instruction_len =
3244 vcpu->arch.event_exit_inst_len;
3245 } else
3246 idt_vectoring |= INTR_TYPE_EXT_INTR;
3247
3248 vmcs12->idt_vectoring_info_field = idt_vectoring;
3249 }
3250}
3251
3252
3253static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3254{
3255 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3256 gfn_t gfn;
3257
3258 /*
3259 * Don't need to mark the APIC access page dirty; it is never
3260 * written to by the CPU during APIC virtualization.
3261 */
3262
3263 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3264 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3265 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3266 }
3267
3268 if (nested_cpu_has_posted_intr(vmcs12)) {
3269 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3270 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3271 }
3272}
3273
3274static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
3275{
3276 struct vcpu_vmx *vmx = to_vmx(vcpu);
3277 int max_irr;
3278 void *vapic_page;
3279 u16 status;
3280
3281 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
3282 return;
3283
3284 vmx->nested.pi_pending = false;
3285 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3286 return;
3287
3288 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3289 if (max_irr != 256) {
KarimAllah Ahmed96c66e82019-01-31 21:24:37 +01003290 vapic_page = vmx->nested.virtual_apic_map.hva;
3291 if (!vapic_page)
3292 return;
3293
Sean Christopherson55d23752018-12-03 13:53:18 -08003294 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3295 vapic_page, &max_irr);
Sean Christopherson55d23752018-12-03 13:53:18 -08003296 status = vmcs_read16(GUEST_INTR_STATUS);
3297 if ((u8)max_irr > ((u8)status & 0xff)) {
3298 status &= ~0xff;
3299 status |= (u8)max_irr;
3300 vmcs_write16(GUEST_INTR_STATUS, status);
3301 }
3302 }
3303
3304 nested_mark_vmcs12_pages_dirty(vcpu);
3305}
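
/*
 * Worked example for the GUEST_INTR_STATUS update above (values are
 * illustrative only): the low byte of the field is RVI, the high byte SVI.
 * If the highest vector found in the PIR is max_irr = 0x61 and the field
 * currently reads 0x2035 (SVI = 0x20, RVI = 0x35), then 0x61 > 0x35, so the
 * low byte is replaced and 0x2061 is written back, leaving SVI untouched.
 */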
3306
3307static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3308 unsigned long exit_qual)
3309{
3310 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3311 unsigned int nr = vcpu->arch.exception.nr;
3312 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3313
3314 if (vcpu->arch.exception.has_error_code) {
3315 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3316 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3317 }
3318
3319 if (kvm_exception_is_soft(nr))
3320 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3321 else
3322 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3323
3324 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3325 vmx_get_nmi_mask(vcpu))
3326 intr_info |= INTR_INFO_UNBLOCK_NMI;
3327
3328 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3329}
3330
3331static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
3332{
3333 struct vcpu_vmx *vmx = to_vmx(vcpu);
3334 unsigned long exit_qual;
3335 bool block_nested_events =
3336 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
3337
3338 if (vcpu->arch.exception.pending &&
3339 nested_vmx_check_exception(vcpu, &exit_qual)) {
3340 if (block_nested_events)
3341 return -EBUSY;
3342 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3343 return 0;
3344 }
3345
3346 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3347 vmx->nested.preemption_timer_expired) {
3348 if (block_nested_events)
3349 return -EBUSY;
3350 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3351 return 0;
3352 }
3353
3354 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
3355 if (block_nested_events)
3356 return -EBUSY;
3357 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3358 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3359 INTR_INFO_VALID_MASK, 0);
3360 /*
3361 * The NMI-triggered VM exit counts as injection:
3362 * clear this one and block further NMIs.
3363 */
3364 vcpu->arch.nmi_pending = 0;
3365 vmx_set_nmi_mask(vcpu, true);
3366 return 0;
3367 }
3368
3369 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
3370 nested_exit_on_intr(vcpu)) {
3371 if (block_nested_events)
3372 return -EBUSY;
3373 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
3374 return 0;
3375 }
3376
3377 vmx_complete_nested_posted_interrupt(vcpu);
3378 return 0;
3379}
3380
3381static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
3382{
3383 ktime_t remaining =
3384 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
3385 u64 value;
3386
3387 if (ktime_to_ns(remaining) <= 0)
3388 return 0;
3389
3390 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
3391 do_div(value, 1000000);
3392 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
3393}
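
/*
 * Worked example for the conversion above (numbers are illustrative only):
 * with virtual_tsc_khz = 2000000 (a 2 GHz guest TSC) and 1 ms of host time
 * left on the hrtimer, value = 1000000 ns * 2000000 / 1000000 = 2000000 TSC
 * ticks.  The VMX preemption timer counts at the TSC rate divided by
 * 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE, so the saved field is
 * 2000000 >> 5 = 62500.
 */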
3394
Sean Christopherson7952d762019-05-07 08:36:29 -07003395static bool is_vmcs12_ext_field(unsigned long field)
Sean Christopherson55d23752018-12-03 13:53:18 -08003396{
Sean Christopherson7952d762019-05-07 08:36:29 -07003397 switch (field) {
3398 case GUEST_ES_SELECTOR:
3399 case GUEST_CS_SELECTOR:
3400 case GUEST_SS_SELECTOR:
3401 case GUEST_DS_SELECTOR:
3402 case GUEST_FS_SELECTOR:
3403 case GUEST_GS_SELECTOR:
3404 case GUEST_LDTR_SELECTOR:
3405 case GUEST_TR_SELECTOR:
3406 case GUEST_ES_LIMIT:
3407 case GUEST_CS_LIMIT:
3408 case GUEST_SS_LIMIT:
3409 case GUEST_DS_LIMIT:
3410 case GUEST_FS_LIMIT:
3411 case GUEST_GS_LIMIT:
3412 case GUEST_LDTR_LIMIT:
3413 case GUEST_TR_LIMIT:
3414 case GUEST_GDTR_LIMIT:
3415 case GUEST_IDTR_LIMIT:
3416 case GUEST_ES_AR_BYTES:
3417 case GUEST_DS_AR_BYTES:
3418 case GUEST_FS_AR_BYTES:
3419 case GUEST_GS_AR_BYTES:
3420 case GUEST_LDTR_AR_BYTES:
3421 case GUEST_TR_AR_BYTES:
3422 case GUEST_ES_BASE:
3423 case GUEST_CS_BASE:
3424 case GUEST_SS_BASE:
3425 case GUEST_DS_BASE:
3426 case GUEST_FS_BASE:
3427 case GUEST_GS_BASE:
3428 case GUEST_LDTR_BASE:
3429 case GUEST_TR_BASE:
3430 case GUEST_GDTR_BASE:
3431 case GUEST_IDTR_BASE:
3432 case GUEST_PENDING_DBG_EXCEPTIONS:
3433 case GUEST_BNDCFGS:
3434 return true;
3435 default:
3436 break;
3437 }
Sean Christopherson55d23752018-12-03 13:53:18 -08003438
Sean Christopherson7952d762019-05-07 08:36:29 -07003439 return false;
3440}
3441
3442static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
3443 struct vmcs12 *vmcs12)
3444{
3445 struct vcpu_vmx *vmx = to_vmx(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08003446
3447 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
3448 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
3449 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
3450 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
3451 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
3452 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
3453 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
3454 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
3455 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
3456 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
3457 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
3458 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
3459 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
3460 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
3461 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
3462 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
3463 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
3464 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
3465 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
Sean Christopherson55d23752018-12-03 13:53:18 -08003466 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
3467 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
3468 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
3469 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
3470 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
3471 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
3472 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
3473 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
3474 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
3475 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
3476 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
3477 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
3478 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
3479 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
3480 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
Sean Christopherson7952d762019-05-07 08:36:29 -07003481 vmcs12->guest_pending_dbg_exceptions =
3482 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
3483 if (kvm_mpx_supported())
3484 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3485
3486 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
3487}
3488
3489static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
3490 struct vmcs12 *vmcs12)
3491{
3492 struct vcpu_vmx *vmx = to_vmx(vcpu);
3493 int cpu;
3494
3495 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
3496 return;
3497
3498
3499 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
3500
3501 cpu = get_cpu();
3502 vmx->loaded_vmcs = &vmx->nested.vmcs02;
3503 vmx_vcpu_load(&vmx->vcpu, cpu);
3504
3505 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
3506
3507 vmx->loaded_vmcs = &vmx->vmcs01;
3508 vmx_vcpu_load(&vmx->vcpu, cpu);
3509 put_cpu();
3510}
3511
3512/*
3513 * Update the guest state fields of vmcs12 to reflect changes that
3514 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
3515 * VM-entry controls is also updated, since this is really a guest
3516 * state bit.)
3517 */
3518static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3519{
3520 struct vcpu_vmx *vmx = to_vmx(vcpu);
3521
3522 if (vmx->nested.hv_evmcs)
3523 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
3524
3525 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
3526
3527 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
3528 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
3529
3530 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
3531 vmcs12->guest_rip = kvm_rip_read(vcpu);
3532 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
3533
3534 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
3535 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
Sean Christopherson55d23752018-12-03 13:53:18 -08003536
Sean Christophersonde70d272019-05-07 09:06:36 -07003537 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
3538 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
3539 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
3540
Sean Christopherson55d23752018-12-03 13:53:18 -08003541 vmcs12->guest_interruptibility_info =
3542 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
Sean Christopherson7952d762019-05-07 08:36:29 -07003543
Sean Christopherson55d23752018-12-03 13:53:18 -08003544 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
3545 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
3546 else
3547 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
3548
Paolo Bonzinib4b65b52019-01-29 19:12:35 +01003549 if (nested_cpu_has_preemption_timer(vmcs12) &&
3550 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
Sean Christopherson55d23752018-12-03 13:53:18 -08003551 vmcs12->vmx_preemption_timer_value =
3552 vmx_get_preemption_timer_value(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08003553
3554 /*
3555 * In some cases (usually, nested EPT), L2 is allowed to change its
3556 * own CR3 without exiting. If it has changed it, we must keep it.
3557 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
3558 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
3559 *
3560 * Additionally, restore L2's PDPTR to vmcs12.
3561 */
3562 if (enable_ept) {
3563 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
Sean Christophersonc7554efc2019-05-07 09:06:40 -07003564 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
3565 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
3566 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
3567 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
3568 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
3569 }
Sean Christopherson55d23752018-12-03 13:53:18 -08003570 }
3571
3572 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
3573
3574 if (nested_cpu_has_vid(vmcs12))
3575 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
3576
3577 vmcs12->vm_entry_controls =
3578 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
3579 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
3580
Sean Christopherson699a1ac2019-05-07 09:06:37 -07003581 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
Sean Christopherson55d23752018-12-03 13:53:18 -08003582 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
Sean Christopherson55d23752018-12-03 13:53:18 -08003583
Sean Christopherson55d23752018-12-03 13:53:18 -08003584 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
3585 vmcs12->guest_ia32_efer = vcpu->arch.efer;
Sean Christopherson55d23752018-12-03 13:53:18 -08003586}
3587
3588/*
3589 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
3590 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
3591 * and this function updates it to reflect the changes to the guest state while
3592 * L2 was running (and perhaps made some exits which were handled directly by L0
3593 * without going back to L1), and to reflect the exit reason.
3594 * Note that we do not have to copy here all VMCS fields, just those that
3595 * could have changed by the L2 guest or the exit - i.e., the guest-state and
3596 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
3597 * which already writes to vmcs12 directly.
3598 */
3599static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
3600 u32 exit_reason, u32 exit_intr_info,
3601 unsigned long exit_qualification)
3602{
Sean Christopherson55d23752018-12-03 13:53:18 -08003603 /* update exit information fields: */
Sean Christopherson55d23752018-12-03 13:53:18 -08003604 vmcs12->vm_exit_reason = exit_reason;
3605 vmcs12->exit_qualification = exit_qualification;
3606 vmcs12->vm_exit_intr_info = exit_intr_info;
3607
3608 vmcs12->idt_vectoring_info_field = 0;
3609 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3610 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
3611
3612 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
3613 vmcs12->launch_state = 1;
3614
3615 /* vm_entry_intr_info_field is cleared on exit. Emulate this
3616 * instead of reading the real value. */
3617 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
3618
3619 /*
3620	 * Transfer the event that L0 or L1 may have wanted to inject into
3621 * L2 to IDT_VECTORING_INFO_FIELD.
3622 */
3623 vmcs12_save_pending_event(vcpu, vmcs12);
Krish Sadhukhana0d4f802018-12-04 19:00:13 -05003624
3625 /*
3626 * According to spec, there's no need to store the guest's
3627 * MSRs if the exit is due to a VM-entry failure that occurs
3628 * during or after loading the guest state. Since this exit
3629 * does not fall in that category, we need to save the MSRs.
3630 */
3631 if (nested_vmx_store_msr(vcpu,
3632 vmcs12->vm_exit_msr_store_addr,
3633 vmcs12->vm_exit_msr_store_count))
3634 nested_vmx_abort(vcpu,
3635 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
Sean Christopherson55d23752018-12-03 13:53:18 -08003636 }
3637
3638 /*
3639 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
3640 * preserved above and would only end up incorrectly in L1.
3641 */
3642 vcpu->arch.nmi_injected = false;
3643 kvm_clear_exception_queue(vcpu);
3644 kvm_clear_interrupt_queue(vcpu);
3645}
3646
3647/*
3648 * A part of what we need to do when the nested L2 guest exits and we want to
3649 * run its L1 parent, is to reset L1's guest state to the host state specified
3650 * in vmcs12.
3651 * This function is to be called not only on normal nested exit, but also on
3652 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
3653 * Failures During or After Loading Guest State").
3654 * This function should be called when the active VMCS is L1's (vmcs01).
3655 */
3656static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3657 struct vmcs12 *vmcs12)
3658{
3659 struct kvm_segment seg;
3660 u32 entry_failure_code;
3661
3662 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
3663 vcpu->arch.efer = vmcs12->host_ia32_efer;
3664 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3665 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
3666 else
3667 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
3668 vmx_set_efer(vcpu, vcpu->arch.efer);
3669
Paolo Bonzinie9c16c72019-04-30 22:07:26 +02003670 kvm_rsp_write(vcpu, vmcs12->host_rsp);
3671 kvm_rip_write(vcpu, vmcs12->host_rip);
Sean Christopherson55d23752018-12-03 13:53:18 -08003672 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
3673 vmx_set_interrupt_shadow(vcpu, 0);
3674
3675 /*
3676 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
3677 * actually changed, because vmx_set_cr0 refers to efer set above.
3678 *
3679 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
3680 * (KVM doesn't change it);
3681 */
3682 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3683 vmx_set_cr0(vcpu, vmcs12->host_cr0);
3684
3685 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
3686 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3687 vmx_set_cr4(vcpu, vmcs12->host_cr4);
3688
3689 nested_ept_uninit_mmu_context(vcpu);
3690
3691 /*
3692 * Only PDPTE load can fail as the value of cr3 was checked on entry and
3693 * couldn't have changed.
3694 */
3695 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
3696 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
3697
3698 if (!enable_ept)
3699 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
3700
3701 /*
3702 * If vmcs01 doesn't use VPID, CPU flushes TLB on every
3703 * VMEntry/VMExit. Thus, no need to flush TLB.
3704 *
3705 * If vmcs12 doesn't use VPID, L1 expects TLB to be
3706 * flushed on every VMEntry/VMExit.
3707 *
3708 * Otherwise, we can preserve TLB entries as long as we are
3709 * able to tag L1 TLB entries differently than L2 TLB entries.
3710 *
3711 * If vmcs12 uses EPT, we need to execute this flush on EPTP01
3712 * and therefore we request the TLB flush to happen only after VMCS EPTP
3713 * has been set by KVM_REQ_LOAD_CR3.
3714 */
3715 if (enable_vpid &&
3716 (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
3717 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3718 }
3719
3720 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
3721 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
3722 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
3723 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
3724 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
3725 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
3726 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
3727
3728 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
3729 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
3730 vmcs_write64(GUEST_BNDCFGS, 0);
3731
3732 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
3733 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
3734 vcpu->arch.pat = vmcs12->host_ia32_pat;
3735 }
3736 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
3737 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
3738 vmcs12->host_ia32_perf_global_ctrl);
3739
3740 /* Set L1 segment info according to Intel SDM
3741 27.5.2 Loading Host Segment and Descriptor-Table Registers */
3742 seg = (struct kvm_segment) {
3743 .base = 0,
3744 .limit = 0xFFFFFFFF,
3745 .selector = vmcs12->host_cs_selector,
3746 .type = 11,
3747 .present = 1,
3748 .s = 1,
3749 .g = 1
3750 };
3751 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3752 seg.l = 1;
3753 else
3754 seg.db = 1;
3755 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
3756 seg = (struct kvm_segment) {
3757 .base = 0,
3758 .limit = 0xFFFFFFFF,
3759 .type = 3,
3760 .present = 1,
3761 .s = 1,
3762 .db = 1,
3763 .g = 1
3764 };
3765 seg.selector = vmcs12->host_ds_selector;
3766 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
3767 seg.selector = vmcs12->host_es_selector;
3768 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
3769 seg.selector = vmcs12->host_ss_selector;
3770 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
3771 seg.selector = vmcs12->host_fs_selector;
3772 seg.base = vmcs12->host_fs_base;
3773 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
3774 seg.selector = vmcs12->host_gs_selector;
3775 seg.base = vmcs12->host_gs_base;
3776 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
3777 seg = (struct kvm_segment) {
3778 .base = vmcs12->host_tr_base,
3779 .limit = 0x67,
3780 .selector = vmcs12->host_tr_selector,
3781 .type = 11,
3782 .present = 1
3783 };
3784 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
3785
3786 kvm_set_dr(vcpu, 7, 0x400);
3787 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
3788
3789 if (cpu_has_vmx_msr_bitmap())
3790 vmx_update_msr_bitmap(vcpu);
3791
3792 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
3793 vmcs12->vm_exit_msr_load_count))
3794 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3795}
3796
3797static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
3798{
3799 struct shared_msr_entry *efer_msr;
3800 unsigned int i;
3801
3802 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
3803 return vmcs_read64(GUEST_IA32_EFER);
3804
3805 if (cpu_has_load_ia32_efer())
3806 return host_efer;
3807
3808 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
3809 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
3810 return vmx->msr_autoload.guest.val[i].value;
3811 }
3812
3813 efer_msr = find_msr_entry(vmx, MSR_EFER);
3814 if (efer_msr)
3815 return efer_msr->data;
3816
3817 return host_efer;
3818}
3819
3820static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
3821{
3822 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3823 struct vcpu_vmx *vmx = to_vmx(vcpu);
3824 struct vmx_msr_entry g, h;
3825 struct msr_data msr;
3826 gpa_t gpa;
3827 u32 i, j;
3828
3829 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
3830
3831 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
3832 /*
3833 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
3834 * as vmcs01.GUEST_DR7 contains a userspace defined value
3835 * and vcpu->arch.dr7 is not squirreled away before the
3836 * nested VMENTER (not worth adding a variable in nested_vmx).
3837 */
3838 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
3839 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
3840 else
3841 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
3842 }
3843
3844 /*
3845 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
3846 * handle a variety of side effects to KVM's software model.
3847 */
3848 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
3849
3850 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3851 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
3852
3853 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3854 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
3855
3856 nested_ept_uninit_mmu_context(vcpu);
Paolo Bonzini2b279242019-04-15 15:57:19 +02003857
3858 /*
3859 * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3
3860 * points to shadow pages! Fortunately we only get here after a WARN_ON
3861 * if EPT is disabled, so a VMabort is perfectly fine.
3862 */
3863 if (enable_ept) {
3864 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3865 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3866 } else {
3867 nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED);
3868 }
Sean Christopherson55d23752018-12-03 13:53:18 -08003869
3870 /*
3871 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
3872 * from vmcs01 (if necessary). The PDPTRs are not loaded on
3873 * VMFail, like everything else we just need to ensure our
3874 * software model is up-to-date.
3875 */
3876 ept_save_pdptrs(vcpu);
3877
3878 kvm_mmu_reset_context(vcpu);
3879
3880 if (cpu_has_vmx_msr_bitmap())
3881 vmx_update_msr_bitmap(vcpu);
3882
3883 /*
3884 * This nasty bit of open coding is a compromise between blindly
3885 * loading L1's MSRs using the exit load lists (incorrect emulation
3886 * of VMFail), leaving the nested VM's MSRs in the software model
3887 * (incorrect behavior) and snapshotting the modified MSRs (too
3888 * expensive since the lists are unbound by hardware). For each
3889 * MSR that was (prematurely) loaded from the nested VMEntry load
3890 * list, reload it from the exit load list if it exists and differs
3891 * from the guest value. The intent is to stuff host state as
3892 * silently as possible, not to fully process the exit load list.
3893 */
3894 msr.host_initiated = false;
3895 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
3896 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
3897 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
3898 pr_debug_ratelimited(
3899 "%s read MSR index failed (%u, 0x%08llx)\n",
3900 __func__, i, gpa);
3901 goto vmabort;
3902 }
3903
3904 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
3905 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
3906 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
3907 pr_debug_ratelimited(
3908 "%s read MSR failed (%u, 0x%08llx)\n",
3909 __func__, j, gpa);
3910 goto vmabort;
3911 }
3912 if (h.index != g.index)
3913 continue;
3914 if (h.value == g.value)
3915 break;
3916
3917 if (nested_vmx_load_msr_check(vcpu, &h)) {
3918 pr_debug_ratelimited(
3919 "%s check failed (%u, 0x%x, 0x%x)\n",
3920 __func__, j, h.index, h.reserved);
3921 goto vmabort;
3922 }
3923
3924 msr.index = h.index;
3925 msr.data = h.value;
3926 if (kvm_set_msr(vcpu, &msr)) {
3927 pr_debug_ratelimited(
3928 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
3929 __func__, j, h.index, h.value);
3930 goto vmabort;
3931 }
3932 }
3933 }
3934
3935 return;
3936
3937vmabort:
3938 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3939}
3940
3941/*
3942 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
3943 * and modify vmcs12 to make it see what it would expect to see there if
3944 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
3945 */
3946void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
3947 u32 exit_intr_info, unsigned long exit_qualification)
3948{
3949 struct vcpu_vmx *vmx = to_vmx(vcpu);
3950 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3951
3952 /* trying to cancel vmlaunch/vmresume is a bug */
3953 WARN_ON_ONCE(vmx->nested.nested_run_pending);
3954
3955 leave_guest_mode(vcpu);
3956
Paolo Bonzinib4b65b52019-01-29 19:12:35 +01003957 if (nested_cpu_has_preemption_timer(vmcs12))
3958 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
3959
Sean Christopherson55d23752018-12-03 13:53:18 -08003960 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3961 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3962
3963 if (likely(!vmx->fail)) {
Sean Christopherson3731905ef2019-05-07 08:36:27 -07003964 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
Sean Christophersonf4f83162019-05-07 08:36:26 -07003965
3966 if (exit_reason != -1)
Sean Christopherson55d23752018-12-03 13:53:18 -08003967 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
3968 exit_qualification);
3969
3970 /*
Sean Christopherson3731905ef2019-05-07 08:36:27 -07003971 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
Sean Christopherson55d23752018-12-03 13:53:18 -08003972 * also be used to capture vmcs12 cache as part of
3973 * capturing nVMX state for snapshot (migration).
3974 *
3975 * Otherwise, this flush will dirty guest memory at a
3976 * point it is already assumed by user-space to be
3977 * immutable.
3978 */
3979 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08003980 } else {
3981 /*
3982 * The only expected VM-instruction error is "VM entry with
3983 * invalid control field(s)." Anything else indicates a
3984 * problem with L0. And we should never get here with a
3985 * VMFail of any type if early consistency checks are enabled.
3986 */
3987 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
3988 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3989 WARN_ON_ONCE(nested_early_check);
3990 }
3991
3992 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3993
3994 /* Update any VMCS fields that might have changed while L2 ran */
3995 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3996 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3997 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
3998
3999 if (kvm_has_tsc_control)
4000 decache_tsc_multiplier(vmx);
4001
4002 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4003 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4004 vmx_set_virtual_apic_mode(vcpu);
4005 } else if (!nested_cpu_has_ept(vmcs12) &&
4006 nested_cpu_has2(vmcs12,
4007 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
4008 vmx_flush_tlb(vcpu, true);
4009 }
4010
Sean Christopherson55d23752018-12-03 13:53:18 -08004011 /* Unpin physical memory we referred to in vmcs02 */
4012 if (vmx->nested.apic_access_page) {
4013 kvm_release_page_dirty(vmx->nested.apic_access_page);
4014 vmx->nested.apic_access_page = NULL;
4015 }
KarimAllah Ahmed96c66e82019-01-31 21:24:37 +01004016 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
KarimAllah Ahmed3278e042019-01-31 21:24:38 +01004017 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4018 vmx->nested.pi_desc = NULL;
Sean Christopherson55d23752018-12-03 13:53:18 -08004019
4020 /*
4021	 * We are now running in L2; the mmu_notifier will force a reload of the
4022	 * page's hpa for the L2 vmcs.  Need to reload it for L1 before entering L1.
4023 */
4024 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4025
4026 if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
Sean Christopherson3731905ef2019-05-07 08:36:27 -07004027 vmx->nested.need_vmcs12_to_shadow_sync = true;
Sean Christopherson55d23752018-12-03 13:53:18 -08004028
4029 /* in case we halted in L2 */
4030 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4031
4032 if (likely(!vmx->fail)) {
4033 /*
4034 * TODO: SDM says that with acknowledge interrupt on
4035 * exit, bit 31 of the VM-exit interrupt information
4036 * (valid interrupt) is always set to 1 on
4037 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
4038 * need kvm_cpu_has_interrupt(). See the commit
4039 * message for details.
4040 */
4041 if (nested_exit_intr_ack_set(vcpu) &&
4042 exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
4043 kvm_cpu_has_interrupt(vcpu)) {
4044 int irq = kvm_cpu_get_interrupt(vcpu);
4045 WARN_ON(irq < 0);
4046 vmcs12->vm_exit_intr_info = irq |
4047 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4048 }
4049
4050 if (exit_reason != -1)
4051 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4052 vmcs12->exit_qualification,
4053 vmcs12->idt_vectoring_info_field,
4054 vmcs12->vm_exit_intr_info,
4055 vmcs12->vm_exit_intr_error_code,
4056 KVM_ISA_VMX);
4057
4058 load_vmcs12_host_state(vcpu, vmcs12);
4059
4060 return;
4061 }
4062
4063 /*
4064 * After an early L2 VM-entry failure, we're now back
4065 * in L1 which thinks it just finished a VMLAUNCH or
4066 * VMRESUME instruction, so we need to set the failure
4067 * flag and the VM-instruction error field of the VMCS
4068 * accordingly, and skip the emulated instruction.
4069 */
4070 (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4071
4072 /*
4073 * Restore L1's host state to KVM's software model. We're here
4074 * because a consistency check was caught by hardware, which
4075 * means some amount of guest state has been propagated to KVM's
4076 * model and needs to be unwound to the host's state.
4077 */
4078 nested_vmx_restore_host_state(vcpu);
4079
4080 vmx->fail = 0;
4081}
4082
4083/*
4084 * Decode the memory-address operand of a vmx instruction, as recorded on an
4085 * exit caused by such an instruction (run by a guest hypervisor).
4086 * On success, returns 0. When the operand is invalid, returns 1 and throws
4087 * #UD or #GP.
4088 */
4089int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004090 u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
Sean Christopherson55d23752018-12-03 13:53:18 -08004091{
4092 gva_t off;
4093 bool exn;
4094 struct kvm_segment s;
4095
4096 /*
4097 * According to Vol. 3B, "Information for VM Exits Due to Instruction
4098 * Execution", on an exit, vmx_instruction_info holds most of the
4099 * addressing components of the operand. Only the displacement part
4100 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4101 * For how an actual address is calculated from all these components,
4102 * refer to Vol. 1, "Operand Addressing".
4103 */
4104 int scaling = vmx_instruction_info & 3;
4105 int addr_size = (vmx_instruction_info >> 7) & 7;
4106 bool is_reg = vmx_instruction_info & (1u << 10);
4107 int seg_reg = (vmx_instruction_info >> 15) & 7;
4108 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4109 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4110 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4111 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
4112
4113 if (is_reg) {
4114 kvm_queue_exception(vcpu, UD_VECTOR);
4115 return 1;
4116 }
4117
4118 /* Addr = segment_base + offset */
4119 /* offset = base + [index * scale] + displacement */
4120 off = exit_qualification; /* holds the displacement */
Sean Christopherson946c5222019-01-23 14:39:23 -08004121 if (addr_size == 1)
4122 off = (gva_t)sign_extend64(off, 31);
4123 else if (addr_size == 0)
4124 off = (gva_t)sign_extend64(off, 15);
Sean Christopherson55d23752018-12-03 13:53:18 -08004125 if (base_is_valid)
4126 off += kvm_register_read(vcpu, base_reg);
4127 if (index_is_valid)
4128 off += kvm_register_read(vcpu, index_reg)<<scaling;
4129 vmx_get_segment(vcpu, &s, seg_reg);
Sean Christopherson55d23752018-12-03 13:53:18 -08004130
Sean Christopherson8570f9e2019-01-23 14:39:24 -08004131 /*
4132 * The effective address, i.e. @off, of a memory operand is truncated
4133 * based on the address size of the instruction. Note that this is
4134 * the *effective address*, i.e. the address prior to accounting for
4135 * the segment's base.
4136 */
Sean Christopherson55d23752018-12-03 13:53:18 -08004137 if (addr_size == 1) /* 32 bit */
Sean Christopherson8570f9e2019-01-23 14:39:24 -08004138 off &= 0xffffffff;
4139 else if (addr_size == 0) /* 16 bit */
4140 off &= 0xffff;
Sean Christopherson55d23752018-12-03 13:53:18 -08004141
4142 /* Checks for #GP/#SS exceptions. */
4143 exn = false;
4144 if (is_long_mode(vcpu)) {
Sean Christopherson8570f9e2019-01-23 14:39:24 -08004145 /*
4146 * The virtual/linear address is never truncated in 64-bit
4147 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4148 * address when using FS/GS with a non-zero base.
4149 */
4150 *ret = s.base + off;
4151
Sean Christopherson55d23752018-12-03 13:53:18 -08004152 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4153 * non-canonical form. This is the only check on the memory
4154 * destination for long mode!
4155 */
4156 exn = is_noncanonical_address(*ret, vcpu);
Paolo Bonzinie0dfacb2019-01-30 17:25:38 +01004157 } else {
Sean Christopherson8570f9e2019-01-23 14:39:24 -08004158 /*
4159 * When not in long mode, the virtual/linear address is
4160 * unconditionally truncated to 32 bits regardless of the
4161 * address size.
4162 */
4163 *ret = (s.base + off) & 0xffffffff;
4164
Sean Christopherson55d23752018-12-03 13:53:18 -08004165 /* Protected mode: apply checks for segment validity in the
4166 * following order:
4167 * - segment type check (#GP(0) may be thrown)
4168 * - usability check (#GP(0)/#SS(0))
4169 * - limit check (#GP(0)/#SS(0))
4170 */
4171 if (wr)
4172 /* #GP(0) if the destination operand is located in a
4173 * read-only data segment or any code segment.
4174 */
4175 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4176 else
4177 /* #GP(0) if the source operand is located in an
4178 * execute-only code segment
4179 */
4180 exn = ((s.type & 0xa) == 8);
4181 if (exn) {
4182 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4183 return 1;
4184 }
4185 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4186 */
4187 exn = (s.unusable != 0);
Sean Christopherson34333cc2019-01-23 14:39:25 -08004188
4189 /*
4190 * Protected mode: #GP(0)/#SS(0) if the memory operand is
4191 * outside the segment limit. All CPUs that support VMX ignore
4192 * limit checks for flat segments, i.e. segments with base==0,
4193 * limit==0xffffffff and of type expand-up data or code.
Sean Christopherson55d23752018-12-03 13:53:18 -08004194 */
Sean Christopherson34333cc2019-01-23 14:39:25 -08004195 if (!(s.base == 0 && s.limit == 0xffffffff &&
4196 ((s.type & 8) || !(s.type & 4))))
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004197 exn = exn || ((u64)off + len - 1 > s.limit);
Sean Christopherson55d23752018-12-03 13:53:18 -08004198 }
4199 if (exn) {
4200 kvm_queue_exception_e(vcpu,
4201 seg_reg == VCPU_SREG_SS ?
4202 SS_VECTOR : GP_VECTOR,
4203 0);
4204 return 1;
4205 }
4206
4207 return 0;
4208}
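
/*
 * Decoding example for the bit-fields above (the value is made up for
 * illustration): vmx_instruction_info = 0x00058102 decodes as scaling = 2
 * (index scaled by 4), addr_size = 2 (64-bit), is_reg = 0 (memory operand),
 * seg_reg = 3 (DS), index_reg = 1 (RCX, valid), base_reg = 0 (RAX, valid).
 * The effective address is therefore DS.base + RAX + RCX*4 + displacement,
 * with the displacement taken from the exit qualification.
 */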
4209
4210static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
4211{
4212 gva_t gva;
4213 struct x86_exception e;
4214
4215 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004216 vmcs_read32(VMX_INSTRUCTION_INFO), false,
4217 sizeof(*vmpointer), &gva))
Sean Christopherson55d23752018-12-03 13:53:18 -08004218 return 1;
4219
4220 if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
4221 kvm_inject_page_fault(vcpu, &e);
4222 return 1;
4223 }
4224
4225 return 0;
4226}
4227
4228/*
4229 * Allocate a shadow VMCS and associate it with the currently loaded
4230 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4231 * VMCS is also VMCLEARed, so that it is ready for use.
4232 */
4233static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4234{
4235 struct vcpu_vmx *vmx = to_vmx(vcpu);
4236 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4237
4238 /*
4239 * We should allocate a shadow vmcs for vmcs01 only when L1
4240 * executes VMXON and free it when L1 executes VMXOFF.
4241 * As it is invalid to execute VMXON twice, we shouldn't reach
4242 * here when vmcs01 already have an allocated shadow vmcs.
4243	 * here when vmcs01 already has an allocated shadow vmcs.
4244 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
4245
4246 if (!loaded_vmcs->shadow_vmcs) {
4247 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4248 if (loaded_vmcs->shadow_vmcs)
4249 vmcs_clear(loaded_vmcs->shadow_vmcs);
4250 }
4251 return loaded_vmcs->shadow_vmcs;
4252}
4253
4254static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4255{
4256 struct vcpu_vmx *vmx = to_vmx(vcpu);
4257 int r;
4258
4259 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4260 if (r < 0)
4261 goto out_vmcs02;
4262
Ben Gardon41836832019-02-11 11:02:52 -08004263 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
Sean Christopherson55d23752018-12-03 13:53:18 -08004264 if (!vmx->nested.cached_vmcs12)
4265 goto out_cached_vmcs12;
4266
Ben Gardon41836832019-02-11 11:02:52 -08004267 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
Sean Christopherson55d23752018-12-03 13:53:18 -08004268 if (!vmx->nested.cached_shadow_vmcs12)
4269 goto out_cached_shadow_vmcs12;
4270
4271 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4272 goto out_shadow_vmcs;
4273
4274 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
4275 HRTIMER_MODE_REL_PINNED);
4276 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4277
4278 vmx->nested.vpid02 = allocate_vpid();
4279
4280 vmx->nested.vmcs02_initialized = false;
4281 vmx->nested.vmxon = true;
Luwei Kangee85dec2018-10-24 16:05:16 +08004282
4283 if (pt_mode == PT_MODE_HOST_GUEST) {
4284 vmx->pt_desc.guest.ctl = 0;
4285 pt_update_intercept_for_msr(vmx);
4286 }
4287
Sean Christopherson55d23752018-12-03 13:53:18 -08004288 return 0;
4289
4290out_shadow_vmcs:
4291 kfree(vmx->nested.cached_shadow_vmcs12);
4292
4293out_cached_shadow_vmcs12:
4294 kfree(vmx->nested.cached_vmcs12);
4295
4296out_cached_vmcs12:
4297 free_loaded_vmcs(&vmx->nested.vmcs02);
4298
4299out_vmcs02:
4300 return -ENOMEM;
4301}
4302
4303/*
4304 * Emulate the VMXON instruction.
4305 * Currently, we just remember that VMX is active, and do not save or even
4306 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
4307 * do not currently need to store anything in that guest-allocated memory
4308 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
4309 * argument is different from the VMXON pointer (which the spec says they do).
4310 */
4311static int handle_vmon(struct kvm_vcpu *vcpu)
4312{
4313 int ret;
4314 gpa_t vmptr;
KarimAllah Ahmed2e408932019-01-31 21:24:31 +01004315 uint32_t revision;
Sean Christopherson55d23752018-12-03 13:53:18 -08004316 struct vcpu_vmx *vmx = to_vmx(vcpu);
4317 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
4318 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
4319
4320 /*
4321 * The Intel VMX Instruction Reference lists a bunch of bits that are
4322 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
4323 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
4324 * Otherwise, we should fail with #UD. But most faulting conditions
4325 * have already been checked by hardware, prior to the VM-exit for
4326 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
4327 * that bit set to 1 in non-root mode.
4328 */
4329 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
4330 kvm_queue_exception(vcpu, UD_VECTOR);
4331 return 1;
4332 }
4333
4334 /* CPL=0 must be checked manually. */
4335 if (vmx_get_cpl(vcpu)) {
4336 kvm_inject_gp(vcpu, 0);
4337 return 1;
4338 }
4339
4340 if (vmx->nested.vmxon)
4341 return nested_vmx_failValid(vcpu,
4342 VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
4343
4344 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
4345 != VMXON_NEEDED_FEATURES) {
4346 kvm_inject_gp(vcpu, 0);
4347 return 1;
4348 }
4349
4350 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4351 return 1;
4352
4353 /*
4354 * SDM 3: 24.11.5
4355	 * The first 4 bytes of the VMXON region contain the supported
4356	 * VMCS revision identifier.
4357	 *
4358	 * Note - IA32_VMX_BASIC[48], which would limit the physical
4359	 * address width to 32 bits, is never 1 for the nested case.
4360 */
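	/*
	 * page_address_valid() is understood to check that the pointer is
	 * page aligned and does not exceed the guest's physical-address
	 * width; the revision check below then reads the first dword of the
	 * region through the normal guest-memory path.
	 */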
KarimAllah Ahmede0bf2662019-01-31 21:24:43 +01004361 if (!page_address_valid(vcpu, vmptr))
Sean Christopherson55d23752018-12-03 13:53:18 -08004362 return nested_vmx_failInvalid(vcpu);
4363
KarimAllah Ahmed2e408932019-01-31 21:24:31 +01004364 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
4365 revision != VMCS12_REVISION)
Sean Christopherson55d23752018-12-03 13:53:18 -08004366 return nested_vmx_failInvalid(vcpu);
4367
Sean Christopherson55d23752018-12-03 13:53:18 -08004368 vmx->nested.vmxon_ptr = vmptr;
4369 ret = enter_vmx_operation(vcpu);
4370 if (ret)
4371 return ret;
4372
4373 return nested_vmx_succeed(vcpu);
4374}
4375
4376static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
4377{
4378 struct vcpu_vmx *vmx = to_vmx(vcpu);
4379
4380 if (vmx->nested.current_vmptr == -1ull)
4381 return;
4382
Sean Christopherson7952d762019-05-07 08:36:29 -07004383 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
4384
Sean Christopherson55d23752018-12-03 13:53:18 -08004385 if (enable_shadow_vmcs) {
4386		/* Copy to memory all shadowed fields in case they were
4387		 * modified. */
4388 copy_shadow_to_vmcs12(vmx);
Sean Christopherson3731905ef2019-05-07 08:36:27 -07004389 vmx->nested.need_vmcs12_to_shadow_sync = false;
Sean Christopherson55d23752018-12-03 13:53:18 -08004390 vmx_disable_shadow_vmcs(vmx);
4391 }
4392 vmx->nested.posted_intr_nv = -1;
4393
4394 /* Flush VMCS12 to guest memory */
4395 kvm_vcpu_write_guest_page(vcpu,
4396 vmx->nested.current_vmptr >> PAGE_SHIFT,
4397 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
4398
4399 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4400
4401 vmx->nested.current_vmptr = -1ull;
4402}
4403
4404/* Emulate the VMXOFF instruction */
4405static int handle_vmoff(struct kvm_vcpu *vcpu)
4406{
4407 if (!nested_vmx_check_permission(vcpu))
4408 return 1;
4409 free_nested(vcpu);
4410 return nested_vmx_succeed(vcpu);
4411}
4412
4413/* Emulate the VMCLEAR instruction */
4414static int handle_vmclear(struct kvm_vcpu *vcpu)
4415{
4416 struct vcpu_vmx *vmx = to_vmx(vcpu);
4417 u32 zero = 0;
4418 gpa_t vmptr;
4419
4420 if (!nested_vmx_check_permission(vcpu))
4421 return 1;
4422
4423 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4424 return 1;
4425
KarimAllah Ahmede0bf2662019-01-31 21:24:43 +01004426 if (!page_address_valid(vcpu, vmptr))
Sean Christopherson55d23752018-12-03 13:53:18 -08004427 return nested_vmx_failValid(vcpu,
4428 VMXERR_VMCLEAR_INVALID_ADDRESS);
4429
4430 if (vmptr == vmx->nested.vmxon_ptr)
4431 return nested_vmx_failValid(vcpu,
4432 VMXERR_VMCLEAR_VMXON_POINTER);
4433
KarimAllah Ahmeddee9c042019-01-31 21:24:42 +01004434 if (vmx->nested.hv_evmcs_map.hva) {
Sean Christopherson55d23752018-12-03 13:53:18 -08004435 if (vmptr == vmx->nested.hv_evmcs_vmptr)
4436 nested_release_evmcs(vcpu);
4437 } else {
4438 if (vmptr == vmx->nested.current_vmptr)
4439 nested_release_vmcs12(vcpu);
4440
4441 kvm_vcpu_write_guest(vcpu,
4442 vmptr + offsetof(struct vmcs12,
4443 launch_state),
4444 &zero, sizeof(zero));
4445 }
4446
4447 return nested_vmx_succeed(vcpu);
4448}
4449
4450static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
4451
4452/* Emulate the VMLAUNCH instruction */
4453static int handle_vmlaunch(struct kvm_vcpu *vcpu)
4454{
4455 return nested_vmx_run(vcpu, true);
4456}
4457
4458/* Emulate the VMRESUME instruction */
4459static int handle_vmresume(struct kvm_vcpu *vcpu)
4460{
4461
4463}
4464
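
/*
 * VMREAD and VMWRITE describe their operands via the VM-exit
 * instruction-information field.  The bits consumed below are, per the
 * SDM's instruction-information format (summarized here for reference):
 *
 *	bit  10:	0 = memory operand, 1 = register operand
 *	bits  6:3:	GPR holding the data (register form only)
 *	bits 31:28:	GPR holding the VMCS field encoding
 *
 * The memory-operand details (base, index, scale, displacement) are
 * decoded separately by get_vmx_mem_address().
 */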
4465static int handle_vmread(struct kvm_vcpu *vcpu)
4466{
4467 unsigned long field;
4468 u64 field_value;
4469 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4470 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004471 int len;
Sean Christopherson55d23752018-12-03 13:53:18 -08004472 gva_t gva = 0;
4473 struct vmcs12 *vmcs12;
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07004474 short offset;
Sean Christopherson55d23752018-12-03 13:53:18 -08004475
4476 if (!nested_vmx_check_permission(vcpu))
4477 return 1;
4478
4479 if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
4480 return nested_vmx_failInvalid(vcpu);
4481
4482 if (!is_guest_mode(vcpu))
4483 vmcs12 = get_vmcs12(vcpu);
4484 else {
4485 /*
4486		 * When vmcs12->vmcs_link_pointer is -1ull, any VMREAD
4487		 * to a shadowed field sets the ALU flags for VMfailInvalid.
4488 */
4489 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4490 return nested_vmx_failInvalid(vcpu);
4491 vmcs12 = get_shadow_vmcs12(vcpu);
4492 }
4493
4494 /* Decode instruction info and find the field to read */
4495 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07004496
4497 offset = vmcs_field_to_offset(field);
4498 if (offset < 0)
Sean Christopherson55d23752018-12-03 13:53:18 -08004499 return nested_vmx_failValid(vcpu,
4500 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4501
Sean Christopherson7952d762019-05-07 08:36:29 -07004502 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
4503 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4504
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07004505 /* Read the field, zero-extended to a u64 field_value */
4506 field_value = vmcs12_read_any(vmcs12, field, offset);
4507
Sean Christopherson55d23752018-12-03 13:53:18 -08004508 /*
4509 * Now copy part of this value to register or memory, as requested.
4510 * Note that the number of bits actually copied is 32 or 64 depending
4511 * on the guest's mode (32 or 64 bit), not on the given field's length.
4512 */
4513 if (vmx_instruction_info & (1u << 10)) {
4514 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
4515 field_value);
4516 } else {
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004517 len = is_64_bit_mode(vcpu) ? 8 : 4;
Sean Christopherson55d23752018-12-03 13:53:18 -08004518 if (get_vmx_mem_address(vcpu, exit_qualification,
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004519 vmx_instruction_info, true, len, &gva))
Sean Christopherson55d23752018-12-03 13:53:18 -08004520 return 1;
4521 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004522 kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
Sean Christopherson55d23752018-12-03 13:53:18 -08004523 }
4524
4525 return nested_vmx_succeed(vcpu);
4526}
4527
Sean Christophersone2174292019-05-07 08:36:28 -07004528static bool is_shadow_field_rw(unsigned long field)
4529{
4530 switch (field) {
4531#define SHADOW_FIELD_RW(x, y) case x:
4532#include "vmcs_shadow_fields.h"
4533 return true;
4534 default:
4535 break;
4536 }
4537 return false;
4538}
4539
4540static bool is_shadow_field_ro(unsigned long field)
4541{
4542 switch (field) {
4543#define SHADOW_FIELD_RO(x, y) case x:
4544#include "vmcs_shadow_fields.h"
4545 return true;
4546 default:
4547 break;
4548 }
4549 return false;
4550}
Sean Christopherson55d23752018-12-03 13:53:18 -08004551
4552static int handle_vmwrite(struct kvm_vcpu *vcpu)
4553{
4554 unsigned long field;
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004555 int len;
Sean Christopherson55d23752018-12-03 13:53:18 -08004556 gva_t gva;
4557 struct vcpu_vmx *vmx = to_vmx(vcpu);
4558 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4559 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4560
4561 /* The value to write might be 32 or 64 bits, depending on L1's long
4562 * mode, and eventually we need to write that into a field of several
4563 * possible lengths. The code below first zero-extends the value to 64
4564 * bit (field_value), and then copies only the appropriate number of
4565 * bits into the vmcs12 field.
4566 */
4567 u64 field_value = 0;
4568 struct x86_exception e;
4569 struct vmcs12 *vmcs12;
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07004570 short offset;
Sean Christopherson55d23752018-12-03 13:53:18 -08004571
4572 if (!nested_vmx_check_permission(vcpu))
4573 return 1;
4574
4575 if (vmx->nested.current_vmptr == -1ull)
4576 return nested_vmx_failInvalid(vcpu);
4577
4578 if (vmx_instruction_info & (1u << 10))
4579 field_value = kvm_register_readl(vcpu,
4580 (((vmx_instruction_info) >> 3) & 0xf));
4581 else {
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004582 len = is_64_bit_mode(vcpu) ? 8 : 4;
Sean Christopherson55d23752018-12-03 13:53:18 -08004583 if (get_vmx_mem_address(vcpu, exit_qualification,
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004584 vmx_instruction_info, false, len, &gva))
Sean Christopherson55d23752018-12-03 13:53:18 -08004585 return 1;
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004586 if (kvm_read_guest_virt(vcpu, gva, &field_value, len, &e)) {
Sean Christopherson55d23752018-12-03 13:53:18 -08004587 kvm_inject_page_fault(vcpu, &e);
4588 return 1;
4589 }
4590 }
4591
4593 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4594 /*
4595 * If the vCPU supports "VMWRITE to any supported field in the
4596 * VMCS," then the "read-only" fields are actually read/write.
4597 */
4598 if (vmcs_field_readonly(field) &&
4599 !nested_cpu_has_vmwrite_any_field(vcpu))
4600 return nested_vmx_failValid(vcpu,
4601 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
4602
Sean Christopherson7952d762019-05-07 08:36:29 -07004603 if (!is_guest_mode(vcpu)) {
Sean Christopherson55d23752018-12-03 13:53:18 -08004604 vmcs12 = get_vmcs12(vcpu);
Sean Christopherson7952d762019-05-07 08:36:29 -07004605
4606 /*
4607 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
4608		 * vmcs12, else we may clobber a field or consume a stale value.
4609 */
4610 if (!is_shadow_field_rw(field))
4611 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4612 } else {
Sean Christopherson55d23752018-12-03 13:53:18 -08004613 /*
4614		 * When vmcs12->vmcs_link_pointer is -1ull, any VMWRITE
4615		 * to a shadowed field sets the ALU flags for VMfailInvalid.
4616 */
4617 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4618 return nested_vmx_failInvalid(vcpu);
4619 vmcs12 = get_shadow_vmcs12(vcpu);
4620 }
4621
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07004622 offset = vmcs_field_to_offset(field);
4623 if (offset < 0)
4624 return nested_vmx_failValid(vcpu,
4625 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4626
Sean Christophersonb6437802019-05-07 08:36:24 -07004627 /*
4628 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
4629 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
4630 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
4631 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
4632 * from L1 will return a different value than VMREAD from L2 (L1 sees
4633 * the stripped down value, L2 sees the full value as stored by KVM).
4634 */
4635 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
4636 field_value &= 0x1f0ff;
4637
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07004638 vmcs12_write_any(vmcs12, field, offset, field_value);
Sean Christopherson55d23752018-12-03 13:53:18 -08004639
4640 /*
Sean Christophersone2174292019-05-07 08:36:28 -07004641 * Do not track vmcs12 dirty-state if in guest-mode as we actually
4642 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
4643 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
4644 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
Sean Christopherson55d23752018-12-03 13:53:18 -08004645 */
Sean Christophersone2174292019-05-07 08:36:28 -07004646 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
4647 /*
4648 * L1 can read these fields without exiting, ensure the
4649 * shadow VMCS is up-to-date.
4650 */
4651 if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
4652 preempt_disable();
4653 vmcs_load(vmx->vmcs01.shadow_vmcs);
Sean Christophersonfadcead2019-05-07 08:36:23 -07004654
Sean Christophersone2174292019-05-07 08:36:28 -07004655 __vmcs_writel(field, field_value);
Sean Christophersonfadcead2019-05-07 08:36:23 -07004656
Sean Christophersone2174292019-05-07 08:36:28 -07004657 vmcs_clear(vmx->vmcs01.shadow_vmcs);
4658 vmcs_load(vmx->loaded_vmcs->vmcs);
4659 preempt_enable();
Sean Christopherson55d23752018-12-03 13:53:18 -08004660 }
Sean Christophersone2174292019-05-07 08:36:28 -07004661 vmx->nested.dirty_vmcs12 = true;
Sean Christopherson55d23752018-12-03 13:53:18 -08004662 }
4663
4664 return nested_vmx_succeed(vcpu);
4665}
4666
4667static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
4668{
4669 vmx->nested.current_vmptr = vmptr;
4670 if (enable_shadow_vmcs) {
Sean Christophersonfe7f895d2019-05-07 12:17:57 -07004671 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
Sean Christopherson55d23752018-12-03 13:53:18 -08004672 vmcs_write64(VMCS_LINK_POINTER,
4673 __pa(vmx->vmcs01.shadow_vmcs));
Sean Christopherson3731905ef2019-05-07 08:36:27 -07004674 vmx->nested.need_vmcs12_to_shadow_sync = true;
Sean Christopherson55d23752018-12-03 13:53:18 -08004675 }
4676 vmx->nested.dirty_vmcs12 = true;
4677}
4678
4679/* Emulate the VMPTRLD instruction */
4680static int handle_vmptrld(struct kvm_vcpu *vcpu)
4681{
4682 struct vcpu_vmx *vmx = to_vmx(vcpu);
4683 gpa_t vmptr;
4684
4685 if (!nested_vmx_check_permission(vcpu))
4686 return 1;
4687
4688 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4689 return 1;
4690
KarimAllah Ahmede0bf2662019-01-31 21:24:43 +01004691 if (!page_address_valid(vcpu, vmptr))
Sean Christopherson55d23752018-12-03 13:53:18 -08004692 return nested_vmx_failValid(vcpu,
4693 VMXERR_VMPTRLD_INVALID_ADDRESS);
4694
4695 if (vmptr == vmx->nested.vmxon_ptr)
4696 return nested_vmx_failValid(vcpu,
4697 VMXERR_VMPTRLD_VMXON_POINTER);
4698
4699 /* Forbid normal VMPTRLD if Enlightened version was used */
4700 if (vmx->nested.hv_evmcs)
4701 return 1;
4702
4703 if (vmx->nested.current_vmptr != vmptr) {
KarimAllah Ahmedb146b832019-01-31 21:24:35 +01004704 struct kvm_host_map map;
Sean Christopherson55d23752018-12-03 13:53:18 -08004705 struct vmcs12 *new_vmcs12;
Sean Christopherson55d23752018-12-03 13:53:18 -08004706
KarimAllah Ahmedb146b832019-01-31 21:24:35 +01004707 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
Sean Christopherson55d23752018-12-03 13:53:18 -08004708 /*
4709 * Reads from an unbacked page return all 1s,
4710 * which means that the 32 bits located at the
4711 * given physical address won't match the required
4712 * VMCS12_REVISION identifier.
4713 */
Vitaly Kuznetsov826c1362019-01-09 18:22:56 +01004714 return nested_vmx_failValid(vcpu,
Sean Christopherson55d23752018-12-03 13:53:18 -08004715 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
Sean Christopherson55d23752018-12-03 13:53:18 -08004716 }
KarimAllah Ahmedb146b832019-01-31 21:24:35 +01004717
4718 new_vmcs12 = map.hva;
4719
Sean Christopherson55d23752018-12-03 13:53:18 -08004720 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
4721 (new_vmcs12->hdr.shadow_vmcs &&
4722 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
KarimAllah Ahmedb146b832019-01-31 21:24:35 +01004723 kvm_vcpu_unmap(vcpu, &map, false);
Sean Christopherson55d23752018-12-03 13:53:18 -08004724 return nested_vmx_failValid(vcpu,
4725 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4726 }
4727
4728 nested_release_vmcs12(vcpu);
4729
4730 /*
4731 * Load VMCS12 from guest memory since it is not already
4732 * cached.
4733 */
4734 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
KarimAllah Ahmedb146b832019-01-31 21:24:35 +01004735 kvm_vcpu_unmap(vcpu, &map, false);
Sean Christopherson55d23752018-12-03 13:53:18 -08004736
4737 set_current_vmptr(vmx, vmptr);
4738 }
4739
4740 return nested_vmx_succeed(vcpu);
4741}
4742
4743/* Emulate the VMPTRST instruction */
4744static int handle_vmptrst(struct kvm_vcpu *vcpu)
4745{
4746 unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
4747 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4748 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
4749 struct x86_exception e;
4750 gva_t gva;
4751
4752 if (!nested_vmx_check_permission(vcpu))
4753 return 1;
4754
4755 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
4756 return 1;
4757
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004758 if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
4759 true, sizeof(gpa_t), &gva))
Sean Christopherson55d23752018-12-03 13:53:18 -08004760 return 1;
4761 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
4762 if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
4763 sizeof(gpa_t), &e)) {
4764 kvm_inject_page_fault(vcpu, &e);
4765 return 1;
4766 }
4767 return nested_vmx_succeed(vcpu);
4768}
4769
4770/* Emulate the INVEPT instruction */
4771static int handle_invept(struct kvm_vcpu *vcpu)
4772{
4773 struct vcpu_vmx *vmx = to_vmx(vcpu);
4774 u32 vmx_instruction_info, types;
4775 unsigned long type;
4776 gva_t gva;
4777 struct x86_exception e;
4778 struct {
4779 u64 eptp, gpa;
4780 } operand;
4781
4782 if (!(vmx->nested.msrs.secondary_ctls_high &
4783 SECONDARY_EXEC_ENABLE_EPT) ||
4784 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
4785 kvm_queue_exception(vcpu, UD_VECTOR);
4786 return 1;
4787 }
4788
4789 if (!nested_vmx_check_permission(vcpu))
4790 return 1;
4791
4792 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4793 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4794
4795 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
4796
4797 if (type >= 32 || !(types & (1 << type)))
4798 return nested_vmx_failValid(vcpu,
4799 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4800
4801 /* According to the Intel VMX instruction reference, the memory
4802 * operand is read even if it isn't needed (e.g., for type==global)
4803 */
4804 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004805 vmx_instruction_info, false, sizeof(operand), &gva))
Sean Christopherson55d23752018-12-03 13:53:18 -08004806 return 1;
4807 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4808 kvm_inject_page_fault(vcpu, &e);
4809 return 1;
4810 }
4811
4812 switch (type) {
4813 case VMX_EPT_EXTENT_GLOBAL:
4814 /*
4815 * TODO: track mappings and invalidate
4816 * single context requests appropriately
4817 */
4818 case VMX_EPT_EXTENT_CONTEXT:
4819 kvm_mmu_sync_roots(vcpu);
4820 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4821 break;
4822 default:
4823 BUG_ON(1);
4824 break;
4825 }
4826
4827 return nested_vmx_succeed(vcpu);
4828}
4829
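/*
 * The INVVPID descriptor read below is 128 bits: the VPID in bits 15:0
 * (bits 63:16 are reserved, hence the operand.vpid >> 16 check) followed
 * by a 64-bit linear address that is only meaningful for the
 * individual-address invalidation type.
 */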
4830static int handle_invvpid(struct kvm_vcpu *vcpu)
4831{
4832 struct vcpu_vmx *vmx = to_vmx(vcpu);
4833 u32 vmx_instruction_info;
4834 unsigned long type, types;
4835 gva_t gva;
4836 struct x86_exception e;
4837 struct {
4838 u64 vpid;
4839 u64 gla;
4840 } operand;
4841 u16 vpid02;
4842
4843 if (!(vmx->nested.msrs.secondary_ctls_high &
4844 SECONDARY_EXEC_ENABLE_VPID) ||
4845 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
4846 kvm_queue_exception(vcpu, UD_VECTOR);
4847 return 1;
4848 }
4849
4850 if (!nested_vmx_check_permission(vcpu))
4851 return 1;
4852
4853 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4854 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4855
4856 types = (vmx->nested.msrs.vpid_caps &
4857 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
4858
4859 if (type >= 32 || !(types & (1 << type)))
4860 return nested_vmx_failValid(vcpu,
4861 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4862
4863	/* According to the Intel VMX instruction reference, the memory
4864 * operand is read even if it isn't needed (e.g., for type==global)
4865 */
4866 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004867 vmx_instruction_info, false, sizeof(operand), &gva))
Sean Christopherson55d23752018-12-03 13:53:18 -08004868 return 1;
4869 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4870 kvm_inject_page_fault(vcpu, &e);
4871 return 1;
4872 }
4873 if (operand.vpid >> 16)
4874 return nested_vmx_failValid(vcpu,
4875 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4876
4877 vpid02 = nested_get_vpid02(vcpu);
4878 switch (type) {
4879 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
4880 if (!operand.vpid ||
4881 is_noncanonical_address(operand.gla, vcpu))
4882 return nested_vmx_failValid(vcpu,
4883 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4884 if (cpu_has_vmx_invvpid_individual_addr()) {
4885 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
4886 vpid02, operand.gla);
4887 } else
4888 __vmx_flush_tlb(vcpu, vpid02, false);
4889 break;
4890 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
4891 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
4892 if (!operand.vpid)
4893 return nested_vmx_failValid(vcpu,
4894 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4895 __vmx_flush_tlb(vcpu, vpid02, false);
4896 break;
4897 case VMX_VPID_EXTENT_ALL_CONTEXT:
4898 __vmx_flush_tlb(vcpu, vpid02, false);
4899 break;
4900 default:
4901 WARN_ON_ONCE(1);
4902 return kvm_skip_emulated_instruction(vcpu);
4903 }
4904
4905 return nested_vmx_succeed(vcpu);
4906}
4907
4908static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
4909 struct vmcs12 *vmcs12)
4910{
Sean Christopherson2b3eaf82019-04-30 10:36:19 -07004911 u32 index = kvm_rcx_read(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08004912 u64 address;
4913 bool accessed_dirty;
4914 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
4915
4916 if (!nested_cpu_has_eptp_switching(vmcs12) ||
4917 !nested_cpu_has_ept(vmcs12))
4918 return 1;
4919
4920 if (index >= VMFUNC_EPTP_ENTRIES)
4921 return 1;
4922
4924 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
4925 &address, index * 8, 8))
4926 return 1;
4927
4928 accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
4929
4930 /*
4931 * If the (L2) guest does a vmfunc to the currently
4932 * active ept pointer, we don't have to do anything else
4933 */
4934 if (vmcs12->ept_pointer != address) {
4935 if (!valid_ept_address(vcpu, address))
4936 return 1;
4937
4938 kvm_mmu_unload(vcpu);
4939 mmu->ept_ad = accessed_dirty;
4940 mmu->mmu_role.base.ad_disabled = !accessed_dirty;
4941 vmcs12->ept_pointer = address;
4942 /*
4943 * TODO: Check what's the correct approach in case
4944 * mmu reload fails. Currently, we just let the next
4945 * reload potentially fail
4946 */
4947 kvm_mmu_reload(vcpu);
4948 }
4949
4950 return 0;
4951}
4952
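/*
 * VMFUNC takes its function number in EAX; for EPTP switching (function 0,
 * the only function emulated here) ECX holds the index into the EPTP list,
 * hence the kvm_rax_read()/kvm_rcx_read() calls below.
 */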
4953static int handle_vmfunc(struct kvm_vcpu *vcpu)
4954{
4955 struct vcpu_vmx *vmx = to_vmx(vcpu);
4956 struct vmcs12 *vmcs12;
Sean Christopherson2b3eaf82019-04-30 10:36:19 -07004957 u32 function = kvm_rax_read(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08004958
4959 /*
4960 * VMFUNC is only supported for nested guests, but we always enable the
4961 * secondary control for simplicity; for non-nested mode, fake that we
4962	 * didn't enable it by injecting #UD.
4963 */
4964 if (!is_guest_mode(vcpu)) {
4965 kvm_queue_exception(vcpu, UD_VECTOR);
4966 return 1;
4967 }
4968
4969 vmcs12 = get_vmcs12(vcpu);
4970 if ((vmcs12->vm_function_control & (1 << function)) == 0)
4971 goto fail;
4972
4973 switch (function) {
4974 case 0:
4975 if (nested_vmx_eptp_switching(vcpu, vmcs12))
4976 goto fail;
4977 break;
4978 default:
4979 goto fail;
4980 }
4981 return kvm_skip_emulated_instruction(vcpu);
4982
4983fail:
4984 nested_vmx_vmexit(vcpu, vmx->exit_reason,
4985 vmcs_read32(VM_EXIT_INTR_INFO),
4986 vmcs_readl(EXIT_QUALIFICATION));
4987 return 1;
4988}
4989
4990
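/*
 * Worked example of the bitmap walk below (illustrative only): an access to
 * port 0x3f8 lands in io_bitmap_a (port < 0x8000), byte 0x3f8 / 8 = 0x7f,
 * bit 0x3f8 & 7 = 0.  A multi-byte access that crosses the 0x7fff/0x8000
 * boundary simply continues the walk into io_bitmap_b.
 */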
4991static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
4992 struct vmcs12 *vmcs12)
4993{
4994 unsigned long exit_qualification;
4995 gpa_t bitmap, last_bitmap;
4996 unsigned int port;
4997 int size;
4998 u8 b;
4999
5000 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5001 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
5002
5003 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5004
5005 port = exit_qualification >> 16;
5006 size = (exit_qualification & 7) + 1;
5007
5008 last_bitmap = (gpa_t)-1;
5009 b = -1;
5010
5011 while (size > 0) {
5012 if (port < 0x8000)
5013 bitmap = vmcs12->io_bitmap_a;
5014 else if (port < 0x10000)
5015 bitmap = vmcs12->io_bitmap_b;
5016 else
5017 return true;
5018 bitmap += (port & 0x7fff) / 8;
5019
5020 if (last_bitmap != bitmap)
5021 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5022 return true;
5023 if (b & (1 << (port & 7)))
5024 return true;
5025
5026 port++;
5027 size--;
5028 last_bitmap = bitmap;
5029 }
5030
5031 return false;
5032}
5033
5034/*
5035 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
5036 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
5037 * disinterest in the current event (read or write a specific MSR) by using an
5038 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5039 */
5040static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
5041 struct vmcs12 *vmcs12, u32 exit_reason)
5042{
Sean Christopherson2b3eaf82019-04-30 10:36:19 -07005043 u32 msr_index = kvm_rcx_read(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08005044 gpa_t bitmap;
5045
5046 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5047 return true;
5048
5049 /*
5050 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5051 * for the four combinations of read/write and low/high MSR numbers.
5052 * First we need to figure out which of the four to use:
5053 */
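	/*
	 * Worked example (illustrative only): a WRMSR of MSR 0xc0000082
	 * selects the write half (+2048) and the high-MSR half (+1024),
	 * then tests byte 0x82 / 8 = 16, bit 0x82 & 7 = 2 of that bitmap.
	 */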
5054 bitmap = vmcs12->msr_bitmap;
5055 if (exit_reason == EXIT_REASON_MSR_WRITE)
5056 bitmap += 2048;
5057 if (msr_index >= 0xc0000000) {
5058 msr_index -= 0xc0000000;
5059 bitmap += 1024;
5060 }
5061
5062 /* Then read the msr_index'th bit from this bitmap: */
5063 if (msr_index < 1024*8) {
5064 unsigned char b;
5065 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
5066 return true;
5067 return 1 & (b >> (msr_index & 7));
5068 } else
5069 return true; /* let L1 handle the wrong parameter */
5070}
5071
5072/*
5073 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
5074 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5075 * intercept (via guest_host_mask etc.) the current event.
5076 */
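/*
 * The exit qualification decoded below follows the SDM layout for
 * control-register accesses: bits 3:0 give the CR number, bits 5:4 the
 * access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW),
 * bits 11:8 the GPR involved, and, for LMSW, bits 31:16 the source data
 * (hence LMSW_SOURCE_DATA_SHIFT).
 */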
5077static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5078 struct vmcs12 *vmcs12)
5079{
5080 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5081 int cr = exit_qualification & 15;
5082 int reg;
5083 unsigned long val;
5084
5085 switch ((exit_qualification >> 4) & 3) {
5086 case 0: /* mov to cr */
5087 reg = (exit_qualification >> 8) & 15;
5088 val = kvm_register_readl(vcpu, reg);
5089 switch (cr) {
5090 case 0:
5091 if (vmcs12->cr0_guest_host_mask &
5092 (val ^ vmcs12->cr0_read_shadow))
5093 return true;
5094 break;
5095 case 3:
5096 if ((vmcs12->cr3_target_count >= 1 &&
5097 vmcs12->cr3_target_value0 == val) ||
5098 (vmcs12->cr3_target_count >= 2 &&
5099 vmcs12->cr3_target_value1 == val) ||
5100 (vmcs12->cr3_target_count >= 3 &&
5101 vmcs12->cr3_target_value2 == val) ||
5102 (vmcs12->cr3_target_count >= 4 &&
5103 vmcs12->cr3_target_value3 == val))
5104 return false;
5105 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5106 return true;
5107 break;
5108 case 4:
5109 if (vmcs12->cr4_guest_host_mask &
5110 (vmcs12->cr4_read_shadow ^ val))
5111 return true;
5112 break;
5113 case 8:
5114 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5115 return true;
5116 break;
5117 }
5118 break;
5119 case 2: /* clts */
5120 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5121 (vmcs12->cr0_read_shadow & X86_CR0_TS))
5122 return true;
5123 break;
5124 case 1: /* mov from cr */
5125 switch (cr) {
5126 case 3:
5127 if (vmcs12->cpu_based_vm_exec_control &
5128 CPU_BASED_CR3_STORE_EXITING)
5129 return true;
5130 break;
5131 case 8:
5132 if (vmcs12->cpu_based_vm_exec_control &
5133 CPU_BASED_CR8_STORE_EXITING)
5134 return true;
5135 break;
5136 }
5137 break;
5138 case 3: /* lmsw */
5139 /*
5140 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
5141 * cr0. Other attempted changes are ignored, with no exit.
5142 */
5143 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5144 if (vmcs12->cr0_guest_host_mask & 0xe &
5145 (val ^ vmcs12->cr0_read_shadow))
5146 return true;
5147 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5148 !(vmcs12->cr0_read_shadow & 0x1) &&
5149 (val & 0x1))
5150 return true;
5151 break;
5152 }
5153 return false;
5154}
5155
5156static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
5157 struct vmcs12 *vmcs12, gpa_t bitmap)
5158{
5159 u32 vmx_instruction_info;
5160 unsigned long field;
5161 u8 b;
5162
5163 if (!nested_cpu_has_shadow_vmcs(vmcs12))
5164 return true;
5165
5166 /* Decode instruction info and find the field to access */
5167 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5168 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5169
5170 /* Out-of-range fields always cause a VM exit from L2 to L1 */
5171 if (field >> 15)
5172 return true;
5173
5174 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
5175 return true;
5176
5177 return 1 & (b >> (field & 7));
5178}
5179
5180/*
5181 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
5182 * should handle it ourselves in L0 (and then continue L2). Only call this
5183 * when in is_guest_mode (L2).
5184 */
5185bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
5186{
5187 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
5188 struct vcpu_vmx *vmx = to_vmx(vcpu);
5189 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5190
5191 if (vmx->nested.nested_run_pending)
5192 return false;
5193
5194 if (unlikely(vmx->fail)) {
5195 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
5196 vmcs_read32(VM_INSTRUCTION_ERROR));
5197 return true;
5198 }
5199
5200 /*
5201 * The host physical addresses of some pages of guest memory
5202 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
5203 * Page). The CPU may write to these pages via their host
5204 * physical address while L2 is running, bypassing any
5205 * address-translation-based dirty tracking (e.g. EPT write
5206 * protection).
5207 *
5208 * Mark them dirty on every exit from L2 to prevent them from
5209 * getting out of sync with dirty tracking.
5210 */
5211 nested_mark_vmcs12_pages_dirty(vcpu);
5212
5213 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
5214 vmcs_readl(EXIT_QUALIFICATION),
5215 vmx->idt_vectoring_info,
5216 intr_info,
5217 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
5218 KVM_ISA_VMX);
5219
5220 switch (exit_reason) {
5221 case EXIT_REASON_EXCEPTION_NMI:
5222 if (is_nmi(intr_info))
5223 return false;
5224 else if (is_page_fault(intr_info))
5225 return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
5226 else if (is_debug(intr_info) &&
5227 vcpu->guest_debug &
5228 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5229 return false;
5230 else if (is_breakpoint(intr_info) &&
5231 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5232 return false;
5233 return vmcs12->exception_bitmap &
5234 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
5235 case EXIT_REASON_EXTERNAL_INTERRUPT:
5236 return false;
5237 case EXIT_REASON_TRIPLE_FAULT:
5238 return true;
5239 case EXIT_REASON_PENDING_INTERRUPT:
5240 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
5241 case EXIT_REASON_NMI_WINDOW:
5242 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
5243 case EXIT_REASON_TASK_SWITCH:
5244 return true;
5245 case EXIT_REASON_CPUID:
5246 return true;
5247 case EXIT_REASON_HLT:
5248 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
5249 case EXIT_REASON_INVD:
5250 return true;
5251 case EXIT_REASON_INVLPG:
5252 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5253 case EXIT_REASON_RDPMC:
5254 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
5255 case EXIT_REASON_RDRAND:
5256 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
5257 case EXIT_REASON_RDSEED:
5258 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
5259 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
5260 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
5261 case EXIT_REASON_VMREAD:
5262 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5263 vmcs12->vmread_bitmap);
5264 case EXIT_REASON_VMWRITE:
5265 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5266 vmcs12->vmwrite_bitmap);
5267 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
5268 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
5269 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
5270 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
5271 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
5272 /*
5273 * VMX instructions trap unconditionally. This allows L1 to
5274 * emulate them for its L2 guest, i.e., allows 3-level nesting!
5275 */
5276 return true;
5277 case EXIT_REASON_CR_ACCESS:
5278 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
5279 case EXIT_REASON_DR_ACCESS:
5280 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
5281 case EXIT_REASON_IO_INSTRUCTION:
5282 return nested_vmx_exit_handled_io(vcpu, vmcs12);
5283 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
5284 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
5285 case EXIT_REASON_MSR_READ:
5286 case EXIT_REASON_MSR_WRITE:
5287 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
5288 case EXIT_REASON_INVALID_STATE:
5289 return true;
5290 case EXIT_REASON_MWAIT_INSTRUCTION:
5291 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
5292 case EXIT_REASON_MONITOR_TRAP_FLAG:
5293 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
5294 case EXIT_REASON_MONITOR_INSTRUCTION:
5295 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
5296 case EXIT_REASON_PAUSE_INSTRUCTION:
5297 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
5298 nested_cpu_has2(vmcs12,
5299 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
5300 case EXIT_REASON_MCE_DURING_VMENTRY:
5301 return false;
5302 case EXIT_REASON_TPR_BELOW_THRESHOLD:
5303 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
5304 case EXIT_REASON_APIC_ACCESS:
5305 case EXIT_REASON_APIC_WRITE:
5306 case EXIT_REASON_EOI_INDUCED:
5307 /*
5308 * The controls for "virtualize APIC accesses," "APIC-
5309 * register virtualization," and "virtual-interrupt
5310 * delivery" only come from vmcs12.
5311 */
5312 return true;
5313 case EXIT_REASON_EPT_VIOLATION:
5314 /*
5315 * L0 always deals with the EPT violation. If nested EPT is
5316 * used, and the nested mmu code discovers that the address is
5317 * missing in the guest EPT table (EPT12), the EPT violation
5318 * will be injected with nested_ept_inject_page_fault()
5319 */
5320 return false;
5321 case EXIT_REASON_EPT_MISCONFIG:
5322 /*
5323 * L2 never uses directly L1's EPT, but rather L0's own EPT
5324 * table (shadow on EPT) or a merged EPT table that L0 built
5325 * (EPT on EPT). So any problems with the structure of the
5326 * table is L0's fault.
5327 */
5328 return false;
5329 case EXIT_REASON_INVPCID:
5330 return
5331 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
5332 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5333 case EXIT_REASON_WBINVD:
5334 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
5335 case EXIT_REASON_XSETBV:
5336 return true;
5337 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
5338 /*
5339 * This should never happen, since it is not possible to
5340 * set XSS to a non-zero value---neither in L1 nor in L2.
5341	 * If it were, XSS would have to be checked against
5342 * the XSS exit bitmap in vmcs12.
5343 */
5344 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
5345 case EXIT_REASON_PREEMPTION_TIMER:
5346 return false;
5347 case EXIT_REASON_PML_FULL:
5348 /* We emulate PML support to L1. */
5349 return false;
5350 case EXIT_REASON_VMFUNC:
5351 /* VM functions are emulated through L2->L0 vmexits. */
5352 return false;
5353 case EXIT_REASON_ENCLS:
5354 /* SGX is never exposed to L1 */
5355 return false;
5356 default:
5357 return true;
5358 }
5359}
5360
5361
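/*
 * vmx_get_nested_state() backs the KVM_GET_NESTED_STATE ioctl: the buffer
 * it fills is a struct kvm_nested_state header followed by the cached
 * vmcs12 (VMCS12_SIZE bytes) and, when L2 is using a shadow VMCS, a second
 * VMCS12_SIZE blob.  A rough userspace sketch (illustrative only, assumes
 * VMCS12_SIZE == 4096, error handling omitted):
 *
 *	char buf[sizeof(struct kvm_nested_state) + 2 * 4096];
 *	struct kvm_nested_state *state = (void *)buf;
 *
 *	state->size = sizeof(buf);
 *	ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state);
 *	...
 *	ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
 */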
5362static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5363 struct kvm_nested_state __user *user_kvm_nested_state,
5364 u32 user_data_size)
5365{
5366 struct vcpu_vmx *vmx;
5367 struct vmcs12 *vmcs12;
5368 struct kvm_nested_state kvm_state = {
5369 .flags = 0,
5370 .format = 0,
5371 .size = sizeof(kvm_state),
5372 .vmx.vmxon_pa = -1ull,
5373 .vmx.vmcs_pa = -1ull,
5374 };
5375
5376 if (!vcpu)
5377 return kvm_state.size + 2 * VMCS12_SIZE;
5378
5379 vmx = to_vmx(vcpu);
5380 vmcs12 = get_vmcs12(vcpu);
5381
5382 if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
5383 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
5384
5385 if (nested_vmx_allowed(vcpu) &&
5386 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
5387 kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
5388 kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
5389
5390 if (vmx_has_valid_vmcs12(vcpu)) {
5391 kvm_state.size += VMCS12_SIZE;
5392
5393 if (is_guest_mode(vcpu) &&
5394 nested_cpu_has_shadow_vmcs(vmcs12) &&
5395 vmcs12->vmcs_link_pointer != -1ull)
5396 kvm_state.size += VMCS12_SIZE;
5397 }
5398
5399 if (vmx->nested.smm.vmxon)
5400 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
5401
5402 if (vmx->nested.smm.guest_mode)
5403 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
5404
5405 if (is_guest_mode(vcpu)) {
5406 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
5407
5408 if (vmx->nested.nested_run_pending)
5409 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
5410 }
5411 }
5412
5413 if (user_data_size < kvm_state.size)
5414 goto out;
5415
5416 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
5417 return -EFAULT;
5418
5419 if (!vmx_has_valid_vmcs12(vcpu))
5420 goto out;
5421
5422 /*
5423 * When running L2, the authoritative vmcs12 state is in the
5424 * vmcs02. When running L1, the authoritative vmcs12 state is
5425 * in the shadow or enlightened vmcs linked to vmcs01, unless
Sean Christopherson3731905ef2019-05-07 08:36:27 -07005426 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
Sean Christopherson55d23752018-12-03 13:53:18 -08005427 * vmcs12 state is in the vmcs12 already.
5428 */
5429 if (is_guest_mode(vcpu)) {
Sean Christopherson3731905ef2019-05-07 08:36:27 -07005430 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
Sean Christopherson7952d762019-05-07 08:36:29 -07005431 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
Sean Christopherson3731905ef2019-05-07 08:36:27 -07005432 } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
Sean Christopherson55d23752018-12-03 13:53:18 -08005433 if (vmx->nested.hv_evmcs)
5434 copy_enlightened_to_vmcs12(vmx);
5435 else if (enable_shadow_vmcs)
5436 copy_shadow_to_vmcs12(vmx);
5437 }
5438
Tom Roeder3a33d032019-01-24 13:48:20 -08005439 /*
5440 * Copy over the full allocated size of vmcs12 rather than just the size
5441 * of the struct.
5442 */
5443 if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
Sean Christopherson55d23752018-12-03 13:53:18 -08005444 return -EFAULT;
5445
5446 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5447 vmcs12->vmcs_link_pointer != -1ull) {
5448 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
Tom Roeder3a33d032019-01-24 13:48:20 -08005449 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
Sean Christopherson55d23752018-12-03 13:53:18 -08005450 return -EFAULT;
5451 }
5452
5453out:
5454 return kvm_state.size;
5455}
5456
5457/*
5458 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
5459 */
5460void vmx_leave_nested(struct kvm_vcpu *vcpu)
5461{
5462 if (is_guest_mode(vcpu)) {
5463 to_vmx(vcpu)->nested.nested_run_pending = 0;
5464 nested_vmx_vmexit(vcpu, -1, 0, 0);
5465 }
5466 free_nested(vcpu);
5467}
5468
5469static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
5470 struct kvm_nested_state __user *user_kvm_nested_state,
5471 struct kvm_nested_state *kvm_state)
5472{
5473 struct vcpu_vmx *vmx = to_vmx(vcpu);
5474 struct vmcs12 *vmcs12;
5475 u32 exit_qual;
5476 int ret;
5477
5478 if (kvm_state->format != 0)
5479 return -EINVAL;
5480
Sean Christopherson55d23752018-12-03 13:53:18 -08005481 if (!nested_vmx_allowed(vcpu))
5482 return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
5483
5484 if (kvm_state->vmx.vmxon_pa == -1ull) {
5485 if (kvm_state->vmx.smm.flags)
5486 return -EINVAL;
5487
5488 if (kvm_state->vmx.vmcs_pa != -1ull)
5489 return -EINVAL;
5490
5491 vmx_leave_nested(vcpu);
5492 return 0;
5493 }
5494
5495 if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
5496 return -EINVAL;
5497
5498 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5499 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5500 return -EINVAL;
5501
5502 if (kvm_state->vmx.smm.flags &
5503 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
5504 return -EINVAL;
5505
5506 /*
5507 * SMM temporarily disables VMX, so we cannot be in guest mode,
5508 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
5509 * must be zero.
5510 */
5511 if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
5512 return -EINVAL;
5513
5514 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5515 !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
5516 return -EINVAL;
5517
5518 vmx_leave_nested(vcpu);
5519 if (kvm_state->vmx.vmxon_pa == -1ull)
5520 return 0;
5521
Aaron Lewis332d0792019-05-02 11:31:33 -07005522 if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
5523 nested_enable_evmcs(vcpu, NULL);
5524
Sean Christopherson55d23752018-12-03 13:53:18 -08005525 vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
5526 ret = enter_vmx_operation(vcpu);
5527 if (ret)
5528 return ret;
5529
5530 /* Empty 'VMXON' state is permitted */
Jim Mattsone8ab8d22019-01-17 11:55:58 -08005531 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
Sean Christopherson55d23752018-12-03 13:53:18 -08005532 return 0;
5533
5534 if (kvm_state->vmx.vmcs_pa != -1ull) {
5535 if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
5536 !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
5537 return -EINVAL;
5538
5539 set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
5540 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
5541 /*
5542 * Sync eVMCS upon entry as we may not have
5543 * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
5544 */
Sean Christopherson3731905ef2019-05-07 08:36:27 -07005545 vmx->nested.need_vmcs12_to_shadow_sync = true;
Sean Christopherson55d23752018-12-03 13:53:18 -08005546 } else {
5547 return -EINVAL;
5548 }
5549
5550 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
5551 vmx->nested.smm.vmxon = true;
5552 vmx->nested.vmxon = false;
5553
5554 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
5555 vmx->nested.smm.guest_mode = true;
5556 }
5557
5558 vmcs12 = get_vmcs12(vcpu);
5559 if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
5560 return -EFAULT;
5561
5562 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
5563 return -EINVAL;
5564
5565 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5566 return 0;
5567
Sean Christopherson21be4ca2019-05-08 11:04:32 -07005568 vmx->nested.nested_run_pending =
5569 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
5570
5571 ret = -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08005572 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5573 vmcs12->vmcs_link_pointer != -1ull) {
5574 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
5575
Paolo Bonzinidb809272019-05-20 11:55:36 +02005576 if (kvm_state->size < sizeof(*kvm_state) + VMCS12_SIZE + sizeof(*vmcs12))
Sean Christopherson21be4ca2019-05-08 11:04:32 -07005577 goto error_guest_mode;
Sean Christopherson55d23752018-12-03 13:53:18 -08005578
5579 if (copy_from_user(shadow_vmcs12,
5580 user_kvm_nested_state->data + VMCS12_SIZE,
Sean Christopherson21be4ca2019-05-08 11:04:32 -07005581 sizeof(*vmcs12))) {
5582 ret = -EFAULT;
5583 goto error_guest_mode;
5584 }
Sean Christopherson55d23752018-12-03 13:53:18 -08005585
5586 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
5587 !shadow_vmcs12->hdr.shadow_vmcs)
Sean Christopherson21be4ca2019-05-08 11:04:32 -07005588 goto error_guest_mode;
Sean Christopherson55d23752018-12-03 13:53:18 -08005589 }
5590
Sean Christopherson5478ba32019-04-11 12:18:06 -07005591 if (nested_vmx_check_controls(vcpu, vmcs12) ||
5592 nested_vmx_check_host_state(vcpu, vmcs12) ||
5593 nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
Sean Christopherson21be4ca2019-05-08 11:04:32 -07005594 goto error_guest_mode;
Sean Christopherson55d23752018-12-03 13:53:18 -08005595
5596 vmx->nested.dirty_vmcs12 = true;
5597 ret = nested_vmx_enter_non_root_mode(vcpu, false);
Sean Christopherson21be4ca2019-05-08 11:04:32 -07005598 if (ret)
5599 goto error_guest_mode;
Sean Christopherson55d23752018-12-03 13:53:18 -08005600
5601 return 0;
Sean Christopherson21be4ca2019-05-08 11:04:32 -07005602
5603error_guest_mode:
5604 vmx->nested.nested_run_pending = 0;
5605 return ret;
Sean Christopherson55d23752018-12-03 13:53:18 -08005606}
5607
5608void nested_vmx_vcpu_setup(void)
5609{
5610 if (enable_shadow_vmcs) {
Sean Christopherson55d23752018-12-03 13:53:18 -08005611 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
Sean Christophersonfadcead2019-05-07 08:36:23 -07005612 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
Sean Christopherson55d23752018-12-03 13:53:18 -08005613 }
5614}
5615
5616/*
5617 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
5618 * returned for the various VMX controls MSRs when nested VMX is enabled.
5619 * The same values should also be used to verify that vmcs12 control fields are
5620 * valid during nested entry from L1 to L2.
5621 * Each of these control MSRs has a low and a high 32-bit half: a low bit is on
5622 * if the corresponding bit in the (32-bit) control field *must* be on, and a
5623 * bit in the high half is on if the corresponding bit in the control field
5624 * may be on. See also vmx_control_verify().
5625 */
5626void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
5627 bool apicv)
5628{
5629 /*
5630 * Note that as a general rule, the high half of the MSRs (bits in
5631 * the control fields which may be 1) should be initialized by the
5632 * intersection of the underlying hardware's MSR (i.e., features which
5633 * can be supported) and the list of features we want to expose -
5634 * because they are known to be properly supported in our code.
5635 * Also, usually, the low half of the MSRs (bits which must be 1) can
5636 * be set to 0, meaning that L1 may turn off any of these bits. The
5637 * reason is that if one of these bits is necessary, it will appear
5638 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
5639 * fields of vmcs01 and vmcs02, will turn these bits off - and
5640 * nested_vmx_exit_reflected() will not pass related exits to L1.
5641 * These rules have exceptions below.
5642 */
5643
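	/*
	 * Concretely, for each control below: the rdmsr() pulls in what the
	 * hardware allows, the "&=" on the high half trims that down to the
	 * features KVM chooses to expose, and the "|=" on the low half adds
	 * the bits L1 must always keep set.
	 */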
5644 /* pin-based controls */
5645 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
5646 msrs->pinbased_ctls_low,
5647 msrs->pinbased_ctls_high);
5648 msrs->pinbased_ctls_low |=
5649 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5650 msrs->pinbased_ctls_high &=
5651 PIN_BASED_EXT_INTR_MASK |
5652 PIN_BASED_NMI_EXITING |
5653 PIN_BASED_VIRTUAL_NMIS |
5654 (apicv ? PIN_BASED_POSTED_INTR : 0);
5655 msrs->pinbased_ctls_high |=
5656 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5657 PIN_BASED_VMX_PREEMPTION_TIMER;
5658
5659 /* exit controls */
5660 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
5661 msrs->exit_ctls_low,
5662 msrs->exit_ctls_high);
5663 msrs->exit_ctls_low =
5664 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
5665
5666 msrs->exit_ctls_high &=
5667#ifdef CONFIG_X86_64
5668 VM_EXIT_HOST_ADDR_SPACE_SIZE |
5669#endif
5670 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
5671 msrs->exit_ctls_high |=
5672 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
5673 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
5674 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
5675
5676 /* We support free control of debug control saving. */
5677 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
5678
5679 /* entry controls */
5680 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
5681 msrs->entry_ctls_low,
5682 msrs->entry_ctls_high);
5683 msrs->entry_ctls_low =
5684 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
5685 msrs->entry_ctls_high &=
5686#ifdef CONFIG_X86_64
5687 VM_ENTRY_IA32E_MODE |
5688#endif
5689 VM_ENTRY_LOAD_IA32_PAT;
5690 msrs->entry_ctls_high |=
5691 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
5692
5693 /* We support free control of debug control loading. */
5694 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
5695
5696 /* cpu-based controls */
5697 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
5698 msrs->procbased_ctls_low,
5699 msrs->procbased_ctls_high);
5700 msrs->procbased_ctls_low =
5701 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5702 msrs->procbased_ctls_high &=
5703 CPU_BASED_VIRTUAL_INTR_PENDING |
5704 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
5705 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
5706 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
5707 CPU_BASED_CR3_STORE_EXITING |
5708#ifdef CONFIG_X86_64
5709 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
5710#endif
5711 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
5712 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
5713 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
5714 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
5715 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
5716 /*
5717 * We can allow some features even when not supported by the
5718 * hardware. For example, L1 can specify an MSR bitmap - and we
5719 * can use it to avoid exits to L1 - even when L0 runs L2
5720 * without MSR bitmaps.
5721 */
5722 msrs->procbased_ctls_high |=
5723 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5724 CPU_BASED_USE_MSR_BITMAPS;
5725
5726 /* We support free control of CR3 access interception. */
5727 msrs->procbased_ctls_low &=
5728 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
5729
5730 /*
5731 * secondary cpu-based controls. Do not include those that
5732	 * depend on CPUID bits; they are added later by vmx_cpuid_update.
5733 */
Vitaly Kuznetsov6b1971c2019-02-07 11:42:14 +01005734 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
5735 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
5736 msrs->secondary_ctls_low,
5737 msrs->secondary_ctls_high);
5738
Sean Christopherson55d23752018-12-03 13:53:18 -08005739 msrs->secondary_ctls_low = 0;
5740 msrs->secondary_ctls_high &=
5741 SECONDARY_EXEC_DESC |
5742 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
5743 SECONDARY_EXEC_APIC_REGISTER_VIRT |
5744 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
5745 SECONDARY_EXEC_WBINVD_EXITING;
5746
5747 /*
5748 * We can emulate "VMCS shadowing," even if the hardware
5749 * doesn't support it.
5750 */
5751 msrs->secondary_ctls_high |=
5752 SECONDARY_EXEC_SHADOW_VMCS;
5753
5754 if (enable_ept) {
5755 /* nested EPT: emulate EPT also to L1 */
5756 msrs->secondary_ctls_high |=
5757 SECONDARY_EXEC_ENABLE_EPT;
5758 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
5759 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
5760 if (cpu_has_vmx_ept_execute_only())
5761 msrs->ept_caps |=
5762 VMX_EPT_EXECUTE_ONLY_BIT;
5763 msrs->ept_caps &= ept_caps;
5764 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
5765 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
5766 VMX_EPT_1GB_PAGE_BIT;
5767 if (enable_ept_ad_bits) {
5768 msrs->secondary_ctls_high |=
5769 SECONDARY_EXEC_ENABLE_PML;
5770 msrs->ept_caps |= VMX_EPT_AD_BIT;
5771 }
5772 }
5773
5774 if (cpu_has_vmx_vmfunc()) {
5775 msrs->secondary_ctls_high |=
5776 SECONDARY_EXEC_ENABLE_VMFUNC;
5777 /*
5778 * Advertise EPTP switching unconditionally
5779 * since we emulate it
5780 */
5781 if (enable_ept)
5782 msrs->vmfunc_controls =
5783 VMX_VMFUNC_EPTP_SWITCHING;
5784 }
5785
5786 /*
5787 * Old versions of KVM use the single-context version without
5788 * checking for support, so declare that it is supported even
5789 * though it is treated as global context. The alternative is
5790 * not failing the single-context invvpid, and it is worse.
5791 */
5792 if (enable_vpid) {
5793 msrs->secondary_ctls_high |=
5794 SECONDARY_EXEC_ENABLE_VPID;
5795 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
5796 VMX_VPID_EXTENT_SUPPORTED_MASK;
5797 }
5798
5799 if (enable_unrestricted_guest)
5800 msrs->secondary_ctls_high |=
5801 SECONDARY_EXEC_UNRESTRICTED_GUEST;
5802
5803 if (flexpriority_enabled)
5804 msrs->secondary_ctls_high |=
5805 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
5806
5807 /* miscellaneous data */
5808 rdmsr(MSR_IA32_VMX_MISC,
5809 msrs->misc_low,
5810 msrs->misc_high);
5811 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
5812 msrs->misc_low |=
5813 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
5814 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
5815 VMX_MISC_ACTIVITY_HLT;
5816 msrs->misc_high = 0;
5817
5818 /*
5819 * This MSR reports some information about VMX support. We
5820 * should return information about the VMX we emulate for the
5821 * guest, and the VMCS structure we give it - not about the
5822 * VMX support of the underlying hardware.
5823 */
5824 msrs->basic =
5825 VMCS12_REVISION |
5826 VMX_BASIC_TRUE_CTLS |
5827 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
5828 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
5829
5830 if (cpu_has_vmx_basic_inout())
5831 msrs->basic |= VMX_BASIC_INOUT;
5832
5833 /*
5834 * These MSRs specify bits which the guest must keep fixed on
5835 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
5836 * We picked the standard core2 setting.
5837 */
5838#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
5839#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
5840 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
5841 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
5842
5843 /* These MSRs specify bits which the guest must keep fixed off. */
5844 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
5845 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
5846
5847 /* highest index: VMX_PREEMPTION_TIMER_VALUE */
5848 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
5849}
5850
5851void nested_vmx_hardware_unsetup(void)
5852{
5853 int i;
5854
5855 if (enable_shadow_vmcs) {
5856 for (i = 0; i < VMX_BITMAP_NR; i++)
5857 free_page((unsigned long)vmx_bitmap[i]);
5858 }
5859}
5860
5861__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
5862{
5863 int i;
5864
Paolo Bonzini2b279242019-04-15 15:57:19 +02005865 /*
5866 * Without EPT it is not possible to restore L1's CR3 and PDPTR on
5867 * VMfail, because they are not available in vmcs01. Just always
5868 * use hardware checks.
5869 */
5870 if (!enable_ept)
5871 nested_early_check = 1;
5872
Sean Christopherson55d23752018-12-03 13:53:18 -08005873 if (!cpu_has_vmx_shadow_vmcs())
5874 enable_shadow_vmcs = 0;
5875 if (enable_shadow_vmcs) {
5876 for (i = 0; i < VMX_BITMAP_NR; i++) {
Ben Gardon41836832019-02-11 11:02:52 -08005877 /*
5878 * The vmx_bitmap is not tied to a VM and so should
5879 * not be charged to a memcg.
5880 */
Sean Christopherson55d23752018-12-03 13:53:18 -08005881 vmx_bitmap[i] = (unsigned long *)
5882 __get_free_page(GFP_KERNEL);
5883 if (!vmx_bitmap[i]) {
5884 nested_vmx_hardware_unsetup();
5885 return -ENOMEM;
5886 }
5887 }
5888
5889 init_vmcs_shadow_fields();
5890 }
5891
5892	exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
5893	exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
5894	exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
5895	exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
5896	exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
5897	exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
5898	exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
5899	exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
5900	exit_handlers[EXIT_REASON_VMON] = handle_vmon;
5901	exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
5902	exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
5903	exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
5904
5905 kvm_x86_ops->check_nested_events = vmx_check_nested_events;
5906 kvm_x86_ops->get_nested_state = vmx_get_nested_state;
5907 kvm_x86_ops->set_nested_state = vmx_set_nested_state;
5908	kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
5909 kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
Vitaly Kuznetsove2e871a2018-12-10 18:21:55 +01005910 kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
Sean Christopherson55d23752018-12-03 13:53:18 -08005911
5912 return 0;
5913}